Dataset columns and the value ranges reported by the viewer:

Column               Type            Range
repo_name            stringlengths   7 – 71
file_path            stringlengths   5 – 118
context              list            entries of {identifier, path, snippet}
import_statement     stringlengths   45 – 12.5k
token_num            int64           641 – 99.4k
cropped_code         stringlengths   44 – 17k
all_code             stringlengths   43 – 754k
next_line            stringlengths   2 – 330
gold_snippet_index   int64           0 – 68
created_at           stringlengths   25 – 25
level                stringclasses   9 values
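Each column above appears once per row in the examples that follow. Below is a minimal sketch of loading a dataset with this schema via the Hugging Face datasets library and inspecting one row; the Hub dataset name is a placeholder (this page does not state it), and the comments on field roles are inferred from the column names.

# Sketch only: the dataset name below is hypothetical, not taken from this page.
from datasets import load_dataset

ds = load_dataset("org/repo-level-next-line-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])
print("context snippets:", len(row["context"]))          # list of {identifier, path, snippet} dicts
print("gold snippet index:", row["gold_snippet_index"])  # presumably the snippet relevant to next_line
print("prompt length in tokens:", row["token_num"])
print("reference next line:", row["next_line"])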
repo_name: DLYuanGod/TinyGPT-V
file_path: minigpt4/datasets/builders/image_text_pair_builder.py
[ { "identifier": "registry", "path": "minigpt4/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "BaseDatasetBuilder", "path": "minigpt4/datasets/builders/base_dataset_builder.py", "snippet": "class BaseDatasetBuilder:\n train_dataset_cls, eval_dataset_cls = None, None\n\n def __init__(self, cfg=None):\n super().__init__()\n\n if cfg is None:\n # help to create datasets from default config.\n self.config = load_dataset_config(self.default_config_path())\n elif isinstance(cfg, str):\n self.config = load_dataset_config(cfg)\n else:\n # when called from task.build_dataset()\n self.config = cfg\n\n self.data_type = self.config.data_type\n\n self.vis_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n self.text_processors = {\"train\": BaseProcessor(), \"eval\": BaseProcessor()}\n\n def build_datasets(self):\n # download, split, etc...\n # only called on 1 GPU/TPU in distributed\n\n if is_main_process():\n self._download_data()\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n # at this point, all the annotations and image/videos should be all downloaded to the specified locations.\n logging.info(\"Building datasets...\")\n datasets = self.build() # dataset['train'/'val'/'test']\n\n return datasets\n\n def build_processors(self):\n vis_proc_cfg = self.config.get(\"vis_processor\")\n txt_proc_cfg = self.config.get(\"text_processor\")\n\n if vis_proc_cfg is not None:\n vis_train_cfg = vis_proc_cfg.get(\"train\")\n vis_eval_cfg = vis_proc_cfg.get(\"eval\")\n\n self.vis_processors[\"train\"] = self._build_proc_from_cfg(vis_train_cfg)\n self.vis_processors[\"eval\"] = self._build_proc_from_cfg(vis_eval_cfg)\n\n if txt_proc_cfg is not None:\n txt_train_cfg = txt_proc_cfg.get(\"train\")\n txt_eval_cfg = txt_proc_cfg.get(\"eval\")\n\n self.text_processors[\"train\"] = self._build_proc_from_cfg(txt_train_cfg)\n self.text_processors[\"eval\"] = self._build_proc_from_cfg(txt_eval_cfg)\n\n @staticmethod\n def _build_proc_from_cfg(cfg):\n return (\n registry.get_processor_class(cfg.name).from_config(cfg)\n if cfg is not None\n else None\n )\n\n @classmethod\n def default_config_path(cls, type=\"default\"):\n return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type])\n\n def _download_data(self):\n self._download_ann()\n self._download_vis()\n\n def _download_ann(self):\n \"\"\"\n Download annotation files if necessary.\n All the vision-language datasets should have annotations of unified format.\n\n storage_path can be:\n (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative.\n (2) basename/dirname: will be suffixed with base name 
of URL if dirname is provided.\n\n Local annotation paths should be relative.\n \"\"\"\n anns = self.config.build_info.annotations\n\n splits = anns.keys()\n\n cache_root = registry.get_path(\"cache_root\")\n\n for split in splits:\n info = anns[split]\n\n urls, storage_paths = info.get(\"url\", None), info.storage\n\n if isinstance(urls, str):\n urls = [urls]\n if isinstance(storage_paths, str):\n storage_paths = [storage_paths]\n\n assert len(urls) == len(storage_paths)\n\n for url_or_filename, storage_path in zip(urls, storage_paths):\n # if storage_path is relative, make it full by prefixing with cache_root.\n if not os.path.isabs(storage_path):\n storage_path = os.path.join(cache_root, storage_path)\n\n dirname = os.path.dirname(storage_path)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n\n if os.path.isfile(url_or_filename):\n src, dst = url_or_filename, storage_path\n if not os.path.exists(dst):\n shutil.copyfile(src=src, dst=dst)\n else:\n logging.info(\"Using existing file {}.\".format(dst))\n else:\n if os.path.isdir(storage_path):\n # if only dirname is provided, suffix with basename of URL.\n raise ValueError(\n \"Expecting storage_path to be a file path, got directory {}\".format(\n storage_path\n )\n )\n else:\n filename = os.path.basename(storage_path)\n\n download_url(url=url_or_filename, root=dirname, filename=filename)\n\n def _download_vis(self):\n\n storage_path = self.config.build_info.get(self.data_type).storage\n storage_path = utils.get_cache_path(storage_path)\n\n if not os.path.exists(storage_path):\n warnings.warn(\n f\"\"\"\n The specified path {storage_path} for visual inputs does not exist.\n Please provide a correct path to the visual inputs or\n refer to datasets/download_scripts/README.md for downloading instructions.\n \"\"\"\n )\n\n def build(self):\n \"\"\"\n Create by split datasets inheriting torch.utils.data.Datasets.\n\n # build() can be dataset-specific. 
Overwrite to customize.\n \"\"\"\n self.build_processors()\n\n build_info = self.config.build_info\n\n ann_info = build_info.annotations\n vis_info = build_info.get(self.data_type)\n\n datasets = dict()\n for split in ann_info.keys():\n if split not in [\"train\", \"val\", \"test\"]:\n continue\n\n is_train = split == \"train\"\n\n # processors\n vis_processor = (\n self.vis_processors[\"train\"]\n if is_train\n else self.vis_processors[\"eval\"]\n )\n text_processor = (\n self.text_processors[\"train\"]\n if is_train\n else self.text_processors[\"eval\"]\n )\n\n # annotation path\n ann_paths = ann_info.get(split).storage\n if isinstance(ann_paths, str):\n ann_paths = [ann_paths]\n\n abs_ann_paths = []\n for ann_path in ann_paths:\n if not os.path.isabs(ann_path):\n ann_path = utils.get_cache_path(ann_path)\n abs_ann_paths.append(ann_path)\n ann_paths = abs_ann_paths\n\n # visual data storage path\n vis_path = os.path.join(vis_info.storage, split)\n\n if not os.path.isabs(vis_path):\n # vis_path = os.path.join(utils.get_cache_path(), vis_path)\n vis_path = utils.get_cache_path(vis_path)\n\n if not os.path.exists(vis_path):\n warnings.warn(\"storage path {} does not exist.\".format(vis_path))\n\n # create datasets\n dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls\n datasets[split] = dataset_cls(\n vis_processor=vis_processor,\n text_processor=text_processor,\n ann_paths=ann_paths,\n vis_root=vis_path,\n )\n\n return datasets" }, { "identifier": "LaionDataset", "path": "minigpt4/datasets/datasets/laion_dataset.py", "snippet": "class LaionDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUDataset(BaseDataset):\n def __init__(self, vis_processor, text_processor, location):\n super().__init__(vis_processor=vis_processor, text_processor=text_processor)\n\n self.inner_dataset = wds.DataPipeline(\n wds.ResampledShards(location),\n wds.tarfile_to_samples(handler=wds.warn_and_continue),\n wds.shuffle(1000, handler=wds.warn_and_continue),\n wds.decode(\"pilrgb\", handler=wds.warn_and_continue),\n wds.to_tuple(\"jpg\", \"json\", handler=wds.warn_and_continue),\n wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue),\n wds.map(self.to_dict, handler=wds.warn_and_continue),\n )\n\n def to_dict(self, sample):\n return {\n \"image\": sample[0],\n \"answer\": self.text_processor(sample[1][\"caption\"]),\n }" }, { "identifier": "CCSBUAlignDataset", "path": "minigpt4/datasets/datasets/cc_sbu_dataset.py", "snippet": "class CCSBUAlignDataset(CaptionDataset):\n\n def __getitem__(self, index):\n\n # TODO this assumes image input, not general enough\n ann = self.annotation[index]\n\n img_file = '{}.jpg'.format(ann[\"image_id\"])\n image_path = os.path.join(self.vis_root, img_file)\n image = 
Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n caption = ann[\"caption\"]\n\n return {\n \"image\": image,\n \"answer\": caption,\n \"image_id\": self.img_ids[ann[\"image_id\"]],\n }" }, { "identifier": "TextCapDataset", "path": "minigpt4/datasets/datasets/text_caps.py", "snippet": "class TextCapDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n 'Briefly describe this image.',\n 'Provide a concise depiction of this image.',\n 'Present a short description of this image.',\n 'Summarize this image in a few words.',\n 'A short image caption:',\n 'A short image description:',\n 'A photo of ',\n 'An image that shows ',\n 'Write a short description for the image. ',\n 'Write a description for the photo.',\n 'Provide a description of what is presented in the photo.',\n 'Briefly describe the content of the image.',\n 'Can you briefly explain what you see in the image?',\n 'Could you use a few words to describe what you perceive in the photo?',\n 'Please provide a short depiction of the picture.',\n 'Using language, provide a short account of the image.',\n 'Use a few words to illustrate what is happening in the picture.',\n ]\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n\n def __len__(self):\n return len(self.ann[\"data\"])\n\n\n def __getitem__(self, index):\n info = self.ann[\"data\"][index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n caption = info[\"caption_str\"]\n caption = self.text_processor(caption)\n instruction = \"<Img><ImageHere></Img> [caption] {} \".format(random.choice(self.instruction_pool))\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": caption,\n }" }, { "identifier": "LlavaDetailDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n \n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaReasonDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaReasonDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['conversations'][1]['value']\n instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n\n instruction = '<Img><ImageHere></Img> {} '.format(self.text_processor(instruction))\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['id'],\n }" }, { "identifier": "LlavaConversationDataset", "path": "minigpt4/datasets/datasets/llava_dataset.py", "snippet": "class LlavaConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.ann=[]\n\n \n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "UnnaturalDataset", "path": "minigpt4/datasets/datasets/unnatural_instruction.py", "snippet": "class UnnaturalDataset(Dataset):\n def __init__(self, text_processor, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.text_processor = text_processor\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index][\"instances\"][0]\n instruction = info[\"instruction_with_input\"]\n constraints = info[\"constraints\"]\n answer = info[\"output\"]\n if constraints != None:\n instruction = instruction+\" \"+constraints\n\n return {\n \"instruction_input\": self.text_processor(instruction),\n \"answer\": self.text_processor(answer),\n }" }, { "identifier": "MultiTaskConversationDataset", "path": "minigpt4/datasets/datasets/multitask_conversation.py", "snippet": "class MultiTaskConversationDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n self.connect_sym = \"!@#\"\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = 'COCO_train2014_{}.jpg'.format(info['id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n first_instruction = info['conversations'][0]['value'].replace('<image>', '').replace('\\n', '').strip()\n first_instruction = '<Img><ImageHere></Img> {} '.format(first_instruction)\n\n questions = [first_instruction]\n answers = []\n\n for i, item in enumerate(info[\"conversations\"][1:]):\n if i % 2 ==0: # assistant\n assistant_answer = item[\"value\"]\n answers.append(assistant_answer)\n else:\n human_instruction = item[\"value\"]+\" \"\n questions.append(human_instruction)\n\n questions = self.connect_sym.join(questions)\n answers = self.connect_sym.join(answers)\n\n\n return {\n \"image\": image,\n \"conv_q\": questions,\n 'conv_a': answers,\n \"image_id\": info['id'],\n \"connect_sym\": self.connect_sym\n }" }, { "identifier": "GroundedDetailDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class GroundedDetailDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[grounding] please describe this image in details',\n '[grounding] describe this image as detailed as possible',\n '[grounding] summarize this image in details',\n '[grounding] give a thorough description of what you see in this image',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n # image_file = 'COCO_train2014_{}.jpg'.format(info['image_id'])\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n answer = info['grounded_caption']\n instruction = random.choice(self.instruction_pool)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "CaptionToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class CaptionToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"caption\"]\n answer = info[\"output\"]\n\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"CaptionToObject instruction\", instruction)\n print(\"CaptionToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "PhraseToObjectDataset", "path": "minigpt4/datasets/datasets/flickr.py", "snippet": "class PhraseToObjectDataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.instruction_pool = [\n '[detection] {}',\n ]\n\n with open(ann_path, 'r') as f:\n self.ann = json.load(f)\n\n def __len__(self):\n return len(self.ann)\n\n def __getitem__(self, index):\n info = self.ann[index]\n image_file = '{}.jpg'.format(info['image_id'])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image = self.vis_processor(image)\n\n input = info[\"phrase\"]\n answer = \"<p>\"+input+\"</p> \"+info[\"bbox\"]\n instruction = random.choice(self.instruction_pool).format(input)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n print(\"PhraseToObject instruction\", instruction)\n print(\"PhraseToObject answer\", answer)\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answer,\n \"image_id\": info['image_id'],\n }" }, { "identifier": "ReferVisualGenomeDataset", "path": "minigpt4/datasets/datasets/vg_dataset.py", "snippet": "class ReferVisualGenomeDataset(Dataset):\n def __init__(self, vis_processor, text_processor, data_dir):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.data_dir = data_dir\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n all_regions = local.get_all_region_descriptions(self.data_dir)\n all_regions = [region for regions in all_regions for region in regions]\n\n # follow OFA practice, only regions smaller than 16384 pixels are used for refer\n self.regions = [region for region in all_regions if region.width * region.height < 16384]\n\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.regions)\n\n def preprocess(self, index):\n region = self.regions[index]\n image_file = region.image.url.split('/')[-2:]\n image_path = os.path.join(self.data_dir, *image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [100,100]\n\n sample_sentence = region.phrase\n refer_sentence = self.text_processor(sample_sentence)\n\n bbox = [region.x, region.y, region.width, region.height]\n\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": region.image.id,\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "ReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class ReferCOCODataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path, dataset='refcoco', splitBy='unc'):\n \"\"\"\n vis_root (string): Root directory of images (e.g. 
coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n\n self.refer = REFER(ann_path, vis_root, dataset, splitBy)\n self.ref_ids = self.refer.getRefIds(split=\"train\")\n\n self.instruction_pool = [\n \"[refer] {}\",\n \"[refer] give me the location of {}\",\n \"[refer] where is {} ?\",\n \"[refer] from this image, tell me the location of {}\",\n \"[refer] the location of {} is\",\n \"[refer] could you tell me the location for {} ?\",\n \"[refer] where can I locate the {} ?\",\n ]\n\n\n def __len__(self):\n return len(self.ref_ids)\n\n def preprocess(self, index):\n ref_id = self.ref_ids[index]\n ref = self.refer.loadRefs(ref_id)[0]\n\n image_file = 'COCO_train2014_{:0>12}.jpg'.format(ref[\"image_id\"])\n image_path = os.path.join(self.vis_root, image_file)\n image = Image.open(image_path).convert(\"RGB\")\n image_orig_size = image.size\n image = self.vis_processor(image)\n image_new_size = [image.shape[1], image.shape[2]]\n\n image_new_size = [100,100]\n\n sample_sentence = random.choice(ref['sentences'])['raw']\n refer_sentence = self.text_processor(sample_sentence)\n\n\n bbox = self.refer.getRefBox(ref['ref_id'])\n bbox = [\n bbox[0] / image_orig_size[0] * image_new_size[0],\n bbox[1] / image_orig_size[1] * image_new_size[1],\n (bbox[0] + bbox[2]) / image_orig_size[0] * image_new_size[0],\n (bbox[1] + bbox[3]) / image_orig_size[1] * image_new_size[1]\n ]\n bbox = [int(x) for x in bbox]\n bbox = \"{{<{}><{}><{}><{}>}}\".format(*bbox)\n return {\n \"image\": image,\n \"refer_sentence\": refer_sentence,\n \"bbox\": bbox,\n \"image_id\": ref['image_id'],\n }\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n instruction = random.choice(self.instruction_pool).format(data['refer_sentence'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": data['bbox'],\n \"image_id\": data['image_id'],\n }" }, { "identifier": "InvReferCOCODataset", "path": "minigpt4/datasets/datasets/coco_dataset.py", "snippet": "class InvReferCOCODataset(ReferCOCODataset):\n def __init__(self, *args, **kwargs):\n super(InvReferCOCODataset, self).__init__(*args, **kwargs)\n\n self.instruction_pool = [\n \"[identify] {}\",\n \"[identify] what object is in this location {}\",\n \"[identify] identify the object present at this location {}\",\n \"[identify] what is it in {}\",\n \"[identify] describe this object in {}\",\n \"[identify] this {} is\",\n \"[identify] the object in {} is\",\n ]\n\n def __getitem__(self, index):\n data = self.preprocess(index)\n\n instruction = random.choice(self.instruction_pool).format(data['bbox'])\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n \n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['refer_sentence']),\n \"image_id\": data['image_id'],\n }" }, { "identifier": "GQADataset", "path": "minigpt4/datasets/datasets/gqa_datasets.py", "snippet": "class GQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def __getitem__(self, index):\n ann = self.annotation[index]\n\n image_path = 
os.path.join(self.vis_root, ann[\"image\"])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n instruction = random.choice(self.instruction_pool).format(question)\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n answers = self.text_processor(ann[\"answer\"])\n\n return {\n \"image\": image,\n \"instruction_input\": instruction,\n \"answer\": answers,\n }" }, { "identifier": "AOKVQADataset", "path": "minigpt4/datasets/datasets/aok_vqa_datasets.py", "snippet": "class AOKVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n\n answer_key = \"direct_answers\"\n\n answer_weight = {}\n for answer in ann[answer_key]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[answer_key])\n else:\n answer_weight[answer] = 1 / len(ann[answer_key])\n\n answers = list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n return {\n \"image\": image,\n \"question\": question,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n question = self.text_processor(data[\"question\"])\n instruction = random.choice(self.instruction_pool).format(question)\n\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n answer = self.text_processor(data['answer'])\n\n return {\n \"image\": data['image'],\n \"instruction_input\": instruction,\n \"answer\": answer,\n }" }, { "identifier": "COCOVQADataset", "path": "minigpt4/datasets/datasets/coco_vqa_datasets.py", "snippet": "class COCOVQADataset(VQADataset, __DisplMixin):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n super().__init__(vis_processor, text_processor, vis_root, ann_paths)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n exist_annotation = []\n for ann in self.annotation:\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n if os.path.exists(image_path):\n exist_annotation.append(ann)\n self.annotation = exist_annotation\n\n\n def get_data(self, index):\n ann = self.annotation[index]\n\n image_path = os.path.join(self.vis_root, ann[\"image\"].split('/')[-1])\n image = Image.open(image_path).convert(\"RGB\")\n\n image = self.vis_processor(image)\n question = self.text_processor(ann[\"question\"])\n question_id = ann[\"question_id\"]\n\n answer_weight = {}\n for answer in ann[\"answer\"]:\n if answer in answer_weight.keys():\n answer_weight[answer] += 1 / len(ann[\"answer\"])\n else:\n answer_weight[answer] = 1 / len(ann[\"answer\"])\n\n answers = 
list(answer_weight.keys())\n weights = list(answer_weight.values())\n\n answer = random.choices(answers, weights=weights, k=1)[0] # random sample an answer according to weights\n\n\n return {\n \"image\": image,\n \"question\": question,\n \"question_id\": question_id,\n \"answer\": answer,\n }\n\n def __getitem__(self, index):\n data = self.get_data(index)\n instruction = random.choice(self.instruction_pool).format(data['question'])\n instruction = \"<Img><ImageHere></Img> {} \".format(instruction)\n\n return {\n \"image\": data['image'],\n \"question_id\": data[\"question_id\"],\n \"instruction_input\": instruction,\n \"answer\": self.text_processor(data['answer']),\n }" }, { "identifier": "OCRVQADataset", "path": "minigpt4/datasets/datasets/ocrvqa_dataset.py", "snippet": "class OCRVQADataset(Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_path):\n \"\"\"\n vis_root (string): Root directory of images (e.g. coco/images/)\n ann_root (string): directory to store the annotation file\n \"\"\"\n self.vis_root = vis_root\n\n self.vis_processor = vis_processor\n self.text_processor = text_processor\n self.data = self.create_data(ann_path)\n\n self.instruction_pool =[\n \"[vqa] {}\",\n \"[vqa] Based on the image, respond to this question with a short answer: {}\"\n ]\n\n def create_data(self, ann_path):\n processed_data = []\n with open(ann_path, 'r') as f:\n data = json.load(f)\n for k in data.keys():\n if data[k]['split'] != 1: continue # 1 for training, 2 for validation, 3 for test\n ext = os.path.splitext(data[k]['imageURL'])[1]\n imageFile = k + ext\n assert len(data[k]['questions']) == len(data[k]['answers'])\n for q, a in zip(data[k]['questions'], data[k]['answers']):\n processed_data.append(\n {'question': q,\n 'answer': a,\n 'image_path': imageFile,\n 'image_id': k,\n 'title': data[k]['title'],\n 'genre': data[k]['genre'],\n }\n )\n return processed_data\n\n def __len__(self):\n return len(self.data)" }, { "identifier": "COCOCapDataset", "path": "minigpt4/datasets/datasets/coco_caption.py", "snippet": "class COCOCapEvalDataset(CaptionEvalDataset):\nclass NoCapsEvalDataset(CaptionEvalDataset):\nclass RefCOCOEvalData(torch.utils.data.Dataset):\nclass EvalCaptionData(torch.utils.data.Dataset):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, vis_processor, text_processor, vis_root, ann_paths):\n def __getitem__(self, index):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __new__(cls, *args, **kwargs):\n def __len__(self):\n def __getitem__(self, idx):\n def __init__(self, loaded_data, vis_processor, root_path):\n def __len__(self):\n def __getitem__(self, idx):" } ]
import_statement:
import os
import logging
import warnings

from minigpt4.common.registry import registry
from minigpt4.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from minigpt4.datasets.datasets.laion_dataset import LaionDataset
from minigpt4.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset
from minigpt4.datasets.datasets.text_caps import TextCapDataset
from minigpt4.datasets.datasets.llava_dataset import LlavaDetailDataset, LlavaReasonDataset, LlavaConversationDataset
from minigpt4.datasets.datasets.unnatural_instruction import UnnaturalDataset
from minigpt4.datasets.datasets.multitask_conversation import MultiTaskConversationDataset
from minigpt4.datasets.datasets.flickr import GroundedDetailDataset, CaptionToObjectDataset, PhraseToObjectDataset
from minigpt4.datasets.datasets.vg_dataset import ReferVisualGenomeDataset
from minigpt4.datasets.datasets.coco_dataset import ReferCOCODataset, InvReferCOCODataset
from minigpt4.datasets.datasets.gqa_datasets import GQADataset
from minigpt4.datasets.datasets.aok_vqa_datasets import AOKVQADataset
from minigpt4.datasets.datasets.coco_vqa_datasets import COCOVQADataset
from minigpt4.datasets.datasets.ocrvqa_dataset import OCRVQADataset
from minigpt4.datasets.datasets.coco_caption import COCOCapDataset
token_num: 11,868
def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class DocumentVQABuilder(BaseDatasetBuilder): def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], vis_root=build_info.image_path, ann_path=build_info.ann_path ) return datasets @registry.register_builder("ocrvqa") class OCRVQABuilder(DocumentVQABuilder): train_dataset_cls = OCRVQADataset DATASET_CONFIG_DICT = {"default": "configs/datasets/ocrvqa/ocrvqa.yaml"} @registry.register_builder("cc_sbu") class CCSBUBuilder(BaseDatasetBuilder): train_dataset_cls = CCSBUDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_sbu/defaults.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets @registry.register_builder("laion") class LaionBuilder(BaseDatasetBuilder): train_dataset_cls = LaionDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets @registry.register_builder("coco_caption") class COCOCapBuilder(BaseDatasetBuilder):
@registry.register_builder("multitask_conversation") class MultitaskConversationBuilder(BaseDatasetBuilder): train_dataset_cls = MultiTaskConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/multitask_conversation/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("unnatural_instruction") class UnnaturalInstructionBuilder(BaseDatasetBuilder): train_dataset_cls = UnnaturalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nlp/unnatural_instruction.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( text_processor=self.text_processors["train"], ann_path=build_info.ann_path, ) return datasets @registry.register_builder("llava_detail") class LlavaDetailBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/detail.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_reason") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaReasonDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/reason.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("llava_conversation") class LlavaReasonBuilder(BaseDatasetBuilder): train_dataset_cls = LlavaConversationDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/llava/conversation.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class AllRefCOCOBuilder(BaseDatasetBuilder): def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info image_path = build_info.image_path ann_path = build_info.ann_path datasets = dict() if not os.path.exists(image_path): warnings.warn("image path {} does not exist.".format(image_path)) if not os.path.exists(ann_path): warnings.warn("ann path {} does not exist.".format(ann_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=ann_path, vis_root=image_path, dataset=build_info.dataset, splitBy=build_info.splitBy ) return datasets @registry.register_builder("refcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcoco.yaml", } @registry.register_builder("refcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocop.yaml", } @registry.register_builder("refcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = ReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/refcocog.yaml", } @registry.register_builder("invrefcoco") class RefCOCOBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcoco.yaml", } @registry.register_builder("invrefcocop") class RefCOCOPBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocop.yaml", } @registry.register_builder("invrefcocog") class RefCOCOGBuilder(AllRefCOCOBuilder): train_dataset_cls = InvReferCOCODataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco_bbox/invrefcocog.yaml", } @registry.register_builder("refvg") class RefVisualGenomeBuilder(BaseDatasetBuilder): train_dataset_cls = ReferVisualGenomeDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/vg/ref.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info data_dir = build_info.data_dir datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], data_dir=data_dir, ) return datasets @registry.register_builder("textcaps_caption") class TextcapCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = TextCapDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/textcaps/caption.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("coco_vqa") class COCOVQABuilder(BaseDatasetBuilder): train_dataset_cls = COCOVQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco/defaults_vqa.yaml", } @registry.register_builder("ok_vqa") class OKVQABuilder(COCOVQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/okvqa/defaults.yaml", } @registry.register_builder("aok_vqa") class AOKVQABuilder(BaseDatasetBuilder): train_dataset_cls = AOKVQADataset DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"} @registry.register_builder("gqa") class GQABuilder(BaseDatasetBuilder): train_dataset_cls = GQADataset DATASET_CONFIG_DICT = { "default": "configs/datasets/gqa/balanced_val.yaml", } @registry.register_builder("flickr_grounded_caption") class GroundedCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = GroundedDetailDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/default.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_CaptionToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = CaptionToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/caption_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. 
logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets @registry.register_builder("flickr_ObjectToPhrase") class CaptionToPhraseBuilder(BaseDatasetBuilder): train_dataset_cls = PhraseToObjectDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/flickr/object_to_phrase.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info datasets = dict() # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_path=build_info.ann_path, vis_root=build_info.image_path, ) return datasets class DocumentVQABuilder(BaseDatasetBuilder): def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], vis_root=build_info.image_path, ann_path=build_info.ann_path ) return datasets @registry.register_builder("ocrvqa") class OCRVQABuilder(DocumentVQABuilder): train_dataset_cls = OCRVQADataset DATASET_CONFIG_DICT = {"default": "configs/datasets/ocrvqa/ocrvqa.yaml"} @registry.register_builder("cc_sbu") class CCSBUBuilder(BaseDatasetBuilder): train_dataset_cls = CCSBUDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_sbu/defaults.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets @registry.register_builder("laion") class LaionBuilder(BaseDatasetBuilder): train_dataset_cls = LaionDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets @registry.register_builder("coco_caption") class COCOCapBuilder(BaseDatasetBuilder):
next_line: train_dataset_cls = COCOCapDataset
gold_snippet_index: 21
created_at: 2023-12-28 05:47:18+00:00
level: 16k
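Read against the schema, the row above carries all the pieces of a repository-level next-line completion example: cropped_code is the in-file prefix, next_line is the reference continuation, import_statement lists the file's imports, and context is a list of cross-file snippets with gold_snippet_index marking the relevant one (index 21 here is the COCOCapDataset snippet, which matches the next_line value). The sketch below shows one way such a row could be assembled into a prompt and scored; the prompt layout and exact-match metric are assumptions for illustration, not something this page specifies.

# Sketch: build a prompt from one row and score a prediction by exact match.
# The prompt layout is an assumption; the dataset only stores the raw fields.
def build_prompt(row: dict) -> str:
    gold = row["context"][row["gold_snippet_index"]]
    header = "# from {} ({})\n{}\n\n".format(gold["path"], gold["identifier"], gold["snippet"])
    return header + row["import_statement"] + "\n" + row["cropped_code"]

def next_line_exact_match(prediction: str, row: dict) -> bool:
    # Only the first generated line is compared with the reference next_line.
    lines = prediction.strip().splitlines()
    first_line = lines[0].strip() if lines else ""
    return first_line == row["next_line"].strip()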
repo_name: jiawei-ren/dreamgaussian4d
file_path: diffusers/src/diffusers/models/attention.py
[ { "identifier": "USE_PEFT_BACKEND", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version" }, { "identifier": "maybe_allow_in_graph", "path": "diffusers/src/diffusers/utils/torch_utils.py", "snippet": "def maybe_allow_in_graph(cls):\n return cls" }, { "identifier": "GEGLU", "path": "diffusers/src/diffusers/models/activations.py", "snippet": "class GEGLU(nn.Module):\n r\"\"\"\n A [variant](https://arxiv.org/abs/2002.05202) of the gated linear unit activation function.\n\n Parameters:\n dim_in (`int`): The number of channels in the input.\n dim_out (`int`): The number of channels in the output.\n \"\"\"\n\n def __init__(self, dim_in: int, dim_out: int):\n super().__init__()\n linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear\n\n self.proj = linear_cls(dim_in, dim_out * 2)\n\n def gelu(self, gate: torch.Tensor) -> torch.Tensor:\n if gate.device.type != \"mps\":\n return F.gelu(gate)\n # mps: gelu is not implemented for float16\n return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)\n\n def forward(self, hidden_states, scale: float = 1.0):\n args = () if USE_PEFT_BACKEND else (scale,)\n hidden_states, gate = self.proj(hidden_states, *args).chunk(2, dim=-1)\n return hidden_states * self.gelu(gate)" }, { "identifier": "GELU", "path": "diffusers/src/diffusers/models/activations.py", "snippet": "class GELU(nn.Module):\n r\"\"\"\n GELU activation function with tanh approximation support with `approximate=\"tanh\"`.\n\n Parameters:\n dim_in (`int`): The number of channels in the input.\n dim_out (`int`): The number of channels in the output.\n approximate (`str`, *optional*, defaults to `\"none\"`): If `\"tanh\"`, use tanh approximation.\n \"\"\"\n\n def __init__(self, dim_in: int, dim_out: int, approximate: str = \"none\"):\n super().__init__()\n self.proj = nn.Linear(dim_in, dim_out)\n self.approximate = approximate\n\n def gelu(self, gate: torch.Tensor) -> torch.Tensor:\n if gate.device.type != \"mps\":\n return F.gelu(gate, approximate=self.approximate)\n # mps: gelu is not implemented for float16\n return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)\n\n def forward(self, hidden_states):\n hidden_states = self.proj(hidden_states)\n hidden_states = self.gelu(hidden_states)\n return hidden_states" }, { "identifier": "ApproximateGELU", "path": "diffusers/src/diffusers/models/activations.py", "snippet": "class ApproximateGELU(nn.Module):\n r\"\"\"\n The approximate form of the Gaussian Error Linear Unit (GELU). For more details, see section 2 of this\n [paper](https://arxiv.org/abs/1606.08415).\n\n Parameters:\n dim_in (`int`): The number of channels in the input.\n dim_out (`int`): The number of channels in the output.\n \"\"\"\n\n def __init__(self, dim_in: int, dim_out: int):\n super().__init__()\n self.proj = nn.Linear(dim_in, dim_out)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.proj(x)\n return x * torch.sigmoid(1.702 * x)" }, { "identifier": "Attention", "path": "diffusers/src/diffusers/models/attention_processor.py", "snippet": "class Attention(nn.Module):\n r\"\"\"\n A cross attention layer.\n\n Parameters:\n query_dim (`int`):\n The number of channels in the query.\n cross_attention_dim (`int`, *optional*):\n The number of channels in the encoder_hidden_states. 
If not given, defaults to `query_dim`.\n heads (`int`, *optional*, defaults to 8):\n The number of heads to use for multi-head attention.\n dim_head (`int`, *optional*, defaults to 64):\n The number of channels in each head.\n dropout (`float`, *optional*, defaults to 0.0):\n The dropout probability to use.\n bias (`bool`, *optional*, defaults to False):\n Set to `True` for the query, key, and value linear layers to contain a bias parameter.\n upcast_attention (`bool`, *optional*, defaults to False):\n Set to `True` to upcast the attention computation to `float32`.\n upcast_softmax (`bool`, *optional*, defaults to False):\n Set to `True` to upcast the softmax computation to `float32`.\n cross_attention_norm (`str`, *optional*, defaults to `None`):\n The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`.\n cross_attention_norm_num_groups (`int`, *optional*, defaults to 32):\n The number of groups to use for the group norm in the cross attention.\n added_kv_proj_dim (`int`, *optional*, defaults to `None`):\n The number of channels to use for the added key and value projections. If `None`, no projection is used.\n norm_num_groups (`int`, *optional*, defaults to `None`):\n The number of groups to use for the group norm in the attention.\n spatial_norm_dim (`int`, *optional*, defaults to `None`):\n The number of channels to use for the spatial normalization.\n out_bias (`bool`, *optional*, defaults to `True`):\n Set to `True` to use a bias in the output linear layer.\n scale_qk (`bool`, *optional*, defaults to `True`):\n Set to `True` to scale the query and key by `1 / sqrt(dim_head)`.\n only_cross_attention (`bool`, *optional*, defaults to `False`):\n Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if\n `added_kv_proj_dim` is not `None`.\n eps (`float`, *optional*, defaults to 1e-5):\n An additional value added to the denominator in group normalization that is used for numerical stability.\n rescale_output_factor (`float`, *optional*, defaults to 1.0):\n A factor to rescale the output by dividing it with this value.\n residual_connection (`bool`, *optional*, defaults to `False`):\n Set to `True` to add the residual connection to the output.\n _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`):\n Set to `True` if the attention block is loaded from a deprecated state dict.\n processor (`AttnProcessor`, *optional*, defaults to `None`):\n The attention processor to use. 
If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and\n `AttnProcessor` otherwise.\n \"\"\"\n\n def __init__(\n self,\n query_dim: int,\n cross_attention_dim: Optional[int] = None,\n heads: int = 8,\n dim_head: int = 64,\n dropout: float = 0.0,\n bias: bool = False,\n upcast_attention: bool = False,\n upcast_softmax: bool = False,\n cross_attention_norm: Optional[str] = None,\n cross_attention_norm_num_groups: int = 32,\n added_kv_proj_dim: Optional[int] = None,\n norm_num_groups: Optional[int] = None,\n spatial_norm_dim: Optional[int] = None,\n out_bias: bool = True,\n scale_qk: bool = True,\n only_cross_attention: bool = False,\n eps: float = 1e-5,\n rescale_output_factor: float = 1.0,\n residual_connection: bool = False,\n _from_deprecated_attn_block: bool = False,\n processor: Optional[\"AttnProcessor\"] = None,\n ):\n super().__init__()\n self.inner_dim = dim_head * heads\n self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim\n self.upcast_attention = upcast_attention\n self.upcast_softmax = upcast_softmax\n self.rescale_output_factor = rescale_output_factor\n self.residual_connection = residual_connection\n self.dropout = dropout\n\n # we make use of this private variable to know whether this class is loaded\n # with an deprecated state dict so that we can convert it on the fly\n self._from_deprecated_attn_block = _from_deprecated_attn_block\n\n self.scale_qk = scale_qk\n self.scale = dim_head**-0.5 if self.scale_qk else 1.0\n\n self.heads = heads\n # for slice_size > 0 the attention score computation\n # is split across the batch axis to save memory\n # You can set slice_size with `set_attention_slice`\n self.sliceable_head_dim = heads\n\n self.added_kv_proj_dim = added_kv_proj_dim\n self.only_cross_attention = only_cross_attention\n\n if self.added_kv_proj_dim is None and self.only_cross_attention:\n raise ValueError(\n \"`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`.\"\n )\n\n if norm_num_groups is not None:\n self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True)\n else:\n self.group_norm = None\n\n if spatial_norm_dim is not None:\n self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim)\n else:\n self.spatial_norm = None\n\n if cross_attention_norm is None:\n self.norm_cross = None\n elif cross_attention_norm == \"layer_norm\":\n self.norm_cross = nn.LayerNorm(self.cross_attention_dim)\n elif cross_attention_norm == \"group_norm\":\n if self.added_kv_proj_dim is not None:\n # The given `encoder_hidden_states` are initially of shape\n # (batch_size, seq_len, added_kv_proj_dim) before being projected\n # to (batch_size, seq_len, cross_attention_dim). The norm is applied\n # before the projection, so we need to use `added_kv_proj_dim` as\n # the number of channels for the group norm.\n norm_cross_num_channels = added_kv_proj_dim\n else:\n norm_cross_num_channels = self.cross_attention_dim\n\n self.norm_cross = nn.GroupNorm(\n num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True\n )\n else:\n raise ValueError(\n f\"unknown cross_attention_norm: {cross_attention_norm}. 
Should be None, 'layer_norm' or 'group_norm'\"\n )\n\n if USE_PEFT_BACKEND:\n linear_cls = nn.Linear\n else:\n linear_cls = LoRACompatibleLinear\n\n self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias)\n\n if not self.only_cross_attention:\n # only relevant for the `AddedKVProcessor` classes\n self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)\n self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)\n else:\n self.to_k = None\n self.to_v = None\n\n if self.added_kv_proj_dim is not None:\n self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim)\n self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim)\n\n self.to_out = nn.ModuleList([])\n self.to_out.append(linear_cls(self.inner_dim, query_dim, bias=out_bias))\n self.to_out.append(nn.Dropout(dropout))\n\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1\n if processor is None:\n processor = (\n AttnProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk else AttnProcessor()\n )\n self.set_processor(processor)\n\n def set_use_memory_efficient_attention_xformers(\n self, use_memory_efficient_attention_xformers: bool, attention_op: Optional[Callable] = None\n ) -> None:\n r\"\"\"\n Set whether to use memory efficient attention from `xformers` or not.\n\n Args:\n use_memory_efficient_attention_xformers (`bool`):\n Whether to use memory efficient attention from `xformers` or not.\n attention_op (`Callable`, *optional*):\n The attention operation to use. Defaults to `None` which uses the default attention operation from\n `xformers`.\n \"\"\"\n is_lora = hasattr(self, \"processor\") and isinstance(\n self.processor,\n LORA_ATTENTION_PROCESSORS,\n )\n is_custom_diffusion = hasattr(self, \"processor\") and isinstance(\n self.processor,\n (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor, CustomDiffusionAttnProcessor2_0),\n )\n is_added_kv_processor = hasattr(self, \"processor\") and isinstance(\n self.processor,\n (\n AttnAddedKVProcessor,\n AttnAddedKVProcessor2_0,\n SlicedAttnAddedKVProcessor,\n XFormersAttnAddedKVProcessor,\n LoRAAttnAddedKVProcessor,\n ),\n )\n\n if use_memory_efficient_attention_xformers:\n if is_added_kv_processor and (is_lora or is_custom_diffusion):\n raise NotImplementedError(\n f\"Memory efficient attention is currently not supported for LoRA or custom diffusion for attention processor type {self.processor}\"\n )\n if not is_xformers_available():\n raise ModuleNotFoundError(\n (\n \"Refer to https://github.com/facebookresearch/xformers for more information on how to install\"\n \" xformers\"\n ),\n name=\"xformers\",\n )\n elif not torch.cuda.is_available():\n raise ValueError(\n \"torch.cuda.is_available() should be True but is False. 
xformers' memory efficient attention is\"\n \" only available for GPU \"\n )\n else:\n try:\n # Make sure we can run the memory efficient attention\n _ = xformers.ops.memory_efficient_attention(\n torch.randn((1, 2, 40), device=\"cuda\"),\n torch.randn((1, 2, 40), device=\"cuda\"),\n torch.randn((1, 2, 40), device=\"cuda\"),\n )\n except Exception as e:\n raise e\n\n if is_lora:\n # TODO (sayakpaul): should we throw a warning if someone wants to use the xformers\n # variant when using PT 2.0 now that we have LoRAAttnProcessor2_0?\n processor = LoRAXFormersAttnProcessor(\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n rank=self.processor.rank,\n attention_op=attention_op,\n )\n processor.load_state_dict(self.processor.state_dict())\n processor.to(self.processor.to_q_lora.up.weight.device)\n elif is_custom_diffusion:\n processor = CustomDiffusionXFormersAttnProcessor(\n train_kv=self.processor.train_kv,\n train_q_out=self.processor.train_q_out,\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n attention_op=attention_op,\n )\n processor.load_state_dict(self.processor.state_dict())\n if hasattr(self.processor, \"to_k_custom_diffusion\"):\n processor.to(self.processor.to_k_custom_diffusion.weight.device)\n elif is_added_kv_processor:\n # TODO(Patrick, Suraj, William) - currently xformers doesn't work for UnCLIP\n # which uses this type of cross attention ONLY because the attention mask of format\n # [0, ..., -10.000, ..., 0, ...,] is not supported\n # throw warning\n logger.info(\n \"Memory efficient attention with `xformers` might currently not work correctly if an attention mask is required for the attention operation.\"\n )\n processor = XFormersAttnAddedKVProcessor(attention_op=attention_op)\n else:\n processor = XFormersAttnProcessor(attention_op=attention_op)\n else:\n if is_lora:\n attn_processor_class = (\n LoRAAttnProcessor2_0 if hasattr(F, \"scaled_dot_product_attention\") else LoRAAttnProcessor\n )\n processor = attn_processor_class(\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n rank=self.processor.rank,\n )\n processor.load_state_dict(self.processor.state_dict())\n processor.to(self.processor.to_q_lora.up.weight.device)\n elif is_custom_diffusion:\n attn_processor_class = (\n CustomDiffusionAttnProcessor2_0\n if hasattr(F, \"scaled_dot_product_attention\")\n else CustomDiffusionAttnProcessor\n )\n processor = attn_processor_class(\n train_kv=self.processor.train_kv,\n train_q_out=self.processor.train_q_out,\n hidden_size=self.processor.hidden_size,\n cross_attention_dim=self.processor.cross_attention_dim,\n )\n processor.load_state_dict(self.processor.state_dict())\n if hasattr(self.processor, \"to_k_custom_diffusion\"):\n processor.to(self.processor.to_k_custom_diffusion.weight.device)\n else:\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. 
TODO remove scale_qk check when we move to torch 2.1\n processor = (\n AttnProcessor2_0()\n if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk\n else AttnProcessor()\n )\n\n self.set_processor(processor)\n\n def set_attention_slice(self, slice_size: int) -> None:\n r\"\"\"\n Set the slice size for attention computation.\n\n Args:\n slice_size (`int`):\n The slice size for attention computation.\n \"\"\"\n if slice_size is not None and slice_size > self.sliceable_head_dim:\n raise ValueError(f\"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.\")\n\n if slice_size is not None and self.added_kv_proj_dim is not None:\n processor = SlicedAttnAddedKVProcessor(slice_size)\n elif slice_size is not None:\n processor = SlicedAttnProcessor(slice_size)\n elif self.added_kv_proj_dim is not None:\n processor = AttnAddedKVProcessor()\n else:\n # set attention processor\n # We use the AttnProcessor2_0 by default when torch 2.x is used which uses\n # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention\n # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1\n processor = (\n AttnProcessor2_0() if hasattr(F, \"scaled_dot_product_attention\") and self.scale_qk else AttnProcessor()\n )\n\n self.set_processor(processor)\n\n def set_processor(self, processor: \"AttnProcessor\", _remove_lora: bool = False) -> None:\n r\"\"\"\n Set the attention processor to use.\n\n Args:\n processor (`AttnProcessor`):\n The attention processor to use.\n _remove_lora (`bool`, *optional*, defaults to `False`):\n Set to `True` to remove LoRA layers from the model.\n \"\"\"\n if not USE_PEFT_BACKEND and hasattr(self, \"processor\") and _remove_lora and self.to_q.lora_layer is not None:\n deprecate(\n \"set_processor to offload LoRA\",\n \"0.26.0\",\n \"In detail, removing LoRA layers via calling `set_default_attn_processor` is deprecated. Please make sure to call `pipe.unload_lora_weights()` instead.\",\n )\n # TODO(Patrick, Sayak) - this can be deprecated once PEFT LoRA integration is complete\n # We need to remove all LoRA layers\n # Don't forget to remove ALL `_remove_lora` from the codebase\n for module in self.modules():\n if hasattr(module, \"set_lora_layer\"):\n module.set_lora_layer(None)\n\n # if current processor is in `self._modules` and if passed `processor` is not, we need to\n # pop `processor` from `self._modules`\n if (\n hasattr(self, \"processor\")\n and isinstance(self.processor, torch.nn.Module)\n and not isinstance(processor, torch.nn.Module)\n ):\n logger.info(f\"You are removing possibly trained weights of {self.processor} with {processor}\")\n self._modules.pop(\"processor\")\n\n self.processor = processor\n\n def get_processor(self, return_deprecated_lora: bool = False) -> \"AttentionProcessor\":\n r\"\"\"\n Get the attention processor in use.\n\n Args:\n return_deprecated_lora (`bool`, *optional*, defaults to `False`):\n Set to `True` to return the deprecated LoRA attention processor.\n\n Returns:\n \"AttentionProcessor\": The attention processor in use.\n \"\"\"\n if not return_deprecated_lora:\n return self.processor\n\n # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible\n # serialization format for LoRA Attention Processors. 
It should be deleted once the integration\n # with PEFT is completed.\n is_lora_activated = {\n name: module.lora_layer is not None\n for name, module in self.named_modules()\n if hasattr(module, \"lora_layer\")\n }\n\n # 1. if no layer has a LoRA activated we can return the processor as usual\n if not any(is_lora_activated.values()):\n return self.processor\n\n # If doesn't apply LoRA do `add_k_proj` or `add_v_proj`\n is_lora_activated.pop(\"add_k_proj\", None)\n is_lora_activated.pop(\"add_v_proj\", None)\n # 2. else it is not posssible that only some layers have LoRA activated\n if not all(is_lora_activated.values()):\n raise ValueError(\n f\"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}\"\n )\n\n # 3. And we need to merge the current LoRA layers into the corresponding LoRA attention processor\n non_lora_processor_cls_name = self.processor.__class__.__name__\n lora_processor_cls = getattr(import_module(__name__), \"LoRA\" + non_lora_processor_cls_name)\n\n hidden_size = self.inner_dim\n\n # now create a LoRA attention processor from the LoRA layers\n if lora_processor_cls in [LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor]:\n kwargs = {\n \"cross_attention_dim\": self.cross_attention_dim,\n \"rank\": self.to_q.lora_layer.rank,\n \"network_alpha\": self.to_q.lora_layer.network_alpha,\n \"q_rank\": self.to_q.lora_layer.rank,\n \"q_hidden_size\": self.to_q.lora_layer.out_features,\n \"k_rank\": self.to_k.lora_layer.rank,\n \"k_hidden_size\": self.to_k.lora_layer.out_features,\n \"v_rank\": self.to_v.lora_layer.rank,\n \"v_hidden_size\": self.to_v.lora_layer.out_features,\n \"out_rank\": self.to_out[0].lora_layer.rank,\n \"out_hidden_size\": self.to_out[0].lora_layer.out_features,\n }\n\n if hasattr(self.processor, \"attention_op\"):\n kwargs[\"attention_op\"] = self.processor.attention_op\n\n lora_processor = lora_processor_cls(hidden_size, **kwargs)\n lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())\n lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())\n lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())\n lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())\n elif lora_processor_cls == LoRAAttnAddedKVProcessor:\n lora_processor = lora_processor_cls(\n hidden_size,\n cross_attention_dim=self.add_k_proj.weight.shape[0],\n rank=self.to_q.lora_layer.rank,\n network_alpha=self.to_q.lora_layer.network_alpha,\n )\n lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())\n lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())\n lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())\n lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())\n\n # only save if used\n if self.add_k_proj.lora_layer is not None:\n lora_processor.add_k_proj_lora.load_state_dict(self.add_k_proj.lora_layer.state_dict())\n lora_processor.add_v_proj_lora.load_state_dict(self.add_v_proj.lora_layer.state_dict())\n else:\n lora_processor.add_k_proj_lora = None\n lora_processor.add_v_proj_lora = None\n else:\n raise ValueError(f\"{lora_processor_cls} does not exist.\")\n\n return lora_processor\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n **cross_attention_kwargs,\n ) -> torch.Tensor:\n r\"\"\"\n The forward method of the `Attention` 
class.\n\n Args:\n hidden_states (`torch.Tensor`):\n The hidden states of the query.\n encoder_hidden_states (`torch.Tensor`, *optional*):\n The hidden states of the encoder.\n attention_mask (`torch.Tensor`, *optional*):\n The attention mask to use. If `None`, no mask is applied.\n **cross_attention_kwargs:\n Additional keyword arguments to pass along to the cross attention.\n\n Returns:\n `torch.Tensor`: The output of the attention layer.\n \"\"\"\n # The `Attention` class can call different attention processors / attention functions\n # here we simply pass along all tensors to the selected processor class\n # For standard processors that are defined here, `**cross_attention_kwargs` is empty\n return self.processor(\n self,\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n\n def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`\n is the number of heads initialized while constructing the `Attention` class.\n\n Args:\n tensor (`torch.Tensor`): The tensor to reshape.\n\n Returns:\n `torch.Tensor`: The reshaped tensor.\n \"\"\"\n head_size = self.heads\n batch_size, seq_len, dim = tensor.shape\n tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)\n tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)\n return tensor\n\n def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:\n r\"\"\"\n Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is\n the number of heads initialized while constructing the `Attention` class.\n\n Args:\n tensor (`torch.Tensor`): The tensor to reshape.\n out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is\n reshaped to `[batch_size * heads, seq_len, dim // heads]`.\n\n Returns:\n `torch.Tensor`: The reshaped tensor.\n \"\"\"\n head_size = self.heads\n batch_size, seq_len, dim = tensor.shape\n tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)\n tensor = tensor.permute(0, 2, 1, 3)\n\n if out_dim == 3:\n tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)\n\n return tensor\n\n def get_attention_scores(\n self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None\n ) -> torch.Tensor:\n r\"\"\"\n Compute the attention scores.\n\n Args:\n query (`torch.Tensor`): The query tensor.\n key (`torch.Tensor`): The key tensor.\n attention_mask (`torch.Tensor`, *optional*): The attention mask to use. 
If `None`, no mask is applied.\n\n Returns:\n `torch.Tensor`: The attention probabilities/scores.\n \"\"\"\n dtype = query.dtype\n if self.upcast_attention:\n query = query.float()\n key = key.float()\n\n if attention_mask is None:\n baddbmm_input = torch.empty(\n query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device\n )\n beta = 0\n else:\n baddbmm_input = attention_mask\n beta = 1\n\n attention_scores = torch.baddbmm(\n baddbmm_input,\n query,\n key.transpose(-1, -2),\n beta=beta,\n alpha=self.scale,\n )\n del baddbmm_input\n\n if self.upcast_softmax:\n attention_scores = attention_scores.float()\n\n attention_probs = attention_scores.softmax(dim=-1)\n del attention_scores\n\n attention_probs = attention_probs.to(dtype)\n\n return attention_probs\n\n def prepare_attention_mask(\n self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3\n ) -> torch.Tensor:\n r\"\"\"\n Prepare the attention mask for the attention computation.\n\n Args:\n attention_mask (`torch.Tensor`):\n The attention mask to prepare.\n target_length (`int`):\n The target length of the attention mask. This is the length of the attention mask after padding.\n batch_size (`int`):\n The batch size, which is used to repeat the attention mask.\n out_dim (`int`, *optional*, defaults to `3`):\n The output dimension of the attention mask. Can be either `3` or `4`.\n\n Returns:\n `torch.Tensor`: The prepared attention mask.\n \"\"\"\n head_size = self.heads\n if attention_mask is None:\n return attention_mask\n\n current_length: int = attention_mask.shape[-1]\n if current_length != target_length:\n if attention_mask.device.type == \"mps\":\n # HACK: MPS: Does not support padding by greater than dimension of input tensor.\n # Instead, we can manually construct the padding tensor.\n padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length)\n padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat([attention_mask, padding], dim=2)\n else:\n # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:\n # we want to instead pad by (0, remaining_length), where remaining_length is:\n # remaining_length: int = target_length - current_length\n # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding\n attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)\n\n if out_dim == 3:\n if attention_mask.shape[0] < batch_size * head_size:\n attention_mask = attention_mask.repeat_interleave(head_size, dim=0)\n elif out_dim == 4:\n attention_mask = attention_mask.unsqueeze(1)\n attention_mask = attention_mask.repeat_interleave(head_size, dim=1)\n\n return attention_mask\n\n def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the\n `Attention` class.\n\n Args:\n encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.\n\n Returns:\n `torch.Tensor`: The normalized encoder hidden states.\n \"\"\"\n assert self.norm_cross is not None, \"self.norm_cross must be defined to call self.norm_encoder_hidden_states\"\n\n if isinstance(self.norm_cross, nn.LayerNorm):\n encoder_hidden_states = self.norm_cross(encoder_hidden_states)\n elif isinstance(self.norm_cross, nn.GroupNorm):\n # Group norm norms along the channels dimension and expects\n # input to be in the shape of (N, C, *). 
In this case, we want\n # to norm along the hidden dimension, so we need to move\n # (batch_size, sequence_length, hidden_size) ->\n # (batch_size, hidden_size, sequence_length)\n encoder_hidden_states = encoder_hidden_states.transpose(1, 2)\n encoder_hidden_states = self.norm_cross(encoder_hidden_states)\n encoder_hidden_states = encoder_hidden_states.transpose(1, 2)\n else:\n assert False\n\n return encoder_hidden_states" }, { "identifier": "SinusoidalPositionalEmbedding", "path": "diffusers/src/diffusers/models/embeddings.py", "snippet": "class SinusoidalPositionalEmbedding(nn.Module):\n \"\"\"Apply positional information to a sequence of embeddings.\n\n Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to\n them\n\n Args:\n embed_dim: (int): Dimension of the positional embedding.\n max_seq_length: Maximum sequence length to apply positional embeddings\n\n \"\"\"\n\n def __init__(self, embed_dim: int, max_seq_length: int = 32):\n super().__init__()\n position = torch.arange(max_seq_length).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim))\n pe = torch.zeros(1, max_seq_length, embed_dim)\n pe[0, :, 0::2] = torch.sin(position * div_term)\n pe[0, :, 1::2] = torch.cos(position * div_term)\n self.register_buffer(\"pe\", pe)\n\n def forward(self, x):\n _, seq_length, _ = x.shape\n x = x + self.pe[:, :seq_length]\n return x" }, { "identifier": "LoRACompatibleLinear", "path": "diffusers/src/diffusers/models/lora.py", "snippet": "class LoRACompatibleLinear(nn.Linear):\n \"\"\"\n A Linear layer that can be used with LoRA.\n \"\"\"\n\n def __init__(self, *args, lora_layer: Optional[LoRALinearLayer] = None, **kwargs):\n super().__init__(*args, **kwargs)\n self.lora_layer = lora_layer\n\n def set_lora_layer(self, lora_layer: Optional[LoRALinearLayer]):\n self.lora_layer = lora_layer\n\n def _fuse_lora(self, lora_scale: float = 1.0, safe_fusing: bool = False):\n if self.lora_layer is None:\n return\n\n dtype, device = self.weight.data.dtype, self.weight.data.device\n\n w_orig = self.weight.data.float()\n w_up = self.lora_layer.up.weight.data.float()\n w_down = self.lora_layer.down.weight.data.float()\n\n if self.lora_layer.network_alpha is not None:\n w_up = w_up * self.lora_layer.network_alpha / self.lora_layer.rank\n\n fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n\n if safe_fusing and torch.isnan(fused_weight).any().item():\n raise ValueError(\n \"This LoRA weight seems to be broken. 
\"\n f\"Encountered NaN values when trying to fuse LoRA weights for {self}.\"\n \"LoRA weights will not be fused.\"\n )\n\n self.weight.data = fused_weight.to(device=device, dtype=dtype)\n\n # we can drop the lora layer now\n self.lora_layer = None\n\n # offload the up and down matrices to CPU to not blow the memory\n self.w_up = w_up.cpu()\n self.w_down = w_down.cpu()\n self._lora_scale = lora_scale\n\n def _unfuse_lora(self):\n if not (getattr(self, \"w_up\", None) is not None and getattr(self, \"w_down\", None) is not None):\n return\n\n fused_weight = self.weight.data\n dtype, device = fused_weight.dtype, fused_weight.device\n\n w_up = self.w_up.to(device=device).float()\n w_down = self.w_down.to(device).float()\n\n unfused_weight = fused_weight.float() - (self._lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])\n self.weight.data = unfused_weight.to(device=device, dtype=dtype)\n\n self.w_up = None\n self.w_down = None\n\n def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:\n if self.lora_layer is None:\n out = super().forward(hidden_states)\n return out\n else:\n out = super().forward(hidden_states) + (scale * self.lora_layer(hidden_states))\n return out" }, { "identifier": "AdaLayerNorm", "path": "diffusers/src/diffusers/models/normalization.py", "snippet": "class AdaLayerNorm(nn.Module):\n r\"\"\"\n Norm layer modified to incorporate timestep embeddings.\n\n Parameters:\n embedding_dim (`int`): The size of each embedding vector.\n num_embeddings (`int`): The size of the embeddings dictionary.\n \"\"\"\n\n def __init__(self, embedding_dim: int, num_embeddings: int):\n super().__init__()\n self.emb = nn.Embedding(num_embeddings, embedding_dim)\n self.silu = nn.SiLU()\n self.linear = nn.Linear(embedding_dim, embedding_dim * 2)\n self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)\n\n def forward(self, x: torch.Tensor, timestep: torch.Tensor) -> torch.Tensor:\n emb = self.linear(self.silu(self.emb(timestep)))\n scale, shift = torch.chunk(emb, 2)\n x = self.norm(x) * (1 + scale) + shift\n return x" }, { "identifier": "AdaLayerNormZero", "path": "diffusers/src/diffusers/models/normalization.py", "snippet": "class AdaLayerNormZero(nn.Module):\n r\"\"\"\n Norm layer adaptive layer norm zero (adaLN-Zero).\n\n Parameters:\n embedding_dim (`int`): The size of each embedding vector.\n num_embeddings (`int`): The size of the embeddings dictionary.\n \"\"\"\n\n def __init__(self, embedding_dim: int, num_embeddings: int):\n super().__init__()\n\n self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)\n\n self.silu = nn.SiLU()\n self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)\n self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)\n\n def forward(\n self,\n x: torch.Tensor,\n timestep: torch.Tensor,\n class_labels: torch.LongTensor,\n hidden_dtype: Optional[torch.dtype] = None,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))\n shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)\n x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]\n return x, gate_msa, shift_mlp, scale_mlp, gate_mlp" } ]
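A hedged, self-contained sketch of the scaled dot-product score computation that the `Attention.get_attention_scores` snippet above performs with `torch.baddbmm`; the tensor shapes below are illustrative assumptions, not values taken from this record.

import torch

# Scores for (batch * heads) query/key blocks, as in the snippet above.
batch_heads, q_len, k_len, dim_head = 2, 4, 6, 8
query = torch.randn(batch_heads, q_len, dim_head)
key = torch.randn(batch_heads, k_len, dim_head)
scale = dim_head ** -0.5

# With beta=0 the empty input tensor only fixes the output shape, so this is
# a plain scaled Q @ K^T followed by a softmax over the key dimension.
empty = torch.empty(batch_heads, q_len, k_len)
scores = torch.baddbmm(empty, query, key.transpose(-1, -2), beta=0, alpha=scale)
attention_probs = scores.softmax(dim=-1)
print(attention_probs.shape)  # torch.Size([2, 4, 6])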
from typing import Any, Dict, Optional from torch import nn from ..utils import USE_PEFT_BACKEND from ..utils.torch_utils import maybe_allow_in_graph from .activations import GEGLU, GELU, ApproximateGELU from .attention_processor import Attention from .embeddings import SinusoidalPositionalEmbedding from .lora import LoRACompatibleLinear from .normalization import AdaLayerNorm, AdaLayerNormZero import torch
12521
self.attn1 = Attention( query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None, ) # 2. Cross-Attn if cross_attention_dim is not None: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention( query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") # let chunk size default to None self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): # Sets chunk feed-forward self._chunk_size = chunk_size # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off self._chunk_dim = 1 def forward( self, hidden_states: torch.FloatTensor, num_frames: int, encoder_hidden_states: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] batch_frames, seq_length, channels = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states # 3. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states # 4. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return hidden_states class FeedForward(nn.Module): r""" A feed-forward layer. Parameters: dim (`int`): The number of channels in the input. dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. 
activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. """ def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ): super().__init__() inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim
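The cropped code above configures `FeedForward` with `activation_fn="geglu"`. A minimal sketch of that gating pattern, assuming the usual projection-then-gate layout; the class body is truncated in this record, so the layer names and ordering here are illustrative, not the exact implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class GEGLUSketch(nn.Module):
    # Project to twice the hidden width, then gate one half with GELU of the other.
    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def forward(self, x):
        hidden, gate = self.proj(x).chunk(2, dim=-1)
        return hidden * F.gelu(gate)

dim, mult = 64, 4
inner_dim = int(dim * mult)
ff = nn.Sequential(GEGLUSketch(dim, inner_dim), nn.Dropout(0.0), nn.Linear(inner_dim, dim))
print(ff(torch.randn(2, 16, dim)).shape)  # torch.Size([2, 16, 64])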
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def _chunked_feed_forward( ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None ): # "feed_forward_chunk_size" can be used to save memory if hidden_states.shape[chunk_dim] % chunk_size != 0: raise ValueError( f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." ) num_chunks = hidden_states.shape[chunk_dim] // chunk_size if lora_scale is None: ff_output = torch.cat( [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim, ) else: # TOOD(Patrick): LoRA scale can be removed once PEFT refactor is complete ff_output = torch.cat( [ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)], dim=chunk_dim, ) return ff_output @maybe_allow_in_graph class GatedSelfAttentionDense(nn.Module): r""" A gated self-attention dense layer that combines visual features and object features. Parameters: query_dim (`int`): The number of channels in the query. context_dim (`int`): The number of channels in the context. n_heads (`int`): The number of heads to use for attention. d_head (`int`): The number of channels in each head. """ def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int): super().__init__() # we need a linear projection since we need cat visual feature and obj feature self.linear = nn.Linear(context_dim, query_dim) self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head) self.ff = FeedForward(query_dim, activation_fn="geglu") self.norm1 = nn.LayerNorm(query_dim) self.norm2 = nn.LayerNorm(query_dim) self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0))) self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0))) self.enabled = True def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor: if not self.enabled: return x n_visual = x.shape[1] objs = self.linear(objs) x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :] x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x)) return x @maybe_allow_in_graph class BasicTransformerBlock(nn.Module): r""" A basic Transformer block. Parameters: dim (`int`): The number of channels in the input and output. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. num_embeds_ada_norm (: obj: `int`, *optional*): The number of diffusion steps used during training. 
See `Transformer2DModel`. attention_bias (: obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. only_cross_attention (`bool`, *optional*): Whether to use only cross-attention layers. In this case two cross attention layers are used. double_self_attention (`bool`, *optional*): Whether to use two self-attention layers. In this case no cross attention layers are used. upcast_attention (`bool`, *optional*): Whether to upcast the attention computation to float32. This is useful for mixed precision training. norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether to use learnable elementwise affine parameters for normalization. norm_type (`str`, *optional*, defaults to `"layer_norm"`): The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`. final_dropout (`bool` *optional*, defaults to False): Whether to apply a final dropout after the last feed-forward layer. attention_type (`str`, *optional*, defaults to `"default"`): The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. positional_embeddings (`str`, *optional*, defaults to `None`): The type of positional embeddings to apply to. num_positional_embeddings (`int`, *optional*, defaults to `None`): The maximum number of positional embeddings to apply. """ def __init__( self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single' norm_eps: float = 1e-5, final_dropout: bool = False, attention_type: str = "default", positional_embeddings: Optional[str] = None, num_positional_embeddings: Optional[int] = None, ): super().__init__() self.only_cross_attention = only_cross_attention self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" self.use_ada_layer_norm_single = norm_type == "ada_norm_single" self.use_layer_norm = norm_type == "layer_norm" if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." ) if positional_embeddings and (num_positional_embeddings is None): raise ValueError( "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined." ) if positional_embeddings == "sinusoidal": self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings) else: self.pos_embed = None # Define 3 blocks. Each block has its own normalization layer. # 1. 
Self-Attn if self.use_ada_layer_norm: self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) elif self.use_ada_layer_norm_zero: self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) else: self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.attn1 = Attention( query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention, ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = ( AdaLayerNorm(dim, num_embeds_ada_norm) if self.use_ada_layer_norm else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) ) self.attn2 = Attention( query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward if not self.use_ada_layer_norm_single: self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps) self.ff = FeedForward( dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout, ) # 4. Fuser if attention_type == "gated" or attention_type == "gated-text-image": self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim) # 5. Scale-shift for PixArt-Alpha. if self.use_ada_layer_norm_single: self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5) # let chunk size default to None self._chunk_size = None self._chunk_dim = 0 def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): # Sets chunk feed-forward self._chunk_size = chunk_size self._chunk_dim = dim def forward( self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] if self.use_ada_layer_norm: norm_hidden_states = self.norm1(hidden_states, timestep) elif self.use_ada_layer_norm_zero: norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype ) elif self.use_layer_norm: norm_hidden_states = self.norm1(hidden_states) elif self.use_ada_layer_norm_single: shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1) ).chunk(6, dim=1) norm_hidden_states = self.norm1(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa norm_hidden_states = norm_hidden_states.squeeze(1) else: raise ValueError("Incorrect norm used") if self.pos_embed is not None: norm_hidden_states = self.pos_embed(norm_hidden_states) # 1. 
Retrieve lora scale. lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0 # 2. Prepare GLIGEN inputs cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} gligen_kwargs = cross_attention_kwargs.pop("gligen", None) attn_output = self.attn1( norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs, ) if self.use_ada_layer_norm_zero: attn_output = gate_msa.unsqueeze(1) * attn_output elif self.use_ada_layer_norm_single: attn_output = gate_msa * attn_output hidden_states = attn_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) # 2.5 GLIGEN Control if gligen_kwargs is not None: hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"]) # 3. Cross-Attention if self.attn2 is not None: if self.use_ada_layer_norm: norm_hidden_states = self.norm2(hidden_states, timestep) elif self.use_ada_layer_norm_zero or self.use_layer_norm: norm_hidden_states = self.norm2(hidden_states) elif self.use_ada_layer_norm_single: # For PixArt norm2 isn't applied here: # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103 norm_hidden_states = hidden_states else: raise ValueError("Incorrect norm") if self.pos_embed is not None and self.use_ada_layer_norm_single is False: norm_hidden_states = self.pos_embed(norm_hidden_states) attn_output = self.attn2( norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs, ) hidden_states = attn_output + hidden_states # 4. Feed-forward if not self.use_ada_layer_norm_single: norm_hidden_states = self.norm3(hidden_states) if self.use_ada_layer_norm_zero: norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self.use_ada_layer_norm_single: norm_hidden_states = self.norm2(hidden_states) norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory ff_output = _chunked_feed_forward( self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale ) else: ff_output = self.ff(norm_hidden_states, scale=lora_scale) if self.use_ada_layer_norm_zero: ff_output = gate_mlp.unsqueeze(1) * ff_output elif self.use_ada_layer_norm_single: ff_output = gate_mlp * ff_output hidden_states = ff_output + hidden_states if hidden_states.ndim == 4: hidden_states = hidden_states.squeeze(1) return hidden_states @maybe_allow_in_graph class TemporalBasicTransformerBlock(nn.Module): r""" A basic Transformer block for video like data. Parameters: dim (`int`): The number of channels in the input and output. time_mix_inner_dim (`int`): The number of channels for temporal attention. num_attention_heads (`int`): The number of heads to use for multi-head attention. attention_head_dim (`int`): The number of channels in each head. cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. """ def __init__( self, dim: int, time_mix_inner_dim: int, num_attention_heads: int, attention_head_dim: int, cross_attention_dim: Optional[int] = None, ): super().__init__() self.is_res = dim == time_mix_inner_dim self.norm_in = nn.LayerNorm(dim) # Define 3 blocks. Each block has its own normalization layer. # 1. 
Self-Attn self.norm_in = nn.LayerNorm(dim) self.ff_in = FeedForward( dim, dim_out=time_mix_inner_dim, activation_fn="geglu", ) self.norm1 = nn.LayerNorm(time_mix_inner_dim) self.attn1 = Attention( query_dim=time_mix_inner_dim, heads=num_attention_heads, dim_head=attention_head_dim, cross_attention_dim=None, ) # 2. Cross-Attn if cross_attention_dim is not None: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. self.norm2 = nn.LayerNorm(time_mix_inner_dim) self.attn2 = Attention( query_dim=time_mix_inner_dim, cross_attention_dim=cross_attention_dim, heads=num_attention_heads, dim_head=attention_head_dim, ) # is self-attn if encoder_hidden_states is none else: self.norm2 = None self.attn2 = None # 3. Feed-forward self.norm3 = nn.LayerNorm(time_mix_inner_dim) self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu") # let chunk size default to None self._chunk_size = None self._chunk_dim = None def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs): # Sets chunk feed-forward self._chunk_size = chunk_size # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off self._chunk_dim = 1 def forward( self, hidden_states: torch.FloatTensor, num_frames: int, encoder_hidden_states: Optional[torch.FloatTensor] = None, ) -> torch.FloatTensor: # Notice that normalization is always applied before the real computation in the following blocks. # 0. Self-Attention batch_size = hidden_states.shape[0] batch_frames, seq_length, channels = hidden_states.shape batch_size = batch_frames // num_frames hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels) residual = hidden_states hidden_states = self.norm_in(hidden_states) if self._chunk_size is not None: hidden_states = _chunked_feed_forward(self.ff, hidden_states, self._chunk_dim, self._chunk_size) else: hidden_states = self.ff_in(hidden_states) if self.is_res: hidden_states = hidden_states + residual norm_hidden_states = self.norm1(hidden_states) attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None) hidden_states = attn_output + hidden_states # 3. Cross-Attention if self.attn2 is not None: norm_hidden_states = self.norm2(hidden_states) attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states) hidden_states = attn_output + hidden_states # 4. Feed-forward norm_hidden_states = self.norm3(hidden_states) if self._chunk_size is not None: ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size) else: ff_output = self.ff(norm_hidden_states) if self.is_res: hidden_states = ff_output + hidden_states else: hidden_states = ff_output hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels) hidden_states = hidden_states.permute(0, 2, 1, 3) hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels) return hidden_states class FeedForward(nn.Module): r""" A feed-forward layer. Parameters: dim (`int`): The number of channels in the input. dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. 
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. """ def __init__( self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False, ): super().__init__() inner_dim = int(dim * mult) dim_out = dim_out if dim_out is not None else dim
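A small illustration of the chunking trick that `_chunked_feed_forward` (defined near the top of the file above) relies on: the feed-forward is applied per chunk along one dimension and the pieces are concatenated, lowering peak activation memory without changing the result. The module and shapes below are assumptions for demonstration only.

import torch
import torch.nn as nn

ff = nn.Sequential(nn.Linear(32, 128), nn.GELU(), nn.Linear(128, 32))
hidden_states = torch.randn(2, 8, 32)   # (batch, seq_len, dim)
chunk_dim, chunk_size = 1, 4

num_chunks = hidden_states.shape[chunk_dim] // chunk_size
chunked_out = torch.cat(
    [ff(chunk) for chunk in hidden_states.chunk(num_chunks, dim=chunk_dim)],
    dim=chunk_dim,
)
# Chunking only changes how much is computed at once, not the output.
assert torch.allclose(chunked_out, ff(hidden_states), atol=1e-5)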
linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear
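The target line selects `LoRACompatibleLinear` unless the PEFT backend is enabled. A hedged sketch of what that layer's `forward` effectively computes, per the `LoRACompatibleLinear` snippet above: the base `nn.Linear` output plus a scaled low-rank update. The rank and feature sizes here are illustrative assumptions.

import torch
import torch.nn as nn

class LoRALinearLayerSketch(nn.Module):
    # Low-rank update: down-project to `rank`, then up-project back.
    def __init__(self, in_features: int, out_features: int, rank: int = 4):
        super().__init__()
        self.down = nn.Linear(in_features, rank, bias=False)
        self.up = nn.Linear(rank, out_features, bias=False)

    def forward(self, x):
        return self.up(self.down(x))

base = nn.Linear(16, 16)
lora = LoRALinearLayerSketch(16, 16)
x = torch.randn(2, 16)
scale = 1.0
out = base(x) + scale * lora(x)  # what the LoRA-compatible layer's forward adds
print(out.shape)  # torch.Size([2, 16])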
7
2023-12-28 08:17:40+00:00
16k
FoundationVision/UniRef
detectron2/evaluation/coco_evaluation.py
[ { "identifier": "CfgNode", "path": "detectron2/config/config.py", "snippet": "class CfgNode(_CfgNode):\n \"\"\"\n The same as `fvcore.common.config.CfgNode`, but different in:\n\n 1. Use unsafe yaml loading by default.\n Note that this may lead to arbitrary code execution: you must not\n load a config file from untrusted sources before manually inspecting\n the content of the file.\n 2. Support config versioning.\n When attempting to merge an old config, it will convert the old config automatically.\n\n .. automethod:: clone\n .. automethod:: freeze\n .. automethod:: defrost\n .. automethod:: is_frozen\n .. automethod:: load_yaml_with_base\n .. automethod:: merge_from_list\n .. automethod:: merge_from_other_cfg\n \"\"\"\n\n @classmethod\n def _open_cfg(cls, filename):\n return PathManager.open(filename, \"r\")\n\n # Note that the default value of allow_unsafe is changed to True\n def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:\n \"\"\"\n Load content from the given config file and merge it into self.\n\n Args:\n cfg_filename: config filename\n allow_unsafe: allow unsafe yaml syntax\n \"\"\"\n assert PathManager.isfile(cfg_filename), f\"Config file '{cfg_filename}' does not exist!\"\n loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)\n loaded_cfg = type(self)(loaded_cfg)\n\n # defaults.py needs to import CfgNode\n from .defaults import _C\n\n latest_ver = _C.VERSION\n assert (\n latest_ver == self.VERSION\n ), \"CfgNode.merge_from_file is only allowed on a config object of latest version!\"\n\n logger = logging.getLogger(__name__)\n\n loaded_ver = loaded_cfg.get(\"VERSION\", None)\n if loaded_ver is None:\n from .compat import guess_version\n\n loaded_ver = guess_version(loaded_cfg, cfg_filename)\n assert loaded_ver <= self.VERSION, \"Cannot merge a v{} config into a v{} config.\".format(\n loaded_ver, self.VERSION\n )\n\n if loaded_ver == self.VERSION:\n self.merge_from_other_cfg(loaded_cfg)\n else:\n # compat.py needs to import CfgNode\n from .compat import upgrade_config, downgrade_config\n\n logger.warning(\n \"Loading an old v{} config file '{}' by automatically upgrading to v{}. 
\"\n \"See docs/CHANGELOG.md for instructions to update your files.\".format(\n loaded_ver, cfg_filename, self.VERSION\n )\n )\n # To convert, first obtain a full config at an old version\n old_self = downgrade_config(self, to_version=loaded_ver)\n old_self.merge_from_other_cfg(loaded_cfg)\n new_config = upgrade_config(old_self)\n self.clear()\n self.update(new_config)\n\n def dump(self, *args, **kwargs):\n \"\"\"\n Returns:\n str: a yaml string representation of the config\n \"\"\"\n # to make it show up in docs\n return super().dump(*args, **kwargs)" }, { "identifier": "MetadataCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "convert_to_coco_json", "path": "detectron2/data/datasets/coco.py", "snippet": "def convert_to_coco_json(dataset_name, output_file, allow_cached=True):\n \"\"\"\n Converts dataset into COCO format and saves it to a json file.\n dataset_name must be registered in DatasetCatalog and in detectron2's standard format.\n\n Args:\n dataset_name:\n reference from the config file to the catalogs\n must be registered in DatasetCatalog and in detectron2's standard format\n output_file: path of json file that will be saved to\n allow_cached: if json file is already present then skip conversion\n \"\"\"\n\n # TODO: The dataset or the conversion script *may* change,\n # a checksum would be useful for validating the cached data\n\n PathManager.mkdirs(os.path.dirname(output_file))\n with file_lock(output_file):\n if PathManager.exists(output_file) and allow_cached:\n logger.warning(\n f\"Using previously cached COCO format annotations at '{output_file}'. \"\n \"You need to clear the cache file if your dataset has been modified.\"\n )\n else:\n logger.info(f\"Converting annotations of dataset '{dataset_name}' to COCO format ...)\")\n coco_dict = convert_to_coco_dict(dataset_name)\n\n logger.info(f\"Caching COCO format annotations at '{output_file}' ...\")\n tmp_file = output_file + \".tmp\"\n with PathManager.open(tmp_file, \"w\") as f:\n json.dump(coco_dict, f)\n shutil.move(tmp_file, output_file)" }, { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. 
Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "pairwise_iou", "path": "detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "create_small_table", "path": "detectron2/utils/logger.py", "snippet": "def create_small_table(small_dict):\n \"\"\"\n Create a small table using the keys of small_dict as headers. This is only\n suitable for small dictionaries.\n\n Args:\n small_dict (dict): a result dictionary of only a few items.\n\n Returns:\n str: the table as a string.\n \"\"\"\n keys, values = tuple(zip(*small_dict.items()))\n table = tabulate(\n [values],\n headers=keys,\n tablefmt=\"pipe\",\n floatfmt=\".3f\",\n stralign=\"center\",\n numalign=\"center\",\n )\n return table" }, { "identifier": "DatasetEvaluator", "path": "detectron2/evaluation/evaluator.py", "snippet": "class DatasetEvaluator:\n \"\"\"\n Base class for a dataset evaluator.\n\n The function :func:`inference_on_dataset` runs the model over\n all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.\n\n This class will accumulate information of the inputs/outputs (by :meth:`process`),\n and produce evaluation results in the end (by :meth:`evaluate`).\n \"\"\"\n\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n def process(self, inputs, outputs):\n \"\"\"\n Process the pair of inputs and outputs.\n If they contain batches, the pairs can be consumed one-by-one using `zip`:\n\n .. code-block:: python\n\n for input_, output in zip(inputs, outputs):\n # do evaluation on single input/output pair\n ...\n\n Args:\n inputs (list): the inputs that's used to call the model.\n outputs (list): the return value of `model(inputs)`\n \"\"\"\n pass\n\n def evaluate(self):\n \"\"\"\n Evaluate/summarize the performance, after processing all input/output pairs.\n\n Returns:\n dict:\n A new evaluator class can return a dict of arbitrary format\n as long as the user can process the results.\n In our train_net.py, we expect the following format:\n\n * key: the name of the task (e.g., bbox)\n * value: a dict of {metric name: score}, e.g.: {\"AP50\": 80}\n \"\"\"\n pass" }, { "identifier": "RefCOCOeval", "path": "detectron2/evaluation/refcocoeval.py", "snippet": "class RefCOCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... 
# load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n # for computing overall iou\n self.total_intersection_area = 0\n self.total_union_area = 0\n self.iou_list = []\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n # evaluateImg = self.evaluateImg\n # maxDet = p.maxDets[-1]\n # self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n # for catId in catIds\n # for areaRng in p.areaRng\n # for imgId in p.imgIds\n # ]\n # self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n\n # for computing overall iou\n # there is only one bbox and segm\n if p.iouType == 'bbox':\n g, d = g[0], d[0]\n g_bbox = [g[0], g[1], g[2] + g[0], g[3] + g[1]] # x1y1wh -> x1y1x2y2\n d_bbox = [d[0], d[1], d[2] + d[0], d[3] + d[1]] # x1y1wh -> x1y1x2y2\n g_bbox = torch.tensor(g_bbox).unsqueeze(0)\n d_bbox = torch.tensor(d_bbox).unsqueeze(0)\n iou, intersection, union = compute_bbox_iou(d_bbox, g_bbox)\n elif p.iouType == 'segm':\n g_segm = decode(g[0])\n d_segm = decode(d[0])\n g_segm = torch.tensor(g_segm).unsqueeze(0)\n d_segm = torch.tensor(d_segm).unsqueeze(0)\n iou, intersection, union = compute_mask_iou(d_segm, g_segm)\n else:\n raise Exception('unknown iouType for iou computation')\n iou, intersection, union = iou.item(), intersection.item(), union.item()\n self.total_intersection_area += intersection\n self.total_union_area += union\n self.iou_list.append(iou)\n return ious\n\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load 
computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] 
for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def 
_summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = _summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()" } ]
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import pickle
import pycocotools.mask as mask_util
import torch
import detectron2.utils.comm as comm
from collections import OrderedDict
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
from detectron2.config import CfgNode
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.structures import Boxes, BoxMode, pairwise_iou
from detectron2.utils.file_io import PathManager
from detectron2.utils.logger import create_small_table
from .evaluator import DatasetEvaluator
from detectron2.evaluation.fast_eval_api import COCOeval_opt
from detectron2.evaluation.refcocoeval import RefCOCOeval
11,812
# Copyright (c) Facebook, Inc. and its affiliates.
try:
    from detectron2.evaluation.fast_eval_api import COCOeval_opt
except ImportError:
    # fall back to the standard pycocotools COCOeval if the optimized version is unavailable
    COCOeval_opt = COCOeval
class COCOEvaluator(DatasetEvaluator):
8
2023-12-22 13:31:33+00:00
16k
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n # improve the resolution of DMTET at these steps\n progressive_resolution_steps: Optional[int] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if 
self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n self.cached_sdf = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # adjust the position of mesh\n if \"full_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.3\n elif \"half_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.1\n elif \"head_only\" in mesh_path:\n mesh.vertices[:,2] = mesh.vertices[:,2] + 0.15\n elif \"t-pose\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.4\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = 
np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(2000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((40000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n\n sdf_loss: Optional[Float[Tensor, \"*N 1\"]] = None\n if self.cfg.use_sdf_loss and self.cached_sdf is not None:\n selected_points_idx = torch.LongTensor(random.sample(range(points_unscaled.shape[0]), 100000))\n gt_sdf = torch.from_numpy(-self.cached_sdf(points_unscaled[selected_points_idx].cpu().numpy())).to(\n points_unscaled\n )[..., None]\n sdf_loss = F.mse_loss(gt_sdf, sdf[selected_points_idx], reduction='sum')\n return sdf, deformation, sdf_loss\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n\n if global_step >= 
(self.cfg.start_sdf_loss_step + 1) and self.cached_sdf is None:\n\n from pysdf import SDF\n import trimesh\n\n mesh_v_pos = np.load('.threestudio_cache/mesh_v_pos.npy')\n mesh_t_pos_idx = np.load('.threestudio_cache/mesh_t_pos_idx.npy')\n cached_mesh = trimesh.Trimesh(\n vertices=mesh_v_pos,\n faces=mesh_t_pos_idx,\n )\n self.cached_sdf = SDF(cached_mesh.vertices, cached_mesh.faces)\n\n if self.cfg.progressive_resolution_steps is not None:\n if global_step >= self.cfg.progressive_resolution_steps[0] and self.cfg.isosurface_resolution < 256:\n self.cfg.isosurface_resolution = 256\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n if global_step >= self.cfg.progressive_resolution_steps[1] and self.cfg.isosurface_resolution < 512:\n self.cfg.isosurface_resolution = 512\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n 
self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n 
[4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = 
edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n 
components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # 
Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n \n setattr(co, 'max_cost', 2.0)\n setattr(po, 'resolution', 4096)\n \n atlas.generate(co, po, verbose=True)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = 
ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
from tqdm import tqdm
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
13,963
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone())
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone())
self.isosurface_helper = MarchingTetrahedraHelper(
5
2023-12-23 12:37:48+00:00
16k
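As an illustrative sketch only (not dataset content): each row pairs a truncated source file (cropped_code) with the reference continuation (next_line), plus the index of the context snippet that supplies the needed definition (gold_snippet_index) and a context-length bucket (level). Below is a minimal Python sketch, assuming rows are plain dicts with these field names, of how a row like the one above could be scored by exact match; the predicted string is a stand-in for a model's output, not anything provided by the dataset.

def normalize(line: str) -> str:
    # Compare completions ignoring surrounding whitespace.
    return line.strip()

def exact_match(row: dict, predicted_line: str) -> bool:
    # A prediction counts as correct if it reproduces the reference next_line.
    return normalize(predicted_line) == normalize(row["next_line"])

# Toy row reusing field values shown in the record above.
row = {
    "next_line": "self.isosurface_helper = MarchingTetrahedraHelper(",
    "gold_snippet_index": 5,  # which context snippet holds the needed definition
    "level": "16k",           # context-length bucket
}
predicted = "self.isosurface_helper = MarchingTetrahedraHelper("  # stand-in model output
print(exact_match(row, predicted))  # True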
Con6924/SPM
evaluate_task.py
[ { "identifier": "config", "path": "src/configs/config.py", "snippet": "PRECISION_TYPES = Literal[\"fp32\", \"fp16\", \"bf16\", \"float32\", \"float16\", \"bfloat16\"]\nclass PretrainedModelConfig(BaseModel):\nclass NetworkConfig(BaseModel):\nclass TrainConfig(BaseModel): \nclass SaveConfig(BaseModel):\nclass LoggingConfig(BaseModel):\nclass InferenceConfig(BaseModel):\nclass OtherConfig(BaseModel):\nclass RootConfig(BaseModel):\ndef parse_precision(precision: str) -> torch.dtype:\ndef load_config_from_yaml(config_path: str) -> RootConfig:" }, { "identifier": "RootConfig", "path": "src/configs/config.py", "snippet": "class RootConfig(BaseModel):\n prompts_file: Optional[str] = None\n \n pretrained_model: PretrainedModelConfig\n\n network: Optional[NetworkConfig] = None\n\n train: Optional[TrainConfig] = None\n\n save: Optional[SaveConfig] = None\n\n logging: Optional[LoggingConfig] = None\n\n inference: Optional[InferenceConfig] = None\n\n other: Optional[OtherConfig] = None" }, { "identifier": "GenerationConfig", "path": "src/configs/generation_config.py", "snippet": "class GenerationConfig(BaseModel):\n prompts: list[str] = []\n negative_prompt: str = \"bad anatomy,watermark,extra digit,signature,worst quality,jpeg artifacts,normal quality,low quality,long neck,lowres,error,blurry,missing fingers,fewer digits,missing arms,text,cropped,Humpbacked,bad hands,username\"\n unconditional_prompt: str = \"\"\n width: int = 512\n height: int = 512\n num_inference_steps: int = 30\n guidance_scale: float = 7.5\n seed: int = 2024\n generate_num: int = 1\n\n save_path: str = None # can be a template, e.g. \"path/to/img_{}.png\",\n # then the generated images will be saved as \"path/to/img_0.png\", \"path/to/img_1.png\", ...\n\n def dict(self):\n results = {}\n for attr in vars(self):\n if not attr.startswith(\"_\"):\n results[attr] = getattr(self, attr)\n return results\n \n @staticmethod\n def fix_format(cfg):\n for k, v in cfg.items():\n if isinstance(v, list):\n cfg[k] = v[0]\n elif isinstance(v, torch.Tensor):\n cfg[k] = v.item()" }, { "identifier": "train_util", "path": "src/engine/train_util.py", "snippet": "UNET_IN_CHANNELS = 4 # Stable Diffusion の in_channels は 4 で固定。XLも同じ。\nVAE_SCALE_FACTOR = 8 # 2 ** (len(vae.config.block_out_channels) - 1) = 8\nUNET_ATTENTION_TIME_EMBED_DIM = 256 # XL\nTEXT_ENCODER_2_PROJECTION_DIM = 1280\nUNET_PROJECTION_CLASS_EMBEDDING_INPUT_DIM = 2816\ndef get_random_noise(\n batch_size: int, height: int, width: int, generator: torch.Generator = None\n) -> torch.Tensor:\ndef apply_noise_offset(latents: torch.FloatTensor, noise_offset: float):\ndef get_initial_latents(\n scheduler: SchedulerMixin,\n n_imgs: int,\n height: int,\n width: int,\n n_prompts: int,\n generator=None,\n) -> torch.Tensor:\ndef text_tokenize(\n tokenizer: CLIPTokenizer, # 普通ならひとつ、XLならふたつ!\n prompts: list[str],\n):\ndef text_encode(text_encoder: CLIPTextModel, tokens):\ndef encode_prompts(\n tokenizer: CLIPTokenizer,\n text_encoder: CLIPTokenizer,\n prompts: list[str],\n return_tokens: bool = False,\n):\ndef text_encode_xl(\n text_encoder: SDXL_TEXT_ENCODER_TYPE,\n tokens: torch.FloatTensor,\n num_images_per_prompt: int = 1,\n):\ndef encode_prompts_xl(\n tokenizers: list[CLIPTokenizer],\n text_encoders: list[SDXL_TEXT_ENCODER_TYPE],\n prompts: list[str],\n num_images_per_prompt: int = 1,\n) -> tuple[torch.FloatTensor, torch.FloatTensor]:\ndef concat_embeddings(\n unconditional: torch.FloatTensor,\n conditional: torch.FloatTensor,\n n_imgs: int,\n):\ndef predict_noise(\n unet: 
UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n guidance_scale=7.5,\n) -> torch.FloatTensor:\ndef diffusion(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: torch.FloatTensor,\n total_timesteps: int = 1000,\n start_timesteps=0,\n **kwargs,\n):\ndef rescale_noise_cfg(\n noise_cfg: torch.FloatTensor, noise_pred_text, guidance_rescale=0.0\n):\ndef predict_noise_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n timestep: int, # 現在のタイムステップ\n latents: torch.FloatTensor,\n text_embeddings: torch.FloatTensor, # uncond な text embed と cond な text embed を結合したもの\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale=7.5,\n guidance_rescale=0.7,\n) -> torch.FloatTensor:\ndef diffusion_xl(\n unet: UNet2DConditionModel,\n scheduler: SchedulerMixin,\n latents: torch.FloatTensor, # ただのノイズだけのlatents\n text_embeddings: tuple[torch.FloatTensor, torch.FloatTensor],\n add_text_embeddings: torch.FloatTensor, # pooled なやつ\n add_time_ids: torch.FloatTensor,\n guidance_scale: float = 1.0,\n total_timesteps: int = 1000,\n start_timesteps=0,\n):\ndef get_add_time_ids(\n height: int,\n width: int,\n dynamic_crops: bool = False,\n dtype: torch.dtype = torch.float32,\n):\ndef get_optimizer(config, trainable_params):\ndef get_scheduler_fix(config, optimizer: Optimizer, num_processes: int = 1):\n def wrap_check_needless_num_warmup_steps(return_vals):\ndef get_random_resolution_in_bucket(bucket_resolution: int = 512) -> tuple[int, int]:\ndef text2img(pipe: DiffusionPipeline,\n prompts: Union[str, list[str]], \n negative_prompt: Union[str, list[str]] = \"\", \n width: int = 512, \n height: int = 512,\n num_inference_steps: int = 30,\n guidance_scale: int = 7.5,\n seed: int = None,\n generate_num: int = 1,\n tag: str = \"\",\n **kwargs):\ndef latent2img(pipe: DiffusionPipeline,\n scheduler,\n noise_pred: torch.FloatTensor,\n latents: torch.FloatTensor,\n timestep: int,\n tag: str = \"ori\",\n **kwargs):" }, { "identifier": "model_util", "path": "src/models/model_util.py", "snippet": "TOKENIZER_V1_MODEL_NAME = \"CompVis/stable-diffusion-v1-4\"\nTOKENIZER_V2_MODEL_NAME = \"stabilityai/stable-diffusion-2-1\"\nAVAILABLE_SCHEDULERS = Literal[\"ddim\", \"ddpm\", \"lms\", \"euler_a\"]\nSDXL_TEXT_ENCODER_TYPE = Union[CLIPTextModel, CLIPTextModelWithProjection]\nDIFFUSERS_CACHE_DIR = \".cache/\" # if you want to change the cache dir, change this\nLOCAL_ONLY = False # if you want to use only local files, change this\ndef load_diffusers_model(\n pretrained_model_name_or_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel,]:\ndef load_checkpoint_model(\n checkpoint_path: str,\n v2: bool = False,\n clip_skip: Optional[int] = None,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, DiffusionPipeline]:\ndef load_models(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n v2: bool = False,\n v_pred: bool = False,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[CLIPTokenizer, CLIPTextModel, UNet2DConditionModel, SchedulerMixin, DiffusionPipeline, ]:\ndef load_diffusers_model_xl(\n pretrained_model_name_or_path: 
str,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel,]:\ndef load_checkpoint_model_xl(\n checkpoint_path: str,\n weight_dtype: torch.dtype = torch.float32,\n device = \"cuda\",\n) -> tuple[list[CLIPTokenizer], list[SDXL_TEXT_ENCODER_TYPE], UNet2DConditionModel, DiffusionPipeline, ]:\ndef load_models_xl(\n pretrained_model_name_or_path: str,\n scheduler_name: AVAILABLE_SCHEDULERS,\n weight_dtype: torch.dtype = torch.float32,\n) -> tuple[\ndef create_noise_scheduler(\n scheduler_name: AVAILABLE_SCHEDULERS = \"ddpm\",\n prediction_type: Literal[\"epsilon\", \"v_prediction\"] = \"epsilon\",\n) -> SchedulerMixin:" }, { "identifier": "SPMLayer", "path": "src/models/spm.py", "snippet": "class SPMLayer(nn.Module):\n \"\"\"\n replaces forward method of the original Linear, instead of replacing the original Linear module.\n \"\"\"\n\n def __init__(\n self,\n spm_name,\n org_module: nn.Module,\n multiplier=1.0,\n dim=4,\n alpha=1,\n ):\n \"\"\"if alpha == 0 or None, alpha is rank (no scaling).\"\"\"\n super().__init__()\n self.spm_name = spm_name\n self.dim = dim\n\n if org_module.__class__.__name__ == \"Linear\":\n in_dim = org_module.in_features\n out_dim = org_module.out_features\n self.lora_down = nn.Linear(in_dim, dim, bias=False)\n self.lora_up = nn.Linear(dim, out_dim, bias=False)\n\n elif org_module.__class__.__name__ == \"Conv2d\":\n in_dim = org_module.in_channels\n out_dim = org_module.out_channels\n\n self.dim = min(self.dim, in_dim, out_dim)\n if self.dim != dim:\n print(f\"{spm_name} dim (rank) is changed to: {self.dim}\")\n\n kernel_size = org_module.kernel_size\n stride = org_module.stride\n padding = org_module.padding\n self.lora_down = nn.Conv2d(\n in_dim, self.dim, kernel_size, stride, padding, bias=False\n )\n self.lora_up = nn.Conv2d(self.dim, out_dim, (1, 1), (1, 1), bias=False)\n\n if type(alpha) == torch.Tensor:\n alpha = alpha.detach().numpy()\n alpha = dim if alpha is None or alpha == 0 else alpha\n self.scale = alpha / self.dim\n self.register_buffer(\"alpha\", torch.tensor(alpha))\n\n # same as microsoft's\n nn.init.kaiming_uniform_(self.lora_down.weight, a=math.sqrt(5))\n nn.init.zeros_(self.lora_up.weight)\n\n self.multiplier = multiplier\n self.org_module = org_module # remove in applying\n\n def apply_to(self):\n self.org_forward = self.org_module.forward\n self.org_module.forward = self.forward\n del self.org_module\n\n def forward(self, x):\n return (\n self.org_forward(x)\n + self.lora_up(self.lora_down(x)) * self.multiplier * self.scale\n )" }, { "identifier": "SPMNetwork", "path": "src/models/spm.py", "snippet": "class SPMNetwork(nn.Module):\n UNET_TARGET_REPLACE_MODULE_TRANSFORMER = [\n \"Transformer2DModel\",\n ]\n UNET_TARGET_REPLACE_MODULE_CONV = [\n \"ResnetBlock2D\",\n \"Downsample2D\",\n \"Upsample2D\",\n ]\n\n SPM_PREFIX_UNET = \"lora_unet\" # aligning with SD webui usage\n DEFAULT_TARGET_REPLACE = UNET_TARGET_REPLACE_MODULE_TRANSFORMER\n\n def __init__(\n self,\n unet: UNet2DConditionModel,\n rank: int = 4,\n multiplier: float = 1.0,\n alpha: float = 1.0,\n module = SPMLayer,\n module_kwargs = None,\n ) -> None:\n super().__init__()\n\n self.multiplier = multiplier\n self.dim = rank\n self.alpha = alpha\n\n self.module = module\n self.module_kwargs = module_kwargs or {}\n\n # unet spm\n self.unet_spm_layers = self.create_modules(\n SPMNetwork.SPM_PREFIX_UNET,\n unet,\n SPMNetwork.DEFAULT_TARGET_REPLACE,\n self.dim,\n self.multiplier,\n )\n print(f\"Create SPM for U-Net: 
{len(self.unet_spm_layers)} modules.\")\n\n spm_names = set()\n for spm_layer in self.unet_spm_layers:\n assert (\n spm_layer.spm_name not in spm_names\n ), f\"duplicated SPM layer name: {spm_layer.spm_name}. {spm_names}\"\n spm_names.add(spm_layer.spm_name)\n\n for spm_layer in self.unet_spm_layers:\n spm_layer.apply_to()\n self.add_module(\n spm_layer.spm_name,\n spm_layer,\n )\n\n del unet\n\n torch.cuda.empty_cache()\n\n def create_modules(\n self,\n prefix: str,\n root_module: nn.Module,\n target_replace_modules: List[str],\n rank: int,\n multiplier: float,\n ) -> list:\n spm_layers = []\n\n for name, module in root_module.named_modules():\n if module.__class__.__name__ in target_replace_modules:\n for child_name, child_module in module.named_modules():\n if child_module.__class__.__name__ in [\"Linear\", \"Conv2d\"]:\n spm_name = prefix + \".\" + name + \".\" + child_name\n spm_name = spm_name.replace(\".\", \"_\")\n print(f\"{spm_name}\")\n spm_layer = self.module(\n spm_name, child_module, multiplier, rank, self.alpha, **self.module_kwargs\n )\n spm_layers.append(spm_layer)\n\n return spm_layers\n\n def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):\n all_params = []\n\n if self.unet_spm_layers:\n params = []\n [params.extend(spm_layer.parameters()) for spm_layer in self.unet_spm_layers]\n param_data = {\"params\": params}\n if default_lr is not None:\n param_data[\"lr\"] = default_lr\n all_params.append(param_data)\n\n return all_params\n\n def save_weights(self, file, dtype=None, metadata: Optional[dict] = None):\n state_dict = self.state_dict()\n\n if dtype is not None:\n for key in list(state_dict.keys()):\n v = state_dict[key]\n v = v.detach().clone().to(\"cpu\").to(dtype)\n state_dict[key] = v\n\n for key in list(state_dict.keys()):\n if not key.startswith(\"lora\"):\n del state_dict[key]\n\n if os.path.splitext(file)[1] == \".safetensors\":\n save_file(state_dict, file, metadata)\n else:\n torch.save(state_dict, file)\n\n def __enter__(self):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 1.0\n\n def __exit__(self, exc_type, exc_value, tb):\n for spm_layer in self.unet_spm_layers:\n spm_layer.multiplier = 0" }, { "identifier": "load_state_dict", "path": "src/models/merge_spm.py", "snippet": "def load_state_dict(file_name, dtype):\n if os.path.splitext(file_name)[1] == \".safetensors\":\n sd = load_file(file_name)\n metadata = load_metadata_from_safetensors(file_name)\n else:\n sd = torch.load(file_name, map_location=\"cpu\")\n metadata = {}\n\n for key in list(sd.keys()):\n if type(sd[key]) == torch.Tensor:\n sd[key] = sd[key].to(dtype)\n\n return sd, metadata" }, { "identifier": "SLDPipeline", "path": "src/misc/sld_pipeline.py", "snippet": "class SLDPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline for text-to-image generation using Safe Latent Diffusion.\n\n The implementation is based on the [`StableDiffusionPipeline`]\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. 
Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPFeatureExtractor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n ],\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPFeatureExtractor,\n ):\n super().__init__()\n safety_concept: Optional[str] = 'hate, harassment, violence, suffering, humiliation, harm, suicide, ' \\\n 'sexual, nudity, bodily fluids, blood, obscene gestures, illegal activity, ' \\\n 'drug use, theft, vandalism, weapons, child abuse, brutality, cruelty'\n\n if hasattr(scheduler.config, \"steps_offset\") and scheduler.config.steps_offset != 1:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`\"\n f\" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure \"\n \"to update the config accordingly as leaving `steps_offset` might led to incorrect results\"\n \" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,\"\n \" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`\"\n \" file\"\n )\n deprecate(\"steps_offset!=1\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"steps_offset\"] = 1\n scheduler._internal_dict = FrozenDict(new_config)\n\n if hasattr(scheduler.config, \"clip_sample\") and scheduler.config.clip_sample is True:\n deprecation_message = (\n f\"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`.\"\n \" `clip_sample` should be set to False in the configuration file. Please make sure to update the\"\n \" config accordingly as not setting `clip_sample` in the config might lead to incorrect results in\"\n \" future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very\"\n \" nice if you could open a Pull request for the `scheduler/scheduler_config.json` file\"\n )\n deprecate(\"clip_sample not set\", \"1.0.0\", deprecation_message, standard_warn=False)\n new_config = dict(scheduler.config)\n new_config[\"clip_sample\"] = False\n scheduler._internal_dict = FrozenDict(new_config)\n\n if safety_checker is None:\n logger.warn(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. 
Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self._safety_text_concept = safety_concept\n\n @property\n def safety_concept(self):\n r\"\"\"\n Getter method for the safety concept used with SLD\n\n Returns:\n `str`:\n The text describing the safety concept\n \"\"\"\n return self._safety_text_concept\n\n @safety_concept.setter\n def safety_concept(self, concept):\n r\"\"\"\n Setter method for the safety concept used with SLD\n\n Args:\n concept (`str`):\n The text of the new safety concept\n \"\"\"\n self._safety_text_concept = concept\n\n def enable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Enable memory efficient attention as implemented in xformers.\n\n When this option is enabled, you should observe lower GPU memory usage and a potential speed up at inference\n time. Speed up at training time is not guaranteed.\n\n Warning: When Memory Efficient Attention and Sliced attention are both enabled, the Memory Efficient Attention\n is used.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(True)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention as implemented in xformers.\n \"\"\"\n self.unet.set_use_memory_efficient_attention_xformers(False)\n\n def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = \"auto\"):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,\n `attention_head_dim` must be a multiple of `slice_size`.\n \"\"\"\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = self.unet.config.attention_head_dim // 2\n self.unet.set_attention_slice(slice_size)\n\n def disable_attention_slicing(self):\n r\"\"\"\n Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go\n back to computing attention in one step.\n \"\"\"\n # set slice_size = `None` to disable `attention slicing`\n self.enable_attention_slicing(None)\n\n def enable_sequential_cpu_offload(self):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. 
When called, unet,\n text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(\"cuda\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:\n if cpu_offloaded_model is not None:\n cpu_offload(cpu_offloaded_model, device)\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n height: int = 512,\n width: int = 512,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[torch.Generator] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n sld_guidance_scale: Optional[float] = 1000,\n sld_warmup_steps: Optional[int] = 10,\n sld_threshold: Optional[float] = 0.01,\n sld_momentum_scale: Optional[float] = 0.3,\n sld_mom_beta: Optional[float] = 0.4,\n **kwargs,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`):\n The prompt or prompts to guide the image generation.\n height (`int`, *optional*, defaults to 512):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to 512):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored\n if `guidance_scale` is less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator`, *optional*):\n A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation\n deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. 
Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n sld_guidance_scale (`float`, *optional*, defaults to 1000):\n The guidance scale of safe latent diffusion. If set to be less than 1, safety guidance will be disabled.\n sld_warmup_steps (`int`, *optional*, defaults to 10):\n Number of warmup steps for safety guidance. SLD will only be applied for diffusion steps greater\n than `sld_warmup_steps`.\n sld_threshold (`float`, *optional*, defaults to 0.01):\n Threshold that separates the hyperplane between appropriate and inappropriate images.\n sld_momentum_scale (`float`, *optional*, defaults to 0.3):\n Scale of the SLD momentum to be added to the safety guidance at each diffusion step.\n If set to 0.0 momentum will be disabled. Momentum is already built up during warmup,\n i.e. for diffusion steps smaller than `sld_warmup_steps`.\n sld_mom_beta (`float`, *optional*, defaults to 0.4):\n Defines how safety guidance momentum builds up. `sld_mom_beta` indicates how much of the previous\n momentum will be kept. Momentum is already built up during warmup, i.e. for diffusion steps smaller than\n `sld_warmup_steps`.\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n if isinstance(prompt, str):\n batch_size = 1\n elif isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n enable_safety_guidance = True\n if sld_guidance_scale < 1:\n enable_safety_guidance = False\n logger.warn('You have disabled safety guidance.')\n\n # get prompt text embeddings\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n\n if text_input_ids.shape[-1] > self.tokenizer.model_max_length:\n removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" 
{self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]\n text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n bs_embed, seq_len, _ = text_embeddings.shape\n text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)\n text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n max_length = text_input_ids.shape[-1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = uncond_embeddings.shape[1]\n uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)\n uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # Encode the safety concept text\n if enable_safety_guidance:\n safety_concept_input = self.tokenizer(\n [self._safety_text_concept],\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n safety_embeddings = self.text_encoder(safety_concept_input.input_ids.to(self.device))[0]\n\n # duplicate safety embeddings for each generation per prompt, using mps friendly method\n seq_len = safety_embeddings.shape[1]\n safety_embeddings = safety_embeddings.repeat(batch_size, num_images_per_prompt, 1)\n safety_embeddings = safety_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings, safety_embeddings])\n\n else:\n text_embeddings = torch.cat([uncond_embeddings, text_embeddings])\n\n # get the initial random noise unless the user supplied it\n\n # Unlike in other pipelines, latents need to be generated in the target device\n # for 1-to-1 results reproducibility with the CompVis implementation.\n # However this currently doesn't work in `mps`.\n latents_shape = (batch_size * num_images_per_prompt, self.unet.in_channels, height // 8, width // 8)\n latents_dtype = 
text_embeddings.dtype\n if latents is None:\n if self.device.type == \"mps\":\n # randn does not work reproducibly on mps\n latents = torch.randn(latents_shape, generator=generator, device=\"cpu\", dtype=latents_dtype).to(\n self.device\n )\n else:\n latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)\n else:\n if latents.shape != latents_shape:\n raise ValueError(f\"Unexpected latents shape, got {latents.shape}, expected {latents_shape}\")\n latents = latents.to(self.device)\n\n # set timesteps\n self.scheduler.set_timesteps(num_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # Some schedulers like PNDM have timesteps as arrays\n # It's more optimized to move all timesteps to correct device beforehand\n timesteps_tensor = self.scheduler.timesteps.to(self.device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n\n safety_momentum = None\n\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * (3 if enable_safety_guidance else 2)) \\\n if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_out = noise_pred.chunk((3 if enable_safety_guidance else 2))\n noise_pred_uncond, noise_pred_text = noise_pred_out[0], noise_pred_out[1]\n\n # default classifier free guidance\n noise_guidance = (noise_pred_text - noise_pred_uncond)\n\n # Perform SLD guidance\n if enable_safety_guidance:\n if safety_momentum is None:\n safety_momentum = torch.zeros_like(noise_guidance)\n noise_pred_safety_concept = noise_pred_out[2]\n\n # Equation 6\n scale = torch.clamp(\n torch.abs((noise_pred_text - noise_pred_safety_concept)) * sld_guidance_scale, max=1.)\n\n # Equation 6\n safety_concept_scale = torch.where(\n (noise_pred_text - noise_pred_safety_concept) >= sld_threshold,\n torch.zeros_like(scale), scale)\n\n # Equation 4\n noise_guidance_safety = torch.mul(\n (noise_pred_safety_concept - noise_pred_uncond), safety_concept_scale)\n\n # Equation 7\n noise_guidance_safety = noise_guidance_safety + sld_momentum_scale * safety_momentum\n\n # Equation 8\n safety_momentum = sld_mom_beta * safety_momentum + (1 - sld_mom_beta) * noise_guidance_safety\n\n if i >= sld_warmup_steps: # Warmup\n # Equation 3\n noise_guidance = noise_guidance - noise_guidance_safety\n\n noise_pred = noise_pred_uncond + guidance_scale * 
noise_guidance\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n latents = 1 / 0.18215 * latents\n image = self.vae.decode(latents).sample\n\n image = (image / 2 + 0.5).clamp(0, 1)\n\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n\n if self.safety_checker is not None:\n safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors=\"pt\").to(\n self.device\n )\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)\n )\n else:\n has_nsfw_concept = None\n\n if output_type == \"pil\":\n image = self.numpy_to_pil(image)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return SLDPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept,\n applied_safety_concept=self._safety_text_concept if enable_safety_guidance else None)" } ]
import argparse
import gc
import warnings
import torch
from pathlib import Path
from typing import Literal
from torch.utils.data import DataLoader
from accelerate import PartialState, Accelerator
from src.configs import config
from src.configs.config import RootConfig
from src.configs.generation_config import GenerationConfig
from src.engine import train_util
from src.evaluation import *
from src.models import model_util
from src.models.spm import SPMLayer, SPMNetwork
from src.models.merge_spm import load_state_dict
from src.misc.sld_pipeline import SLDPipeline
11,759
def get_dataloader(args, num_processes=1): # parse task_args arguments task_args = parse_extra_args(args.task_args) task_args["save_folder"] = args.img_save_path task_args["output_path"] = args.save_path # parse generation arguments cfg = parse_extra_args(args.generation_cfg) cfg = GenerationConfig(**cfg) dataset_class = None if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[
DIFFUSERS_CACHE_DIR = ".cache/" UNET_NAME = "unet" TEXT_ENCODER_NAME = "text_encoder" MATCHING_METRICS = Literal[ "clipcos", "clipcos_tokenuni", "tokenuni", "allone", ] distributed_state = PartialState() accelerator = Accelerator() def flush(): torch.cuda.empty_cache() gc.collect() def parse_extra_args(extra_args): if extra_args is None or extra_args == ['']: return {} extra_args_dict = {} for extra_arg in extra_args: key, value = extra_arg.split("=") # convert value to various types if value.isdigit(): value = int(value) elif value.replace(".", "", 1).isdigit(): value = float(value) elif value[0] == "[" and value[-1] == "]": value = [i.replace('+', ' ') for i in value[1:-1].split(",")] value = [v.strip() for v in value] if value[0].isdigit(): value = [int(v) for v in value] elif value[0].replace(".", "", 1).isdigit(): value = [float(v) for v in value] extra_args_dict[key] = value return extra_args_dict def get_dataloader(args, num_processes=1): # parse task_args arguments task_args = parse_extra_args(args.task_args) task_args["save_folder"] = args.img_save_path task_args["output_path"] = args.save_path # parse generation arguments cfg = parse_extra_args(args.generation_cfg) cfg = GenerationConfig(**cfg) dataset_class = None if args.task == "general": dataset_class = ClipTemplateDataset elif args.task == "artwork": dataset_class = ArtworkDataset elif args.task == "i2p": dataset_class = I2PDataset elif args.task == "coco": dataset_class = Coco30kGenerationDataset else: raise ValueError(f"Unknown task: {args.task}") dataset = dataset_class(**task_args, base_cfg=cfg) dataloader = DataLoader(dataset, batch_size=num_processes, num_workers=0, shuffle=False) return dataloader def get_evaluator(args): evaluator_class = None if args.task == "general": evaluator_class = ClipEvaluator elif args.task == "artwork": evaluator_class = ArtworkEvaluator elif args.task == "i2p": evaluator_class = I2PEvaluator elif args.task == "coco": evaluator_class = CocoEvaluator else: raise ValueError(f"Unknown task: {args.task}") evaluator = evaluator_class( save_folder=args.img_save_path, output_path=args.save_path ) return evaluator def calculate_matching_score( prompt_tokens, prompt_embeds, erased_prompt_tokens, erased_prompt_embeds, matching_metric: MATCHING_METRICS, special_token_ids: set[int], weight_dtype: torch.dtype = torch.float32, ): scores = [] if "allone" in matching_metric: scores.append(torch.ones(prompt_embeds.shape[0]).to("cpu", dtype=weight_dtype)) if "clipcos" in matching_metric: clipcos = torch.cosine_similarity( prompt_embeds.flatten(1, 2), erased_prompt_embeds.flatten(1, 2), dim=-1 ).cpu() scores.append(clipcos) if "tokenuni" in matching_metric: prompt_set = set(prompt_tokens[0].tolist()) - special_token_ids tokenuni = [] for ep in erased_prompt_tokens: ep_set = set(ep.tolist()) - special_token_ids tokenuni.append(len(prompt_set.intersection(ep_set)) / len(ep_set)) scores.append(torch.tensor(tokenuni).to("cpu", dtype=weight_dtype)) return torch.max(torch.stack(scores), dim=0)[0] @torch.no_grad() def infer_with_spm( dataloader: DataLoader, spm_paths: list[str], matching_metric: MATCHING_METRICS, facilitate_factor: float = 1.0, assigned_multipliers: list[float] = None, finetuned_model_path: str = None, sld_target_concept: str = None, base_model: str = "CompVis/stable-diffusion-v1-4", v2: bool = False, precision: str = "fp32", ): spm_model_paths = [ lp / f"{lp.name}_last.safetensors" if lp.is_dir() else lp for lp in spm_paths ] weight_dtype = config.parse_precision(precision) if finetuned_model_path 
is not None and Path(finetuned_model_path).is_dir(): # folder path for the diffuser model base_model = finetuned_model_path print(f"Using models from {base_model}") # load the pretrained SD tokenizer, text_encoder, unet, pipe = model_util.load_checkpoint_model( base_model, v2=v2, weight_dtype=weight_dtype, device=distributed_state.device, ) special_token_ids = set( tokenizer.convert_tokens_to_ids(tokenizer.special_tokens_map.values()) ) text_encoder.to(distributed_state.device, dtype=weight_dtype) text_encoder.eval() unet.to(distributed_state.device, dtype=weight_dtype) unet.enable_xformers_memory_efficient_attention() unet.requires_grad_(False) unet.eval() if len(spm_model_paths) > 0: # load the SPM models spms, metadatas = zip( *[
load_state_dict(spm_model_path, weight_dtype)
7
2023-12-26 03:19:16+00:00
16k
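As a second illustrative sketch only (not dataset content): gold_snippet_index points into the context list at the snippet whose definition the reference line relies on. The small Python check below, assuming rows are plain dicts, verifies this for the Con6924/SPM record above, where index 7 is the load_state_dict entry; the context list is abbreviated to identifiers only.

# Identifiers of the context entries for the record above, in order.
context = [
    {"identifier": "config"},
    {"identifier": "RootConfig"},
    {"identifier": "GenerationConfig"},
    {"identifier": "train_util"},
    {"identifier": "model_util"},
    {"identifier": "SPMLayer"},
    {"identifier": "SPMNetwork"},
    {"identifier": "load_state_dict"},
    {"identifier": "SLDPipeline"},
]
gold_snippet_index = 7
next_line = "load_state_dict(spm_model_path, weight_dtype)"

# The gold snippet's identifier should appear in the reference continuation.
gold = context[gold_snippet_index]["identifier"]
print(gold, gold in next_line)  # load_state_dict True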
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n init: str = \"default\",\n init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n init:\n The initializer to use. Choose from:\n\n \"default\": LeCun fan-in truncated normal initialization\n \"relu\": He initialization w/ truncated normal distribution\n \"glorot\": Fan-average Glorot uniform initialization\n \"gating\": Weights=0, Bias=1\n \"normal\": Normal initialization with std=1/sqrt(fan_in)\n \"final\": Weights=0, Bias=0\n\n Overridden by init_fn if the latter is not None.\n init_fn:\n A custom initializer taking weight and bias as inputs.\n Overrides init if not None.\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)\n\n with torch.no_grad():\n if init_fn is not None:\n init_fn(self.weight, self.bias)\n else:\n if init == \"default\":\n lecun_normal_init_(self.weight)\n elif init == \"relu\":\n he_normal_init_(self.weight)\n elif init == \"glorot\":\n glorot_uniform_init_(self.weight)\n elif init == \"gating\":\n gating_init_(self.weight)\n if bias:\n self.bias.fill_(1.0)\n elif init == \"normal\":\n normal_init_(self.weight)\n elif init == \"final\":\n final_init_(self.weight)\n else:\n raise ValueError(\"Invalid init string.\")" }, { "identifier": "LayerNorm", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n d = x.dtype\n # deepspeed_is_initialized = (\n # deepspeed_is_installed and \n # deepspeed.utils.is_initialized()\n # )\n # if(d is torch.bfloat16 and not deepspeed_is_initialized):\n # with torch.cuda.amp.autocast(enabled=False):\n # out = nn.functional.layer_norm(\n # x, \n # self.c_in, \n # self.weight.to(dtype=d), \n # self.bias.to(dtype=d), \n # self.eps\n # )\n # else:\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "ipa_point_weights_init_", "path": "frame2seq/openfold/model/primitives.py", "snippet": "def ipa_point_weights_init_(weights):\n with torch.no_grad():\n softplus_inverse_1 = 0.541324854612918\n weights.fill_(softplus_inverse_1)" }, { "identifier": "restype_rigid_group_default_frame", "path": "frame2seq/openfold/np/residue_constants.py", "snippet": "def load_stereo_chemical_props() -> Tuple[\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\ndef _make_rigid_group_constants():\ndef make_atom14_dists_bounds(\n overlap_tolerance=1.5, 
bond_length_tolerance_factor=15\n):\ndef _make_atom14_ambiguity_feats():\ndef aatype_to_str_sequence(aatype):\nHHBLITS_AA_TO_ID = {\n \"A\": 0,\n \"B\": 2,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"J\": 20,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"O\": 20,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"U\": 1,\n \"V\": 17,\n \"W\": 18,\n \"X\": 20,\n \"Y\": 19,\n \"Z\": 3,\n \"-\": 21,\n}\nID_TO_HHBLITS_AA = {\n 0: \"A\",\n 1: \"C\", # Also U.\n 2: \"D\", # Also B.\n 3: \"E\", # Also Z.\n 4: \"F\",\n 5: \"G\",\n 6: \"H\",\n 7: \"I\",\n 8: \"K\",\n 9: \"L\",\n 10: \"M\",\n 11: \"N\",\n 12: \"P\",\n 13: \"Q\",\n 14: \"R\",\n 15: \"S\",\n 16: \"T\",\n 17: \"V\",\n 18: \"W\",\n 19: \"Y\",\n 20: \"X\", # Includes J and O.\n 21: \"-\",\n}\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])\n for i in range(len(restypes_with_x_and_gap))\n)\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()" }, { "identifier": "frames_and_literature_positions_to_atom14_pos", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def frames_and_literature_positions_to_atom14_pos(\n r: Rigid,\n aatype: torch.Tensor,\n default_frames,\n group_idx,\n atom_mask,\n lit_positions,\n):\n # [*, N, 14, 4, 4]\n default_4x4 = default_frames[aatype, ...]\n\n # [*, N, 14]\n group_mask = group_idx[aatype, ...]\n\n # [*, N, 14, 8]\n group_mask = nn.functional.one_hot(\n group_mask,\n num_classes=default_frames.shape[-3],\n )\n\n # [*, N, 14, 8]\n t_atoms_to_global = r[..., None, :] * group_mask\n\n # [*, N, 14]\n t_atoms_to_global = t_atoms_to_global.map_tensor_fn(\n lambda x: torch.sum(x, dim=-1)\n )\n\n # [*, N, 14, 1]\n atom_mask = atom_mask[aatype, ...].unsqueeze(-1)\n\n # [*, N, 14, 3]\n lit_positions = lit_positions[aatype, ...]\n pred_positions = t_atoms_to_global.apply(lit_positions)\n pred_positions = pred_positions * atom_mask\n\n return pred_positions" }, { "identifier": "torsion_angles_to_frames", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def torsion_angles_to_frames(\n r: Rigid,\n alpha: torch.Tensor,\n aatype: torch.Tensor,\n rrgdf: torch.Tensor,\n):\n # [*, N, 8, 4, 4]\n default_4x4 = rrgdf[aatype, ...]\n\n # [*, N, 8] transformations, i.e.\n # One [*, N, 8, 3, 3] rotation matrix and\n # One [*, N, 8, 3] translation matrix\n default_r = r.from_tensor_4x4(default_4x4)\n\n bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))\n bb_rot[..., 1] = 1\n\n # [*, N, 8, 2]\n alpha = torch.cat(\n [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2\n )\n\n # [*, N, 8, 3, 3]\n # Produces rotation matrices of the form:\n # [\n # [1, 0 , 0 ],\n # [0, a_2,-a_1],\n # [0, a_1, a_2]\n # ]\n # This follows the original code rather than the supplement, which uses\n # different indices.\n\n all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)\n all_rots[..., 0, 0] = 1\n all_rots[..., 1, 1] = alpha[..., 1]\n all_rots[..., 1, 2] = -alpha[..., 0]\n all_rots[..., 2, 1:] = alpha\n\n all_rots = Rigid(Rotation(rot_mats=all_rots), None)\n\n all_frames = default_r.compose(all_rots)\n\n chi2_frame_to_frame = all_frames[..., 5]\n chi3_frame_to_frame = all_frames[..., 6]\n chi4_frame_to_frame = all_frames[..., 7]\n\n chi1_frame_to_bb = all_frames[..., 4]\n chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)\n chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)\n chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)\n\n all_frames_to_bb = 
Rigid.cat(\n [\n all_frames[..., :5],\n chi2_frame_to_bb.unsqueeze(-1),\n chi3_frame_to_bb.unsqueeze(-1),\n chi4_frame_to_bb.unsqueeze(-1),\n ],\n dim=-1,\n )\n\n all_frames_to_global = r[..., None].compose(all_frames_to_bb)\n\n return all_frames_to_global" }, { "identifier": "is_fp16_enabled", "path": "frame2seq/openfold/utils/precision_utils.py", "snippet": "def is_fp16_enabled():\n # Autocast world\n try:\n fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16\n fp16_enabled = fp16_enabled and torch.is_autocast_enabled()\n except AttributeError:\n fp16_enabled = False\n\n return fp16_enabled" }, { "identifier": "Rotation", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". 
Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. 
If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An 
updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. 
to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")" }, { "identifier": "Rigid", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. 
from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" } ]
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
14,146
mask=None, inplace_safe=False, _offload_inference=False, ): """ Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs """ s = evoformer_output_dict["single"] if mask is None: # [*, N] mask = s.new_ones(s.shape[:-1]) # [*, N, C_s] s = self.layer_norm_s(s) # [*, N, N, C_z] z = self.layer_norm_z(evoformer_output_dict["pair"]) z_reference_list = None if(_offload_inference): assert(sys.getrefcount(evoformer_output_dict["pair"]) == 2) evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu() z_reference_list = [z] z = None # [*, N, C_s] s_initial = s s = self.linear_in(s) # [*, N] rigids = Rigid.identity( s.shape[:-1], s.dtype, s.device, self.training, fmt="quat", ) outputs = [] for i in range(self.no_blocks): # [*, N, C_s] s = s + self.ipa( s, z, rigids, mask, inplace_safe=inplace_safe, _offload_inference=_offload_inference, _z_reference_list=z_reference_list ) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) # [*, N] rigids = rigids.compose_q_update_vec(self.bb_update(s)) # To hew as closely as possible to AlphaFold, we convert our # quaternion-based transformations to rotation-matrix ones # here backb_to_global = Rigid( Rotation( rot_mats=rigids.get_rots().get_rot_mats(), quats=None ), rigids.get_trans(), ) backb_to_global = backb_to_global.scale_translation( self.trans_scale_factor ) # [*, N, 7, 2] unnormalized_angles, angles = self.angle_resnet(s, s_initial) all_frames_to_global = self.torsion_angles_to_frames( backb_to_global, angles, aatype, ) pred_xyz = self.frames_and_literature_positions_to_atom14_pos( all_frames_to_global, aatype, ) scaled_rigids = rigids.scale_translation(self.trans_scale_factor) preds = { "frames": scaled_rigids.to_tensor_7(), "sidechain_frames": all_frames_to_global.to_tensor_4x4(), "unnormalized_angles": unnormalized_angles, "angles": angles, "positions": pred_xyz, "states": s, } outputs.append(preds) rigids = rigids.stop_rot_gradient() del z, z_reference_list if(_offload_inference): evoformer_output_dict["pair"] = ( evoformer_output_dict["pair"].to(s.device) )
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. 
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res] if(is_fp16_enabled()): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) if(inplace_safe): pt_att *= pt_att else: pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) if(inplace_safe): pt_att *= head_weights else: pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) """ Frame2seq implementation of IPA regularization via attention dropout """ if attn_drop_rate > 0.0: random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) square_mask += random_square_mask # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) if(inplace_safe): a += pt_att del pt_att a += square_mask.unsqueeze(-3) # in-place softmax attn_core_inplace_cuda.forward_( a, reduce(mul, a.shape[:-1]), a.shape[-1], ) else: a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] if(inplace_safe): v_pts = permute_final_dims(v_pts, (1, 3, 0, 2)) o_pt = [ torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3) ] o_pt = torch.stack(o_pt, dim=-3) else: o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c, init="relu") self.linear_2 = Linear(self.c, self.c, init="relu") self.linear_3 = Linear(self.c, self.c, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers, dropout_rate): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.dropout_rate = dropout_rate self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.dropout = nn.Dropout(self.dropout_rate) self.layer_norm = LayerNorm(self.c) def forward(self, s): for l in self.layers: s = l(s) s = self.dropout(s) s = self.layer_norm(s) return s class StructureModule(nn.Module): def __init__( self, c_s, c_z, c_ipa, c_resnet, no_heads_ipa, no_qk_points, no_v_points, dropout_rate, no_blocks, no_transition_layers, no_resnet_blocks, no_angles, trans_scale_factor, epsilon, inf, **kwargs, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_ipa: IPA hidden channel dimension c_resnet: Angle resnet (Alg. 23 lines 11-14) hidden channel dimension no_heads_ipa: Number of IPA heads no_qk_points: Number of query/key points to generate during IPA no_v_points: Number of value points to generate during IPA dropout_rate: Dropout rate used throughout the layer no_blocks: Number of structure module blocks no_transition_layers: Number of layers in the single representation transition (Alg. 
23 lines 8-9) no_resnet_blocks: Number of blocks in the angle resnet no_angles: Number of angles to generate in the angle resnet trans_scale_factor: Scale of single representation transition hidden dimension epsilon: Small number used in angle resnet normalization inf: Large number used for attention masking """ super(StructureModule, self).__init__() self.c_s = c_s self.c_z = c_z self.c_ipa = c_ipa self.c_resnet = c_resnet self.no_heads_ipa = no_heads_ipa self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.dropout_rate = dropout_rate self.no_blocks = no_blocks self.no_transition_layers = no_transition_layers self.no_resnet_blocks = no_resnet_blocks self.no_angles = no_angles self.trans_scale_factor = trans_scale_factor self.epsilon = epsilon self.inf = inf # Buffers to be lazily initialized later # self.default_frames # self.group_idx # self.atom_mask # self.lit_positions self.layer_norm_s = LayerNorm(self.c_s) self.layer_norm_z = LayerNorm(self.c_z) self.linear_in = Linear(self.c_s, self.c_s) self.ipa = InvariantPointAttention( self.c_s, self.c_z, self.c_ipa, self.no_heads_ipa, self.no_qk_points, self.no_v_points, inf=self.inf, eps=self.epsilon, ) self.ipa_dropout = nn.Dropout(self.dropout_rate) self.layer_norm_ipa = LayerNorm(self.c_s) self.transition = StructureModuleTransition( self.c_s, self.no_transition_layers, self.dropout_rate, ) self.bb_update = BackboneUpdate(self.c_s) self.angle_resnet = AngleResnet( self.c_s, self.c_resnet, self.no_resnet_blocks, self.no_angles, self.epsilon, ) def forward( self, evoformer_output_dict, aatype, mask=None, inplace_safe=False, _offload_inference=False, ): """ Args: evoformer_output_dict: Dictionary containing: "single": [*, N_res, C_s] single representation "pair": [*, N_res, N_res, C_z] pair representation aatype: [*, N_res] amino acid indices mask: Optional [*, N_res] sequence mask Returns: A dictionary of outputs """ s = evoformer_output_dict["single"] if mask is None: # [*, N] mask = s.new_ones(s.shape[:-1]) # [*, N, C_s] s = self.layer_norm_s(s) # [*, N, N, C_z] z = self.layer_norm_z(evoformer_output_dict["pair"]) z_reference_list = None if(_offload_inference): assert(sys.getrefcount(evoformer_output_dict["pair"]) == 2) evoformer_output_dict["pair"] = evoformer_output_dict["pair"].cpu() z_reference_list = [z] z = None # [*, N, C_s] s_initial = s s = self.linear_in(s) # [*, N] rigids = Rigid.identity( s.shape[:-1], s.dtype, s.device, self.training, fmt="quat", ) outputs = [] for i in range(self.no_blocks): # [*, N, C_s] s = s + self.ipa( s, z, rigids, mask, inplace_safe=inplace_safe, _offload_inference=_offload_inference, _z_reference_list=z_reference_list ) s = self.ipa_dropout(s) s = self.layer_norm_ipa(s) s = self.transition(s) # [*, N] rigids = rigids.compose_q_update_vec(self.bb_update(s)) # To hew as closely as possible to AlphaFold, we convert our # quaternion-based transformations to rotation-matrix ones # here backb_to_global = Rigid( Rotation( rot_mats=rigids.get_rots().get_rot_mats(), quats=None ), rigids.get_trans(), ) backb_to_global = backb_to_global.scale_translation( self.trans_scale_factor ) # [*, N, 7, 2] unnormalized_angles, angles = self.angle_resnet(s, s_initial) all_frames_to_global = self.torsion_angles_to_frames( backb_to_global, angles, aatype, ) pred_xyz = self.frames_and_literature_positions_to_atom14_pos( all_frames_to_global, aatype, ) scaled_rigids = rigids.scale_translation(self.trans_scale_factor) preds = { "frames": scaled_rigids.to_tensor_7(), "sidechain_frames": 
all_frames_to_global.to_tensor_4x4(), "unnormalized_angles": unnormalized_angles, "angles": angles, "positions": pred_xyz, "states": s, } outputs.append(preds) rigids = rigids.stop_rot_gradient() del z, z_reference_list if(_offload_inference): evoformer_output_dict["pair"] = ( evoformer_output_dict["pair"].to(s.device) )
outputs = dict_multimap(torch.stack, outputs)
9
2023-12-25 09:29:36+00:00
16k
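A minimal, self-contained sketch (not part of any dataset row) of what the gold next line of the preceding row, `outputs = dict_multimap(torch.stack, outputs)`, computes. The body of `dict_multimap` is copied verbatim from that row's own context snippet; the example dicts, their keys beyond "angles"/"states", and all tensor shapes are hypothetical stand-ins chosen for illustration, not values taken from the source.

import torch

def dict_multimap(fn, dicts):
    # Verbatim from the "dict_multimap" context snippet: apply `fn` to the
    # list of values gathered under each shared key, recursing into nested dicts.
    first = dicts[0]
    new_dict = {}
    for k, v in first.items():
        all_v = [d[k] for d in dicts]
        if type(v) is dict:
            new_dict[k] = dict_multimap(fn, all_v)
        else:
            new_dict[k] = fn(all_v)
    return new_dict

# Hypothetical stand-ins for two per-block prediction dicts appended to
# `outputs` inside StructureModule.forward (shapes are illustrative only).
outputs = [
    {"angles": torch.zeros(4, 7, 2), "states": torch.zeros(4, 384)},
    {"angles": torch.ones(4, 7, 2), "states": torch.ones(4, 384)},
]

stacked = dict_multimap(torch.stack, outputs)
print(stacked["angles"].shape)  # torch.Size([2, 4, 7, 2]) -- new leading block dimension
print(stacked["states"].shape)  # torch.Size([2, 4, 384])

In other words, the gold line merges the per-iteration prediction dicts into a single dict whose tensors gain a leading dimension indexed by structure-module block, which is why `gold_snippet_index` for this row points at the `dict_multimap` entry in the context list.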
KyanChen/TTP
mmdet/datasets/transforms/formatting.py
[ { "identifier": "TRANSFORMS", "path": "mmdet/registry.py", "snippet": "TRANSFORMS = Registry(\n 'transform',\n parent=MMENGINE_TRANSFORMS,\n locations=['mmdet.datasets.transforms'])" }, { "identifier": "DetDataSample", "path": "mmdet/structures/det_data_sample.py", "snippet": "class DetDataSample(BaseDataElement):\n \"\"\"A data structure interface of MMDetection. They are used as interfaces\n between different components.\n\n The attributes in ``DetDataSample`` are divided into several parts:\n\n - ``proposals``(InstanceData): Region proposals used in two-stage\n detectors.\n - ``gt_instances``(InstanceData): Ground truth of instance annotations.\n - ``pred_instances``(InstanceData): Instances of detection predictions.\n - ``pred_track_instances``(InstanceData): Instances of tracking\n predictions.\n - ``ignored_instances``(InstanceData): Instances to be ignored during\n training/testing.\n - ``gt_panoptic_seg``(PixelData): Ground truth of panoptic\n segmentation.\n - ``pred_panoptic_seg``(PixelData): Prediction of panoptic\n segmentation.\n - ``gt_sem_seg``(PixelData): Ground truth of semantic segmentation.\n - ``pred_sem_seg``(PixelData): Prediction of semantic segmentation.\n\n Examples:\n >>> import torch\n >>> import numpy as np\n >>> from mmengine.structures import InstanceData\n >>> from mmdet.structures import DetDataSample\n\n >>> data_sample = DetDataSample()\n >>> img_meta = dict(img_shape=(800, 1196),\n ... pad_shape=(800, 1216))\n >>> gt_instances = InstanceData(metainfo=img_meta)\n >>> gt_instances.bboxes = torch.rand((5, 4))\n >>> gt_instances.labels = torch.rand((5,))\n >>> data_sample.gt_instances = gt_instances\n >>> assert 'img_shape' in data_sample.gt_instances.metainfo_keys()\n >>> len(data_sample.gt_instances)\n 5\n >>> print(data_sample)\n <DetDataSample(\n\n META INFORMATION\n\n DATA FIELDS\n gt_instances: <InstanceData(\n\n META INFORMATION\n pad_shape: (800, 1216)\n img_shape: (800, 1196)\n\n DATA FIELDS\n labels: tensor([0.8533, 0.1550, 0.5433, 0.7294, 0.5098])\n bboxes:\n tensor([[9.7725e-01, 5.8417e-01, 1.7269e-01, 6.5694e-01],\n [1.7894e-01, 5.1780e-01, 7.0590e-01, 4.8589e-01],\n [7.0392e-01, 6.6770e-01, 1.7520e-01, 1.4267e-01],\n [2.2411e-01, 5.1962e-01, 9.6953e-01, 6.6994e-01],\n [4.1338e-01, 2.1165e-01, 2.7239e-04, 6.8477e-01]])\n ) at 0x7f21fb1b9190>\n ) at 0x7f21fb1b9880>\n >>> pred_instances = InstanceData(metainfo=img_meta)\n >>> pred_instances.bboxes = torch.rand((5, 4))\n >>> pred_instances.scores = torch.rand((5,))\n >>> data_sample = DetDataSample(pred_instances=pred_instances)\n >>> assert 'pred_instances' in data_sample\n\n >>> pred_track_instances = InstanceData(metainfo=img_meta)\n >>> pred_track_instances.bboxes = torch.rand((5, 4))\n >>> pred_track_instances.scores = torch.rand((5,))\n >>> data_sample = DetDataSample(\n ... pred_track_instances=pred_track_instances)\n >>> assert 'pred_track_instances' in data_sample\n\n >>> data_sample = DetDataSample()\n >>> gt_instances_data = dict(\n ... bboxes=torch.rand(2, 4),\n ... labels=torch.rand(2),\n ... 
masks=np.random.rand(2, 2, 2))\n >>> gt_instances = InstanceData(**gt_instances_data)\n >>> data_sample.gt_instances = gt_instances\n >>> assert 'gt_instances' in data_sample\n >>> assert 'masks' in data_sample.gt_instances\n\n >>> data_sample = DetDataSample()\n >>> gt_panoptic_seg_data = dict(panoptic_seg=torch.rand(2, 4))\n >>> gt_panoptic_seg = PixelData(**gt_panoptic_seg_data)\n >>> data_sample.gt_panoptic_seg = gt_panoptic_seg\n >>> print(data_sample)\n <DetDataSample(\n\n META INFORMATION\n\n DATA FIELDS\n _gt_panoptic_seg: <BaseDataElement(\n\n META INFORMATION\n\n DATA FIELDS\n panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],\n [0.3200, 0.7448, 0.1052, 0.5371]])\n ) at 0x7f66c2bb7730>\n gt_panoptic_seg: <BaseDataElement(\n\n META INFORMATION\n\n DATA FIELDS\n panoptic_seg: tensor([[0.7586, 0.1262, 0.2892, 0.9341],\n [0.3200, 0.7448, 0.1052, 0.5371]])\n ) at 0x7f66c2bb7730>\n ) at 0x7f66c2bb7280>\n >>> data_sample = DetDataSample()\n >>> gt_segm_seg_data = dict(segm_seg=torch.rand(2, 2, 2))\n >>> gt_segm_seg = PixelData(**gt_segm_seg_data)\n >>> data_sample.gt_segm_seg = gt_segm_seg\n >>> assert 'gt_segm_seg' in data_sample\n >>> assert 'segm_seg' in data_sample.gt_segm_seg\n \"\"\"\n\n @property\n def proposals(self) -> InstanceData:\n return self._proposals\n\n @proposals.setter\n def proposals(self, value: InstanceData):\n self.set_field(value, '_proposals', dtype=InstanceData)\n\n @proposals.deleter\n def proposals(self):\n del self._proposals\n\n @property\n def gt_instances(self) -> InstanceData:\n return self._gt_instances\n\n @gt_instances.setter\n def gt_instances(self, value: InstanceData):\n self.set_field(value, '_gt_instances', dtype=InstanceData)\n\n @gt_instances.deleter\n def gt_instances(self):\n del self._gt_instances\n\n @property\n def pred_instances(self) -> InstanceData:\n return self._pred_instances\n\n @pred_instances.setter\n def pred_instances(self, value: InstanceData):\n self.set_field(value, '_pred_instances', dtype=InstanceData)\n\n @pred_instances.deleter\n def pred_instances(self):\n del self._pred_instances\n\n # directly add ``pred_track_instances`` in ``DetDataSample``\n # so that the ``TrackDataSample`` does not bother to access the\n # instance-level information.\n @property\n def pred_track_instances(self) -> InstanceData:\n return self._pred_track_instances\n\n @pred_track_instances.setter\n def pred_track_instances(self, value: InstanceData):\n self.set_field(value, '_pred_track_instances', dtype=InstanceData)\n\n @pred_track_instances.deleter\n def pred_track_instances(self):\n del self._pred_track_instances\n\n @property\n def ignored_instances(self) -> InstanceData:\n return self._ignored_instances\n\n @ignored_instances.setter\n def ignored_instances(self, value: InstanceData):\n self.set_field(value, '_ignored_instances', dtype=InstanceData)\n\n @ignored_instances.deleter\n def ignored_instances(self):\n del self._ignored_instances\n\n @property\n def gt_panoptic_seg(self) -> PixelData:\n return self._gt_panoptic_seg\n\n @gt_panoptic_seg.setter\n def gt_panoptic_seg(self, value: PixelData):\n self.set_field(value, '_gt_panoptic_seg', dtype=PixelData)\n\n @gt_panoptic_seg.deleter\n def gt_panoptic_seg(self):\n del self._gt_panoptic_seg\n\n @property\n def pred_panoptic_seg(self) -> PixelData:\n return self._pred_panoptic_seg\n\n @pred_panoptic_seg.setter\n def pred_panoptic_seg(self, value: PixelData):\n self.set_field(value, '_pred_panoptic_seg', dtype=PixelData)\n\n @pred_panoptic_seg.deleter\n def pred_panoptic_seg(self):\n del 
self._pred_panoptic_seg\n\n @property\n def gt_sem_seg(self) -> PixelData:\n return self._gt_sem_seg\n\n @gt_sem_seg.setter\n def gt_sem_seg(self, value: PixelData):\n self.set_field(value, '_gt_sem_seg', dtype=PixelData)\n\n @gt_sem_seg.deleter\n def gt_sem_seg(self):\n del self._gt_sem_seg\n\n @property\n def pred_sem_seg(self) -> PixelData:\n return self._pred_sem_seg\n\n @pred_sem_seg.setter\n def pred_sem_seg(self, value: PixelData):\n self.set_field(value, '_pred_sem_seg', dtype=PixelData)\n\n @pred_sem_seg.deleter\n def pred_sem_seg(self):\n del self._pred_sem_seg" }, { "identifier": "ReIDDataSample", "path": "mmdet/structures/reid_data_sample.py", "snippet": "class ReIDDataSample(BaseDataElement):\n \"\"\"A data structure interface of ReID task.\n\n It's used as interfaces between different components.\n\n Meta field:\n img_shape (Tuple): The shape of the corresponding input image.\n Used for visualization.\n ori_shape (Tuple): The original shape of the corresponding image.\n Used for visualization.\n num_classes (int): The number of all categories.\n Used for label format conversion.\n\n Data field:\n gt_label (LabelData): The ground truth label.\n pred_label (LabelData): The predicted label.\n scores (torch.Tensor): The outputs of model.\n \"\"\"\n\n @property\n def gt_label(self):\n return self._gt_label\n\n @gt_label.setter\n def gt_label(self, value: LabelData):\n self.set_field(value, '_gt_label', dtype=LabelData)\n\n @gt_label.deleter\n def gt_label(self):\n del self._gt_label\n\n def set_gt_label(\n self, value: Union[np.ndarray, torch.Tensor, Sequence[Number], Number]\n ) -> 'ReIDDataSample':\n \"\"\"Set label of ``gt_label``.\"\"\"\n label = format_label(value, self.get('num_classes'))\n if 'gt_label' in self: # setting for the second time\n self.gt_label.label = label.label\n else: # setting for the first time\n self.gt_label = label\n return self\n\n def set_gt_score(self, value: torch.Tensor) -> 'ReIDDataSample':\n \"\"\"Set score of ``gt_label``.\"\"\"\n assert isinstance(value, torch.Tensor), \\\n f'The value should be a torch.Tensor but got {type(value)}.'\n assert value.ndim == 1, \\\n f'The dims of value should be 1, but got {value.ndim}.'\n\n if 'num_classes' in self:\n assert value.size(0) == self.num_classes, \\\n f\"The length of value ({value.size(0)}) doesn't \"\\\n f'match the num_classes ({self.num_classes}).'\n metainfo = {'num_classes': self.num_classes}\n else:\n metainfo = {'num_classes': value.size(0)}\n\n if 'gt_label' in self: # setting for the second time\n self.gt_label.score = value\n else: # setting for the first time\n self.gt_label = LabelData(score=value, metainfo=metainfo)\n return self\n\n @property\n def pred_feature(self):\n return self._pred_feature\n\n @pred_feature.setter\n def pred_feature(self, value: torch.Tensor):\n self.set_field(value, '_pred_feature', dtype=torch.Tensor)\n\n @pred_feature.deleter\n def pred_feature(self):\n del self._pred_feature" }, { "identifier": "TrackDataSample", "path": "mmdet/structures/track_data_sample.py", "snippet": "class TrackDataSample(BaseDataElement):\n \"\"\"A data structure interface of tracking task in MMDetection. It is used\n as interfaces between different components.\n\n This data structure can be viewd as a wrapper of multiple DetDataSample to\n some extent. Specifically, it only contains a property:\n ``video_data_samples`` which is a list of DetDataSample, each of which\n corresponds to a single frame. 
If you want to get the property of a single\n frame, you must first get the corresponding ``DetDataSample`` by indexing\n and then get the property of the frame, such as ``gt_instances``,\n ``pred_instances`` and so on. As for metainfo, it differs from\n ``DetDataSample`` in that each value corresponds to the metainfo key is a\n list where each element corresponds to information of a single frame.\n\n Examples:\n >>> import torch\n >>> from mmengine.structures import InstanceData\n >>> from mmdet.structures import DetDataSample, TrackDataSample\n >>> track_data_sample = TrackDataSample()\n >>> # set the 1st frame\n >>> frame1_data_sample = DetDataSample(metainfo=dict(\n ... img_shape=(100, 100), frame_id=0))\n >>> frame1_gt_instances = InstanceData()\n >>> frame1_gt_instances.bbox = torch.zeros([2, 4])\n >>> frame1_data_sample.gt_instances = frame1_gt_instances\n >>> # set the 2nd frame\n >>> frame2_data_sample = DetDataSample(metainfo=dict(\n ... img_shape=(100, 100), frame_id=1))\n >>> frame2_gt_instances = InstanceData()\n >>> frame2_gt_instances.bbox = torch.ones([3, 4])\n >>> frame2_data_sample.gt_instances = frame2_gt_instances\n >>> track_data_sample.video_data_samples = [frame1_data_sample,\n ... frame2_data_sample]\n >>> # set metainfo for track_data_sample\n >>> track_data_sample.set_metainfo(dict(key_frames_inds=[0]))\n >>> track_data_sample.set_metainfo(dict(ref_frames_inds=[1]))\n >>> print(track_data_sample)\n <TrackDataSample(\n\n META INFORMATION\n key_frames_inds: [0]\n ref_frames_inds: [1]\n\n DATA FIELDS\n video_data_samples: [<DetDataSample(\n\n META INFORMATION\n img_shape: (100, 100)\n\n DATA FIELDS\n gt_instances: <InstanceData(\n\n META INFORMATION\n\n DATA FIELDS\n bbox: tensor([[0., 0., 0., 0.],\n [0., 0., 0., 0.]])\n ) at 0x7f639320dcd0>\n ) at 0x7f64bd223340>, <DetDataSample(\n\n META INFORMATION\n img_shape: (100, 100)\n\n DATA FIELDS\n gt_instances: <InstanceData(\n\n META INFORMATION\n\n DATA FIELDS\n bbox: tensor([[1., 1., 1., 1.],\n [1., 1., 1., 1.],\n [1., 1., 1., 1.]])\n ) at 0x7f64bd128b20>\n ) at 0x7f64bd1346d0>]\n ) at 0x7f64bd2237f0>\n >>> print(len(track_data_sample))\n 2\n >>> key_data_sample = track_data_sample.get_key_frames()\n >>> print(key_data_sample[0].frame_id)\n 0\n >>> ref_data_sample = track_data_sample.get_ref_frames()\n >>> print(ref_data_sample[0].frame_id)\n 1\n >>> frame1_data_sample = track_data_sample[0]\n >>> print(frame1_data_sample.gt_instances.bbox)\n tensor([[0., 0., 0., 0.],\n [0., 0., 0., 0.]])\n >>> # Tensor-like methods\n >>> cuda_track_data_sample = track_data_sample.to('cuda')\n >>> cuda_track_data_sample = track_data_sample.cuda()\n >>> cpu_track_data_sample = track_data_sample.cpu()\n >>> cpu_track_data_sample = track_data_sample.to('cpu')\n >>> fp16_instances = cuda_track_data_sample.to(\n ... device=None, dtype=torch.float16, non_blocking=False,\n ... 
copy=False, memory_format=torch.preserve_format)\n \"\"\"\n\n @property\n def video_data_samples(self) -> List[DetDataSample]:\n return self._video_data_samples\n\n @video_data_samples.setter\n def video_data_samples(self, value: List[DetDataSample]):\n if isinstance(value, DetDataSample):\n value = [value]\n assert isinstance(value, list), 'video_data_samples must be a list'\n assert isinstance(\n value[0], DetDataSample\n ), 'video_data_samples must be a list of DetDataSample, but got '\n f'{value[0]}'\n self.set_field(value, '_video_data_samples', dtype=list)\n\n @video_data_samples.deleter\n def video_data_samples(self):\n del self._video_data_samples\n\n def __getitem__(self, index):\n assert hasattr(self,\n '_video_data_samples'), 'video_data_samples not set'\n return self._video_data_samples[index]\n\n def get_key_frames(self):\n assert hasattr(self, 'key_frames_inds'), \\\n 'key_frames_inds not set'\n assert isinstance(self.key_frames_inds, Sequence)\n key_frames_info = []\n for index in self.key_frames_inds:\n key_frames_info.append(self[index])\n return key_frames_info\n\n def get_ref_frames(self):\n assert hasattr(self, 'ref_frames_inds'), \\\n 'ref_frames_inds not set'\n ref_frames_info = []\n assert isinstance(self.ref_frames_inds, Sequence)\n for index in self.ref_frames_inds:\n ref_frames_info.append(self[index])\n return ref_frames_info\n\n def __len__(self):\n return len(self._video_data_samples) if hasattr(\n self, '_video_data_samples') else 0\n\n # TODO: add UT for this Tensor-like method\n # Tensor-like methods\n def to(self, *args, **kwargs) -> 'BaseDataElement':\n \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if hasattr(v, 'to'):\n v = v.to(*args, **kwargs)\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data\n\n # Tensor-like methods\n def cpu(self) -> 'BaseDataElement':\n \"\"\"Convert all tensors to CPU in data.\"\"\"\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if isinstance(v, (torch.Tensor, BaseDataElement)):\n v = v.cpu()\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data\n\n # Tensor-like methods\n def cuda(self) -> 'BaseDataElement':\n \"\"\"Convert all tensors to GPU in data.\"\"\"\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if isinstance(v, (torch.Tensor, BaseDataElement)):\n v = v.cuda()\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data\n\n # Tensor-like methods\n def npu(self) -> 'BaseDataElement':\n \"\"\"Convert all tensors to NPU in data.\"\"\"\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if isinstance(v, (torch.Tensor, BaseDataElement)):\n v = v.npu()\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data\n\n # Tensor-like methods\n def detach(self) -> 'BaseDataElement':\n \"\"\"Detach all tensors in data.\"\"\"\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if isinstance(v, (torch.Tensor, BaseDataElement)):\n v = v.detach()\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data\n\n # Tensor-like methods\n def numpy(self) -> 'BaseDataElement':\n \"\"\"Convert all tensors to 
np.ndarray in data.\"\"\"\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if isinstance(v, (torch.Tensor, BaseDataElement)):\n v = v.detach().cpu().numpy()\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data\n\n def to_tensor(self) -> 'BaseDataElement':\n \"\"\"Convert all np.ndarray to tensor in data.\"\"\"\n new_data = self.new()\n for k, v_list in self.items():\n data_list = []\n for v in v_list:\n if isinstance(v, np.ndarray):\n v = torch.from_numpy(v)\n elif isinstance(v, BaseDataElement):\n v = v.to_tensor()\n data_list.append(v)\n if len(data_list) > 0:\n new_data.set_data({f'{k}': data_list})\n return new_data\n\n # Tensor-like methods\n def clone(self) -> 'BaseDataElement':\n \"\"\"Deep copy the current data element.\n\n Returns:\n BaseDataElement: The copy of current data element.\n \"\"\"\n clone_data = self.__class__()\n clone_data.set_metainfo(dict(self.metainfo_items()))\n\n for k, v_list in self.items():\n clone_item_list = []\n for v in v_list:\n clone_item_list.append(v.clone())\n clone_data.set_data({k: clone_item_list})\n return clone_data" }, { "identifier": "BaseBoxes", "path": "mmdet/structures/bbox/base_boxes.py", "snippet": "class BaseBoxes(metaclass=ABCMeta):\n \"\"\"The base class for 2D box types.\n\n The functions of ``BaseBoxes`` lie in three fields:\n\n - Verify the boxes shape.\n - Support tensor-like operations.\n - Define abstract functions for 2D boxes.\n\n In ``__init__`` , ``BaseBoxes`` verifies the validity of the data shape\n w.r.t ``box_dim``. The tensor with the dimension >= 2 and the length\n of the last dimension being ``box_dim`` will be regarded as valid.\n ``BaseBoxes`` will restore them at the field ``tensor``. It's necessary\n to override ``box_dim`` in subclass to guarantee the data shape is\n correct.\n\n There are many basic tensor-like functions implemented in ``BaseBoxes``.\n In most cases, users can operate ``BaseBoxes`` instance like a normal\n tensor. To protect the validity of data shape, All tensor-like functions\n cannot modify the last dimension of ``self.tensor``.\n\n When creating a new box type, users need to inherit from ``BaseBoxes``\n and override abstract methods and specify the ``box_dim``. Then, register\n the new box type by using the decorator ``register_box_type``.\n\n Args:\n data (Tensor or np.ndarray or Sequence): The box data with shape\n (..., box_dim).\n dtype (torch.dtype, Optional): data type of boxes. Defaults to None.\n device (str or torch.device, Optional): device of boxes.\n Default to None.\n clone (bool): Whether clone ``boxes`` or not. 
Defaults to True.\n \"\"\"\n\n # Used to verify the last dimension length\n # Should override it in subclass.\n box_dim: int = 0\n\n def __init__(self,\n data: Union[Tensor, np.ndarray, Sequence],\n dtype: Optional[torch.dtype] = None,\n device: Optional[DeviceType] = None,\n clone: bool = True) -> None:\n if isinstance(data, (np.ndarray, Tensor, Sequence)):\n data = torch.as_tensor(data)\n else:\n raise TypeError('boxes should be Tensor, ndarray, or Sequence, ',\n f'but got {type(data)}')\n\n if device is not None or dtype is not None:\n data = data.to(dtype=dtype, device=device)\n # Clone the data to avoid potential bugs\n if clone:\n data = data.clone()\n # handle the empty input like []\n if data.numel() == 0:\n data = data.reshape((-1, self.box_dim))\n\n assert data.dim() >= 2 and data.size(-1) == self.box_dim, \\\n ('The boxes dimension must >= 2 and the length of the last '\n f'dimension must be {self.box_dim}, but got boxes with '\n f'shape {data.shape}.')\n self.tensor = data\n\n def convert_to(self, dst_type: Union[str, type]) -> 'BaseBoxes':\n \"\"\"Convert self to another box type.\n\n Args:\n dst_type (str or type): destination box type.\n\n Returns:\n :obj:`BaseBoxes`: destination box type object .\n \"\"\"\n from .box_type import convert_box_type\n return convert_box_type(self, dst_type=dst_type)\n\n def empty_boxes(self: T,\n dtype: Optional[torch.dtype] = None,\n device: Optional[DeviceType] = None) -> T:\n \"\"\"Create empty box.\n\n Args:\n dtype (torch.dtype, Optional): data type of boxes.\n device (str or torch.device, Optional): device of boxes.\n\n Returns:\n T: empty boxes with shape of (0, box_dim).\n \"\"\"\n empty_box = self.tensor.new_zeros(\n 0, self.box_dim, dtype=dtype, device=device)\n return type(self)(empty_box, clone=False)\n\n def fake_boxes(self: T,\n sizes: Tuple[int],\n fill: float = 0,\n dtype: Optional[torch.dtype] = None,\n device: Optional[DeviceType] = None) -> T:\n \"\"\"Create fake boxes with specific sizes and fill values.\n\n Args:\n sizes (Tuple[int]): The size of fake boxes. The last value must\n be equal with ``self.box_dim``.\n fill (float): filling value. Defaults to 0.\n dtype (torch.dtype, Optional): data type of boxes.\n device (str or torch.device, Optional): device of boxes.\n\n Returns:\n T: Fake boxes with shape of ``sizes``.\n \"\"\"\n fake_boxes = self.tensor.new_full(\n sizes, fill, dtype=dtype, device=device)\n return type(self)(fake_boxes, clone=False)\n\n def __getitem__(self: T, index: IndexType) -> T:\n \"\"\"Rewrite getitem to protect the last dimension shape.\"\"\"\n boxes = self.tensor\n if isinstance(index, np.ndarray):\n index = torch.as_tensor(index, device=self.device)\n if isinstance(index, Tensor) and index.dtype == torch.bool:\n assert index.dim() < boxes.dim()\n elif isinstance(index, tuple):\n assert len(index) < boxes.dim()\n # `Ellipsis`(...) 
is commonly used in index like [None, ...].\n # When `Ellipsis` is in index, it must be the last item.\n if Ellipsis in index:\n assert index[-1] is Ellipsis\n\n boxes = boxes[index]\n if boxes.dim() == 1:\n boxes = boxes.reshape(1, -1)\n return type(self)(boxes, clone=False)\n\n def __setitem__(self: T, index: IndexType, values: Union[Tensor, T]) -> T:\n \"\"\"Rewrite setitem to protect the last dimension shape.\"\"\"\n assert type(values) is type(self), \\\n 'The value to be set must be the same box type as self'\n values = values.tensor\n\n if isinstance(index, np.ndarray):\n index = torch.as_tensor(index, device=self.device)\n if isinstance(index, Tensor) and index.dtype == torch.bool:\n assert index.dim() < self.tensor.dim()\n elif isinstance(index, tuple):\n assert len(index) < self.tensor.dim()\n # `Ellipsis`(...) is commonly used in index like [None, ...].\n # When `Ellipsis` is in index, it must be the last item.\n if Ellipsis in index:\n assert index[-1] is Ellipsis\n\n self.tensor[index] = values\n\n def __len__(self) -> int:\n \"\"\"Return the length of self.tensor first dimension.\"\"\"\n return self.tensor.size(0)\n\n def __deepcopy__(self, memo):\n \"\"\"Only clone the ``self.tensor`` when applying deepcopy.\"\"\"\n cls = self.__class__\n other = cls.__new__(cls)\n memo[id(self)] = other\n other.tensor = self.tensor.clone()\n return other\n\n def __repr__(self) -> str:\n \"\"\"Return a strings that describes the object.\"\"\"\n return self.__class__.__name__ + '(\\n' + str(self.tensor) + ')'\n\n def new_tensor(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_tensor`` from self.tensor.\"\"\"\n return self.tensor.new_tensor(*args, **kwargs)\n\n def new_full(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_full`` from self.tensor.\"\"\"\n return self.tensor.new_full(*args, **kwargs)\n\n def new_empty(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_empty`` from self.tensor.\"\"\"\n return self.tensor.new_empty(*args, **kwargs)\n\n def new_ones(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_ones`` from self.tensor.\"\"\"\n return self.tensor.new_ones(*args, **kwargs)\n\n def new_zeros(self, *args, **kwargs) -> Tensor:\n \"\"\"Reload ``new_zeros`` from self.tensor.\"\"\"\n return self.tensor.new_zeros(*args, **kwargs)\n\n def size(self, dim: Optional[int] = None) -> Union[int, torch.Size]:\n \"\"\"Reload new_zeros from self.tensor.\"\"\"\n # self.tensor.size(dim) cannot work when dim=None.\n return self.tensor.size() if dim is None else self.tensor.size(dim)\n\n def dim(self) -> int:\n \"\"\"Reload ``dim`` from self.tensor.\"\"\"\n return self.tensor.dim()\n\n @property\n def device(self) -> torch.device:\n \"\"\"Reload ``device`` from self.tensor.\"\"\"\n return self.tensor.device\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"Reload ``dtype`` from self.tensor.\"\"\"\n return self.tensor.dtype\n\n @property\n def shape(self) -> torch.Size:\n return self.tensor.shape\n\n def numel(self) -> int:\n \"\"\"Reload ``numel`` from self.tensor.\"\"\"\n return self.tensor.numel()\n\n def numpy(self) -> np.ndarray:\n \"\"\"Reload ``numpy`` from self.tensor.\"\"\"\n return self.tensor.numpy()\n\n def to(self: T, *args, **kwargs) -> T:\n \"\"\"Reload ``to`` from self.tensor.\"\"\"\n return type(self)(self.tensor.to(*args, **kwargs), clone=False)\n\n def cpu(self: T) -> T:\n \"\"\"Reload ``cpu`` from self.tensor.\"\"\"\n return type(self)(self.tensor.cpu(), clone=False)\n\n def cuda(self: T, *args, **kwargs) -> T:\n \"\"\"Reload ``cuda`` from 
self.tensor.\"\"\"\n return type(self)(self.tensor.cuda(*args, **kwargs), clone=False)\n\n def clone(self: T) -> T:\n \"\"\"Reload ``clone`` from self.tensor.\"\"\"\n return type(self)(self.tensor)\n\n def detach(self: T) -> T:\n \"\"\"Reload ``detach`` from self.tensor.\"\"\"\n return type(self)(self.tensor.detach(), clone=False)\n\n def view(self: T, *shape: Tuple[int]) -> T:\n \"\"\"Reload ``view`` from self.tensor.\"\"\"\n return type(self)(self.tensor.view(shape), clone=False)\n\n def reshape(self: T, *shape: Tuple[int]) -> T:\n \"\"\"Reload ``reshape`` from self.tensor.\"\"\"\n return type(self)(self.tensor.reshape(shape), clone=False)\n\n def expand(self: T, *sizes: Tuple[int]) -> T:\n \"\"\"Reload ``expand`` from self.tensor.\"\"\"\n return type(self)(self.tensor.expand(sizes), clone=False)\n\n def repeat(self: T, *sizes: Tuple[int]) -> T:\n \"\"\"Reload ``repeat`` from self.tensor.\"\"\"\n return type(self)(self.tensor.repeat(sizes), clone=False)\n\n def transpose(self: T, dim0: int, dim1: int) -> T:\n \"\"\"Reload ``transpose`` from self.tensor.\"\"\"\n ndim = self.tensor.dim()\n assert dim0 != -1 and dim0 != ndim - 1\n assert dim1 != -1 and dim1 != ndim - 1\n return type(self)(self.tensor.transpose(dim0, dim1), clone=False)\n\n def permute(self: T, *dims: Tuple[int]) -> T:\n \"\"\"Reload ``permute`` from self.tensor.\"\"\"\n assert dims[-1] == -1 or dims[-1] == self.tensor.dim() - 1\n return type(self)(self.tensor.permute(dims), clone=False)\n\n def split(self: T,\n split_size_or_sections: Union[int, Sequence[int]],\n dim: int = 0) -> List[T]:\n \"\"\"Reload ``split`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim() - 1\n boxes_list = self.tensor.split(split_size_or_sections, dim=dim)\n return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n def chunk(self: T, chunks: int, dim: int = 0) -> List[T]:\n \"\"\"Reload ``chunk`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim() - 1\n boxes_list = self.tensor.chunk(chunks, dim=dim)\n return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n def unbind(self: T, dim: int = 0) -> T:\n \"\"\"Reload ``unbind`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim() - 1\n boxes_list = self.tensor.unbind(dim=dim)\n return [type(self)(boxes, clone=False) for boxes in boxes_list]\n\n def flatten(self: T, start_dim: int = 0, end_dim: int = -2) -> T:\n \"\"\"Reload ``flatten`` from self.tensor.\"\"\"\n assert end_dim != -1 and end_dim != self.tensor.dim() - 1\n return type(self)(self.tensor.flatten(start_dim, end_dim), clone=False)\n\n def squeeze(self: T, dim: Optional[int] = None) -> T:\n \"\"\"Reload ``squeeze`` from self.tensor.\"\"\"\n boxes = self.tensor.squeeze() if dim is None else \\\n self.tensor.squeeze(dim)\n return type(self)(boxes, clone=False)\n\n def unsqueeze(self: T, dim: int) -> T:\n \"\"\"Reload ``unsqueeze`` from self.tensor.\"\"\"\n assert dim != -1 and dim != self.tensor.dim()\n return type(self)(self.tensor.unsqueeze(dim), clone=False)\n\n @classmethod\n def cat(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:\n \"\"\"Cancatenates a box instance list into one single box instance.\n Similar to ``torch.cat``.\n\n Args:\n box_list (Sequence[T]): A sequence of box instances.\n dim (int): The dimension over which the box are concatenated.\n Defaults to 0.\n\n Returns:\n T: Concatenated box instance.\n \"\"\"\n assert isinstance(box_list, Sequence)\n if len(box_list) == 0:\n raise ValueError('box_list should not be a empty list.')\n\n 
assert dim != -1 and dim != box_list[0].dim() - 1\n assert all(isinstance(boxes, cls) for boxes in box_list)\n\n th_box_list = [boxes.tensor for boxes in box_list]\n return cls(torch.cat(th_box_list, dim=dim), clone=False)\n\n @classmethod\n def stack(cls: Type[T], box_list: Sequence[T], dim: int = 0) -> T:\n \"\"\"Concatenates a sequence of tensors along a new dimension. Similar to\n ``torch.stack``.\n\n Args:\n box_list (Sequence[T]): A sequence of box instances.\n dim (int): Dimension to insert. Defaults to 0.\n\n Returns:\n T: Concatenated box instance.\n \"\"\"\n assert isinstance(box_list, Sequence)\n if len(box_list) == 0:\n raise ValueError('box_list should not be a empty list.')\n\n assert dim != -1 and dim != box_list[0].dim()\n assert all(isinstance(boxes, cls) for boxes in box_list)\n\n th_box_list = [boxes.tensor for boxes in box_list]\n return cls(torch.stack(th_box_list, dim=dim), clone=False)\n\n @abstractproperty\n def centers(self) -> Tensor:\n \"\"\"Return a tensor representing the centers of boxes.\"\"\"\n pass\n\n @abstractproperty\n def areas(self) -> Tensor:\n \"\"\"Return a tensor representing the areas of boxes.\"\"\"\n pass\n\n @abstractproperty\n def widths(self) -> Tensor:\n \"\"\"Return a tensor representing the widths of boxes.\"\"\"\n pass\n\n @abstractproperty\n def heights(self) -> Tensor:\n \"\"\"Return a tensor representing the heights of boxes.\"\"\"\n pass\n\n @abstractmethod\n def flip_(self,\n img_shape: Tuple[int, int],\n direction: str = 'horizontal') -> None:\n \"\"\"Flip boxes horizontally or vertically in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n direction (str): Flip direction, options are \"horizontal\",\n \"vertical\" and \"diagonal\". Defaults to \"horizontal\"\n \"\"\"\n pass\n\n @abstractmethod\n def translate_(self, distances: Tuple[float, float]) -> None:\n \"\"\"Translate boxes in-place.\n\n Args:\n distances (Tuple[float, float]): translate distances. The first\n is horizontal distance and the second is vertical distance.\n \"\"\"\n pass\n\n @abstractmethod\n def clip_(self, img_shape: Tuple[int, int]) -> None:\n \"\"\"Clip boxes according to the image shape in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n \"\"\"\n pass\n\n @abstractmethod\n def rotate_(self, center: Tuple[float, float], angle: float) -> None:\n \"\"\"Rotate all boxes in-place.\n\n Args:\n center (Tuple[float, float]): Rotation origin.\n angle (float): Rotation angle represented in degrees. Positive\n values mean clockwise rotation.\n \"\"\"\n pass\n\n @abstractmethod\n def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:\n \"\"\"Geometric transformat boxes in-place.\n\n Args:\n homography_matrix (Tensor or np.ndarray]):\n Shape (3, 3) for geometric transformation.\n \"\"\"\n pass\n\n @abstractmethod\n def rescale_(self, scale_factor: Tuple[float, float]) -> None:\n \"\"\"Rescale boxes w.r.t. rescale_factor in-place.\n\n Note:\n Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n w.r.t ``scale_facotr``. 
The difference is that ``resize_`` only\n changes the width and the height of boxes, but ``rescale_`` also\n rescales the box centers simultaneously.\n\n Args:\n scale_factor (Tuple[float, float]): factors for scaling boxes.\n The length should be 2.\n \"\"\"\n pass\n\n @abstractmethod\n def resize_(self, scale_factor: Tuple[float, float]) -> None:\n \"\"\"Resize the box width and height w.r.t scale_factor in-place.\n\n Note:\n Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n w.r.t ``scale_facotr``. The difference is that ``resize_`` only\n changes the width and the height of boxes, but ``rescale_`` also\n rescales the box centers simultaneously.\n\n Args:\n scale_factor (Tuple[float, float]): factors for scaling box\n shapes. The length should be 2.\n \"\"\"\n pass\n\n @abstractmethod\n def is_inside(self,\n img_shape: Tuple[int, int],\n all_inside: bool = False,\n allowed_border: int = 0) -> BoolTensor:\n \"\"\"Find boxes inside the image.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n all_inside (bool): Whether the boxes are all inside the image or\n part inside the image. Defaults to False.\n allowed_border (int): Boxes that extend beyond the image shape\n boundary by more than ``allowed_border`` are considered\n \"outside\" Defaults to 0.\n Returns:\n BoolTensor: A BoolTensor indicating whether the box is inside\n the image. Assuming the original boxes have shape (m, n, box_dim),\n the output has shape (m, n).\n \"\"\"\n pass\n\n @abstractmethod\n def find_inside_points(self,\n points: Tensor,\n is_aligned: bool = False) -> BoolTensor:\n \"\"\"Find inside box points. Boxes dimension must be 2.\n\n Args:\n points (Tensor): Points coordinates. Has shape of (m, 2).\n is_aligned (bool): Whether ``points`` has been aligned with boxes\n or not. If True, the length of boxes and ``points`` should be\n the same. Defaults to False.\n\n Returns:\n BoolTensor: A BoolTensor indicating whether a point is inside\n boxes. Assuming the boxes has shape of (n, box_dim), if\n ``is_aligned`` is False. The index has shape of (m, n). If\n ``is_aligned`` is True, m should be equal to n and the index has\n shape of (m, ).\n \"\"\"\n pass\n\n @abstractstaticmethod\n def overlaps(boxes1: 'BaseBoxes',\n boxes2: 'BaseBoxes',\n mode: str = 'iou',\n is_aligned: bool = False,\n eps: float = 1e-6) -> Tensor:\n \"\"\"Calculate overlap between two set of boxes with their types\n converted to the present box type.\n\n Args:\n boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)\n or empty.\n boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)\n or empty.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n over foreground). Defaults to \"iou\".\n is_aligned (bool): If True, then m and n must be equal. Defaults\n to False.\n eps (float): A value added to the denominator for numerical\n stability. Defaults to 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n \"\"\"\n pass\n\n @abstractstaticmethod\n def from_instance_masks(masks: MaskType) -> 'BaseBoxes':\n \"\"\"Create boxes from instance masks.\n\n Args:\n masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or\n PolygonMasks instance with length of n.\n\n Returns:\n :obj:`BaseBoxes`: Converted boxes with shape of (n, box_dim).\n \"\"\"\n pass" } ]
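The ``BaseBoxes`` docstring that closes the context list above spells out the contract of a box type: a subclass fixes ``box_dim``, implements the abstract geometry methods, and then inherits the tensor-like behaviour (indexing that preserves the last dimension, ``cat``/``stack``, device moves). A short usage sketch of that behaviour, assuming mmdet's concrete ``HorizontalBoxes`` subclass (not part of this record), might look like:

import torch
from mmdet.structures.bbox import HorizontalBoxes  # assumed concrete BaseBoxes subclass with box_dim = 4

# Two boxes in (x1, y1, x2, y2) format; BaseBoxes.__init__ verifies the last dim equals box_dim.
boxes = HorizontalBoxes(torch.tensor([[0., 0., 10., 10.],
                                      [5., 5., 20., 30.]]))

print(len(boxes))        # 2 -> length of the first dimension of boxes.tensor
print(boxes.centers)     # the abstract ``centers`` property, implemented by the subclass
print(boxes[0].tensor)   # __getitem__ keeps the (1, box_dim) shape

boxes.clip_((15, 15))    # in-place clip to an image of height 15, width 15
merged = HorizontalBoxes.cat([boxes, boxes.clone()], dim=0)
print(merged.shape)      # torch.Size([4, 4])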
from typing import Optional, Sequence from mmcv.transforms import to_tensor from mmcv.transforms.base import BaseTransform from mmengine.structures import InstanceData, PixelData from mmdet.registry import TRANSFORMS from mmdet.structures import DetDataSample, ReIDDataSample, TrackDataSample from mmdet.structures.bbox import BaseBoxes import numpy as np
11,720
# Copyright (c) OpenMMLab. All rights reserved. @TRANSFORMS.register_module() class PackDetInputs(BaseTransform): """Pack the inputs data for the detection / semantic segmentation / panoptic segmentation. The ``img_meta`` item is always populated. The contents of the ``img_meta`` dictionary depends on ``meta_keys``. By default this includes: - ``img_id``: id of the image - ``img_path``: path to the image file - ``ori_shape``: original shape of the image as a tuple (h, w) - ``img_shape``: shape of the image input to the network as a tuple \ (h, w). Note that images may be zero padded on the \ bottom/right if the batch tensor is larger than this shape. - ``scale_factor``: a float indicating the preprocessing scale - ``flip``: a boolean indicating if image flip transform was used - ``flip_direction``: the flipping direction Args: meta_keys (Sequence[str], optional): Meta keys to be converted to ``mmcv.DataContainer`` and collected in ``data[img_metas]``. Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', 'flip', 'flip_direction')`` """ mapping_table = { 'gt_bboxes': 'bboxes', 'gt_bboxes_labels': 'labels', 'gt_masks': 'masks' } def __init__(self, meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', 'flip', 'flip_direction')): self.meta_keys = meta_keys def transform(self, results: dict) -> dict: """Method to pack the input data. Args: results (dict): Result dict from the data pipeline. Returns: dict: - 'inputs' (obj:`torch.Tensor`): The forward data of models. - 'data_sample' (obj:`DetDataSample`): The annotation info of the sample. """ packed_results = dict() if 'img' in results: img = results['img'] if len(img.shape) < 3: img = np.expand_dims(img, -1) # To improve the computational speed by by 3-5 times, apply: # If image is not contiguous, use # `numpy.transpose()` followed by `numpy.ascontiguousarray()` # If image is already contiguous, use # `torch.permute()` followed by `torch.contiguous()` # Refer to https://github.com/open-mmlab/mmdetection/pull/9533 # for more details if not img.flags.c_contiguous: img = np.ascontiguousarray(img.transpose(2, 0, 1)) img = to_tensor(img) else: img = to_tensor(img).permute(2, 0, 1).contiguous() packed_results['inputs'] = img if 'gt_ignore_flags' in results: valid_idx = np.where(results['gt_ignore_flags'] == 0)[0] ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0] data_sample = DetDataSample() instance_data = InstanceData() ignore_instance_data = InstanceData() for key in self.mapping_table.keys(): if key not in results: continue
# Copyright (c) OpenMMLab. All rights reserved. @TRANSFORMS.register_module() class PackDetInputs(BaseTransform): """Pack the inputs data for the detection / semantic segmentation / panoptic segmentation. The ``img_meta`` item is always populated. The contents of the ``img_meta`` dictionary depends on ``meta_keys``. By default this includes: - ``img_id``: id of the image - ``img_path``: path to the image file - ``ori_shape``: original shape of the image as a tuple (h, w) - ``img_shape``: shape of the image input to the network as a tuple \ (h, w). Note that images may be zero padded on the \ bottom/right if the batch tensor is larger than this shape. - ``scale_factor``: a float indicating the preprocessing scale - ``flip``: a boolean indicating if image flip transform was used - ``flip_direction``: the flipping direction Args: meta_keys (Sequence[str], optional): Meta keys to be converted to ``mmcv.DataContainer`` and collected in ``data[img_metas]``. Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', 'flip', 'flip_direction')`` """ mapping_table = { 'gt_bboxes': 'bboxes', 'gt_bboxes_labels': 'labels', 'gt_masks': 'masks' } def __init__(self, meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', 'flip', 'flip_direction')): self.meta_keys = meta_keys def transform(self, results: dict) -> dict: """Method to pack the input data. Args: results (dict): Result dict from the data pipeline. Returns: dict: - 'inputs' (obj:`torch.Tensor`): The forward data of models. - 'data_sample' (obj:`DetDataSample`): The annotation info of the sample. """ packed_results = dict() if 'img' in results: img = results['img'] if len(img.shape) < 3: img = np.expand_dims(img, -1) # To improve the computational speed by by 3-5 times, apply: # If image is not contiguous, use # `numpy.transpose()` followed by `numpy.ascontiguousarray()` # If image is already contiguous, use # `torch.permute()` followed by `torch.contiguous()` # Refer to https://github.com/open-mmlab/mmdetection/pull/9533 # for more details if not img.flags.c_contiguous: img = np.ascontiguousarray(img.transpose(2, 0, 1)) img = to_tensor(img) else: img = to_tensor(img).permute(2, 0, 1).contiguous() packed_results['inputs'] = img if 'gt_ignore_flags' in results: valid_idx = np.where(results['gt_ignore_flags'] == 0)[0] ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0] data_sample = DetDataSample() instance_data = InstanceData() ignore_instance_data = InstanceData() for key in self.mapping_table.keys(): if key not in results: continue
if key == 'gt_masks' or isinstance(results[key], BaseBoxes):
4
2023-12-23 08:36:47+00:00
16k
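The ``cropped_code``/``all_code`` fields of this record show ``PackDetInputs`` turning a pipeline ``results`` dict into a channel-first image tensor plus a ``DetDataSample``, splitting annotations by ``gt_ignore_flags``. A minimal, self-contained usage sketch follows; the import path, the restricted ``meta_keys`` and the toy values are assumptions for illustration, not part of the record:

import numpy as np
from mmdet.datasets.transforms import PackDetInputs  # assumed import path for the transform above

# Keep meta_keys limited to keys we actually provide so the sketch stays self-contained.
pack = PackDetInputs(meta_keys=('img_id', 'img_shape', 'ori_shape'))

results = {
    'img': np.zeros((32, 32, 3), dtype=np.uint8),        # HWC image
    'img_id': 0,
    'img_shape': (32, 32),
    'ori_shape': (32, 32),
    'gt_bboxes': np.array([[2., 2., 10., 10.]], dtype=np.float32),
    'gt_bboxes_labels': np.array([0], dtype=np.int64),
    'gt_ignore_flags': np.array([False]),                 # all instances valid
}

packed = pack.transform(results)
print(packed['inputs'].shape)   # torch.Size([3, 32, 32]) after the contiguous permute shown above
print(sorted(packed.keys()))    # the DetDataSample output key name may differ across versions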
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n 
audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n # from https://github.com/YYuX-1145/Bert-VITS2-Integration-package\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.randn(1024, len(phone))\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.randn(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.randn(1024, len(phone))\n ja_bert = torch.randn(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, 
sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "AudioVisemesLoader", "path": "data_utils.py", "snippet": "class AudioVisemesLoader(torch.utils.data.Dataset):\n \"\"\"\n loads audio, visemes torch variable pairs from visemes list file .\n file is like: \n ./records/date_time.z.npy|./records/date_time.npy\n \"\"\"\n \n def __init__(self, audio_visemes_list_file, hparams):\n 
self.audio_visemes_list_items = load_filepaths_and_text(audio_visemes_list_file)\n print('audio_visemes_list_items: ', len(self.audio_visemes_list_items))\n random.seed(1234)\n random.shuffle(self.audio_visemes_list_items)\n self.max_visemes_len = 1210\n self.min_visemes_len = 1190\n self._filter()\n\n\n def _filter(self):\n # check if the file exists, and can parse as torch tensor\n audio_visemes_list_items_new = []\n for audio_file, visemes_file in self.audio_visemes_list_items:\n if os.path.exists(audio_file) and os.path.exists(visemes_file):\n # check using torch.load\n try:\n audio = torch.load(audio_file)\n visemes = np.load(visemes_file)\n if visemes.shape[0] < self.min_visemes_len:\n print('drop this data: --------- visemes.shape[0] < self.min_visemes_len: ', visemes.shape[0], visemes_file)\n continue\n audio_visemes_list_items_new.append([audio_file, visemes_file])\n except Exception as e:\n print('error: ', audio_file, visemes_file)\n print(e)\n self.audio_visemes_list_items = audio_visemes_list_items_new\n print('audio_visemes_list_items after filter: ', len(self.audio_visemes_list_items))\n\n def __getitem__(self, index):\n # read these two torch.tensor\n audio_file, visemes_file = self.audio_visemes_list_items[index]\n audio_z = torch.load(audio_file).squeeze(0).detach()\n # [192, seq_len(1722)]\n\n visemes = np.load(visemes_file)\n visemes = torch.from_numpy(visemes)\n #[seq_len(1194), 61]\n visemes = visemes.transpose(0, 1)\n #[61, seq_len(1194)]\n if visemes.shape[1] > self.max_visemes_len:\n # cut the extra part\n # print('__getitem__ 1 cut visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n visemes = visemes[:, :self.max_visemes_len]\n elif visemes.shape[1] < self.max_visemes_len:\n # padding to max_visemes_len with last frame\n # print('__getitem__ 2 padding visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n # last_frame = visemes[-1]\n # visemes = np.concatenate([visemes, np.tile(last_frame, (self.max_visemes_len - visemes.shape[0], 1))], axis=0)\n # visemes = torch.from_numpy(visemes)\n pass\n\n visemes_offset = 0.08 # 将visemes延迟n s\n visemes_offset_frames = int(visemes_offset * const_map.ARKIT_FPS)\n visemes = visemes[:, visemes_offset_frames:]\n\n audio_z_offset = 0.0\n audio_z_offset_frames = int(audio_z_offset * const_map.Z_FPS)\n audio_z = audio_z[:, audio_z_offset_frames:]\n\n # 获取二者的时长,将过长的一方多的部分丢弃\n visemes_duration = visemes.shape[1] / const_map.ARKIT_FPS\n audio_z_duration = audio_z.shape[1] / const_map.Z_FPS\n if visemes_duration > audio_z_duration:\n visemes = visemes[:, :int(audio_z_duration * const_map.ARKIT_FPS)]\n elif visemes_duration < audio_z_duration:\n audio_z = audio_z[:, :int(visemes_duration * const_map.Z_FPS)]\n\n\n # print('__getitem__ 3 audio.shape: ', audio.shape, 'visemes.shape: ', visemes.shape,'file: ', visemes_file)\n return audio_z, visemes\n\n def __len__(self):\n return len(self.audio_visemes_list_items)" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n 
flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * 
s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n logw_sdp = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n l_length_sdp += torch.sum((logw_sdp - logw_) ** 2, [1, 2]) / torch.sum(x_mask)\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_, logw_sdp),\n g,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n def get_post_enc_dec(self):\n return self.enc_q, self.dec" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = 
d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.LSTM = nn.LSTM(\n 2 * filter_channels, filter_channels, batch_first=True, bidirectional=True\n )\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(2 * filter_channels, 1), nn.Sigmoid()\n )\n\n def forward_probability(self, x, dur):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = x.transpose(1, 2)\n x, _ = self.LSTM(x)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, dur)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "WavLMDiscriminator", "path": "models.py", "snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(\n self, slm_hidden=768, slm_layers=13, initial_channel=64, use_spectral_norm=False\n ):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(\n Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0)\n )\n\n self.convs = nn.ModuleList(\n [\n norm_f(\n nn.Conv1d(\n initial_channel, initial_channel * 2, kernel_size=5, padding=2\n )\n ),\n norm_f(\n nn.Conv1d(\n initial_channel * 2,\n initial_channel * 4,\n kernel_size=5,\n padding=2,\n )\n ),\n norm_f(\n nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)\n ),\n ]\n )\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n\n def forward(self, x):\n x = self.pre(x)\n\n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, modules.LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x" }, { "identifier": "VisemesNet", "path": "models.py", "snippet": "class VisemesNet(nn.Module):\n def active(self, x):\n # active_fun: 0: null, 1: tanh, 2: relu, 3: LeakyReLU\n if self.active_fun == 1:\n return torch.tanh(x)\n elif self.active_fun == 2:\n return torch.relu(x)\n elif self.active_fun == 3:\n return self.leakyReLU(x)\n else:\n return x\n\n def __init__(self, hidden_channels, lstm_bidirectional=True, active_fun = 3, enable_conv=True, \n use_transformer = False, 
enable_dropout=True):\n super(VisemesNet, self).__init__()\n self.lstm_bidirectional = lstm_bidirectional\n self.lstm_directions = 2 if lstm_bidirectional else 1\n self.use_transformer = use_transformer\n self.enable_dropout = enable_dropout\n if active_fun == 3:\n self.leakyReLU = nn.LeakyReLU(negative_slope=0.01)\n if use_transformer:\n num_heads=8\n num_layers=3\n dim_feedforward=512\n dropout=0.1\n activation=\"relu\"\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(\n d_model=hidden_channels, \n nhead=num_heads,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=True\n )\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)\n else:\n self.lstm = nn.LSTM(input_size=hidden_channels, hidden_size=128, num_layers=3, batch_first=True, bidirectional=lstm_bidirectional)\n if use_transformer:\n self.fc1 = nn.Linear(hidden_channels, 96)\n else:\n self.fc1 = nn.Linear(128 * self.lstm_directions, 96)\n self.fc2 = nn.Linear(96, 61)\n dropout_rate = 0.5\n if self.enable_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n conv_kernel_pre = 15\n conv_kernel_post = 11\n self.conv1d_pre = nn.Conv1d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=conv_kernel_pre, stride=1, padding=conv_kernel_pre//2)\n self.conv1d_post = nn.Conv1d(in_channels=61, out_channels=61, kernel_size=conv_kernel_post, stride=1, padding=conv_kernel_post//2)\n self.enable_conv = enable_conv\n self.active_fun = active_fun\n\n def forward(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.use_transformer:\n return self.forward_transformer(x, y)\n else:\n return self.forward_lstm(x, y)\n\n def forward_transformer(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n # batch_first: True (batch, seq, feature); False (seq, batch, feature).\n x = x.transpose(1, 2)\n\n expressions = self.transformer_encoder(x)\n \n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n # expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n\n return expressions \n\n def forward_lstm(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n x = x.transpose(1, 2)\n # x [batch_size, seq_len, hidden_channels]\n expressions = None\n expressions, _ = self.lstm(x)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n return expressions\n \n def init_weights(self):\n # 初始化权重\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n nn.init.xavier_uniform_(param.data)\n elif 'weight_hh' in name:\n nn.init.orthogonal_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.BatchNorm1d):\n 
nn.init.constant_(m.weight.data, 1)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight.data)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.TransformerEncoderLayer):\n for name, param in m.named_parameters():\n if 'weight' in name:\n if param.dim() == 1:\n nn.init.normal_(param.data)\n else:\n nn.init.xavier_uniform_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.TransformerEncoder):\n for param in m.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param.data)\n else:\n nn.init.constant_(param.data, 0)" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "WavLMLoss", "path": "losses.py", "snippet": "class WavLMLoss(torch.nn.Module):\n def __init__(self, model, wd, model_sr, slm_sr=16000):\n super(WavLMLoss, self).__init__()\n self.wavlm = AutoModel.from_pretrained(model)\n self.wd = wd\n self.resample = torchaudio.transforms.Resample(model_sr, slm_sr)\n self.wavlm.eval()\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def forward(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16.squeeze(), output_hidden_states=True\n ).hidden_states\n\n floss = 0\n for er, eg in zip(wav_embeddings, y_rec_embeddings):\n floss += torch.mean(torch.abs(er - eg))\n\n return floss.mean()\n\n def generator(self, y_rec):\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_df_hat_g = self.wd(y_rec_embeddings)\n loss_gen = torch.mean((1 - y_df_hat_g) ** 2)\n\n return loss_gen\n\n def discriminator(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n 
input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n y_d_gs = self.wd(y_rec_embeddings)\n\n y_df_hat_r, y_df_hat_g = y_d_rs, y_d_gs\n\n r_loss = torch.mean((1 - y_df_hat_r) ** 2)\n g_loss = torch.mean((y_df_hat_g) ** 2)\n\n loss_disc_f = r_loss + g_loss\n\n return loss_disc_f.mean()\n\n def discriminator_forward(self, wav):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n\n return y_d_rs" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import platform import os import torch import torch.distributed as dist import logging import argparse import datetime import gc import commons import utils from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from config import config from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler, AudioVisemesLoader, ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, WavLMDiscriminator, VisemesNet, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss, WavLMLoss, ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
14177
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # 使用最简单的单机模式,仅训练隐变量z到表情(visemes)的全连接 VisemesFCNet 的参数 global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False) net_v = VisemesNet(hps.model.hidden_channels).cuda() latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth") if latest_model_path is not None: _, optim_d, _, epoch_str = utils.load_checkpoint(latest_model_path, net_v, None, skip_optimizer=False) else : epoch_str = 1 global_visemes_step = 0 net_v.init_weights() optim_v = torch.optim.AdamW( net_v.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate scheduler_v = torch.optim.lr_scheduler.ExponentialLR(optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2, ) scaler = GradScaler(enabled=hps.train.bf16_run) for epoch in range(epoch_str, hps.train.epochs + 1): train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler) scheduler_v.step() if epoch % hps.train.eval_interval == 0: eval_visemes_only(epoch, hps, net_v, eval_loader) utils.save_checkpoint(net_v, optim_v,hps.train.learning_rate , epoch, os.path.join(hps.model_dir, "V_{}.pth".format(epoch))) def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler): for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)): spec, visemes = spec.cuda(), visemes.cuda() with autocast(enabled=hps.train.bf16_run): # 通过VisemesNet从z生成visemes_hat,和均方差 visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) optim_v.zero_grad() scaler.scale(visemes_hat_mse).backward() scaler.unscale_(optim_v) grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None) scaler.step(optim_v) global global_visemes_step global_visemes_step += 1 if batch_idx % hps.train.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format( epoch, batch_idx * len(spec), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), visemes_hat_mse.item(), grad_norm_v)) def get_visemes_mse(visemes, visemes_hat): if visemes.shape[-1] != visemes_hat.shape[-1]: # 如果y和x的最低维度不一样 visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1], mode='linear', align_corners=True) # 对x进行线性插值,使其形状与y一致 visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2)) return visemes_hat_mse def eval_visemes_only(epoch, hps, net_v, eval_loader): net_v.eval() with torch.no_grad(): visemes_hat_mse_sum = 0.0 for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)): spec, visemes = spec.cuda(), visemes.cuda() # 通过VisemesFCNet从z生成visemes_hat,和均方差 visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) visemes_hat_mse_sum += visemes_hat_mse # print('visemes_hat_mse', visemes_hat_mse) break visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1) log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(epoch, visemes_hat_mse_avg) print(log_str) logger.warning(log_str) net_v.train() def run(): # 环境变量解析
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) logger = logging.getLogger(__name__) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 global_step = 0 global_visemes_step = 0 def run_only_visemes(hps): # 使用最简单的单机模式,仅训练隐变量z到表情(visemes)的全连接 VisemesFCNet 的参数 global global_visemes_step torch.manual_seed(hps.train.seed) torch.cuda.set_device(0) train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data) train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True) eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False) net_v = VisemesNet(hps.model.hidden_channels).cuda() latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth") if latest_model_path is not None: _, optim_d, _, epoch_str = utils.load_checkpoint(latest_model_path, net_v, None, skip_optimizer=False) else : epoch_str = 1 global_visemes_step = 0 net_v.init_weights() optim_v = torch.optim.AdamW( net_v.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate scheduler_v = torch.optim.lr_scheduler.ExponentialLR(optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2, ) scaler = GradScaler(enabled=hps.train.bf16_run) for epoch in range(epoch_str, hps.train.epochs + 1): train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler) scheduler_v.step() if epoch % hps.train.eval_interval == 0: eval_visemes_only(epoch, hps, net_v, eval_loader) utils.save_checkpoint(net_v, optim_v,hps.train.learning_rate , epoch, os.path.join(hps.model_dir, "V_{}.pth".format(epoch))) def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler): for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)): spec, visemes = spec.cuda(), visemes.cuda() with autocast(enabled=hps.train.bf16_run): # 通过VisemesNet从z生成visemes_hat,和均方差 visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) optim_v.zero_grad() scaler.scale(visemes_hat_mse).backward() scaler.unscale_(optim_v) grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None) scaler.step(optim_v) global global_visemes_step global_visemes_step += 1 if batch_idx % hps.train.log_interval == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format( epoch, batch_idx * len(spec), len(train_loader.dataset), 100. 
* batch_idx / len(train_loader), visemes_hat_mse.item(), grad_norm_v)) def get_visemes_mse(visemes, visemes_hat): if visemes.shape[-1] != visemes_hat.shape[-1]: # 如果y和x的最低维度不一样 visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1], mode='linear', align_corners=True) # 对x进行线性插值,使其形状与y一致 visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2)) return visemes_hat_mse def eval_visemes_only(epoch, hps, net_v, eval_loader): net_v.eval() with torch.no_grad(): visemes_hat_mse_sum = 0.0 for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)): spec, visemes = spec.cuda(), visemes.cuda() # 通过VisemesFCNet从z生成visemes_hat,和均方差 visemes_hat = net_v(spec) visemes_hat_mse = get_visemes_mse(visemes, visemes_hat) visemes_hat_mse_sum += visemes_hat_mse # print('visemes_hat_mse', visemes_hat_mse) break visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1) log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(epoch, visemes_hat_mse_avg) print(log_str) logger.warning(log_str) net_v.train() def run(): # 环境变量解析
envs = config.train_ms_config.env
0
2023-12-27 03:09:11+00:00
16k
chinhsuanwu/ifusion-threestudio
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n def forward(self, x, context=None, mask=None):\n h = 
self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return 
torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
10896
__conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else:
betas = make_beta_schedule(
6
2023-12-27 20:30:33+00:00
16k
gardenifi/server
app/ble/wifi.py
[ { "identifier": "Helpers", "path": "app/raspi/helpers.py", "snippet": "class Helpers:\n \"\"\"\n The `Helpers` class provides various helper methods for performing tasks\n such as setting valves, getting system information, storing and loading\n objects to/from files, managing WiFi networks, and updating the `wpa_supplicant.conf` file.\n \"\"\"\n\n __instance = None\n __lock = threading.Lock()\n\n def __new__(cls):\n \"\"\"\n Create a new instance of the Helpers class using the singleton design pattern.\n\n Returns:\n An instance of the Helpers class.\n\n Example Usage:\n instance = Helpers()\n \"\"\"\n if cls.__instance is None:\n with cls.__lock:\n cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n return cls.__instance\n\n @classmethod\n def destroy_instance(cls):\n \"\"\"\n Destroy the instance of the Helpers class.\n\n This method sets the instance of the Helpers class to None, effectively destroying the instance.\n\n Example Usage:\n ```python\n instance = Helpers() # Create an instance of the Helpers class\n Helpers.destroy_instance() # Destroy the instance\n print(instance) # Output: None\n ```\n\n Inputs:\n None\n\n Outputs:\n None\n \"\"\"\n cls.__instance = None\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n\n @property\n def toggle_statuses(self):\n \"\"\"\n Getter method for the toggle_statuses property.\n\n Returns:\n dict: A dictionary containing toggle statuses.\n\n Example:\n Access toggle statuses using `instance.toggle_statuses`.\n \"\"\"\n return self._toggle_statuses\n\n @toggle_statuses.setter\n def toggle_statuses(self, value):\n \"\"\"\n Setter method for the toggle_statuses property.\n\n Args:\n value (dict): A dictionary containing toggle statuses to set.\n\n Example:\n Set toggle statuses using `instance.toggle_statuses = new_statuses`.\n \"\"\"\n self._toggle_statuses = value\n\n @property\n def ap_array(self):\n \"\"\"\n Getter method for the _ap_array property.\n\n Returns:\n An array of wifi networks\n\n Example:\n Access toggle statuses using `instance.ap_array`.\n \"\"\"\n return self._ap_array\n\n @ap_array.setter\n def ap_array(self, value):\n \"\"\"\n Setter method for the _ap_array property.\n\n Args:\n value (dict): An array containing the wifi networks to set.\n\n Example:\n Set toggle statuses using `instance.ap_array = new_ap_array`.\n \"\"\"\n self._ap_array = value\n\n def set_valves(self, valves):\n \"\"\"\n Set valve statuses in the toggle_statuses dictionary.\n\n Args:\n valves (str or dict): A string or dictionary representing valve statuses.\n\n Example:\n instance.set_valves('{\"valve1\": true, \"valve2\": false}')\n \"\"\"\n try:\n if isinstance(valves, str):\n valves = ast.literal_eval(valves)\n else:\n valves = ast.literal_eval(str(valves))\n self._toggle_statuses[\"valves\"] = valves\n except Exception as exception:\n logger.error(f\"Error in set_valves: {exception}\")\n raise\n\n def extract_local_ip(self):\n \"\"\"\n Extract the local IP address of the device.\n\n Returns:\n str: The local IP address.\n\n Example:\n local_ip = instance.extract_local_ip()\n \"\"\"\n tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n tcp_sock.connect((\"8.8.8.8\", 1))\n ip_address = tcp_sock.getsockname()[0]\n except Exception:\n ip_address = \"127.0.0.1\"\n finally:\n tcp_sock.close()\n return ip_address\n\n def get_uptime(self):\n \"\"\"\n Get the system uptime.\n\n Returns:\n str: The 
system uptime.\n\n Example:\n uptime = instance.get_uptime()\n \"\"\"\n try:\n result = subprocess.run([\"uptime\", \"-p\"], stdout=subprocess.PIPE, text=True, check=True)\n return result.stdout.replace(\"\\n\", \"\")\n except Exception as e:\n logger.error(f\"Error retrieving uptime: {e}\")\n return str(e)\n\n def get_git_commit_id(self):\n \"\"\"\n Get the Git commit ID of the current project.\n\n Returns:\n str: The Git commit ID.\n\n Example:\n commit_id = instance.get_git_commit_id()\n \"\"\"\n # Specify the file path\n file_path = \"app/git_commit_id.txt\"\n\n # Open the file in read mode ('r')\n try:\n with open(file_path, encoding=\"utf-8\") as file:\n # Read the entire content of the file\n content = file.read().replace(\"\\n\", \"\")\n logger.debug(f\"File content: {content}\")\n return content\n except FileNotFoundError as e:\n logger.error(f\"The file '{file_path}' does not exist.\")\n return str(e)\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Error retrieving git log: {e}\")\n return str(e)\n\n def store_object_to_file(self, filename, local_object):\n \"\"\"\n Store a local object to a file using pickle.\n\n Args:\n filename (str): The name of the file to store the object.\n local_object (object): The object to be stored.\n\n Example:\n instance.store_object_to_file('data.pkl', data)\n \"\"\"\n try:\n with open(filename, \"wb\") as obj_file:\n pickle.dump(local_object, obj_file)\n logger.info(f\"Stored local object file: {filename}: {local_object}\")\n obj_file.close()\n return local_object\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def store_toggle_statuses_to_file(self):\n \"\"\"\n Store toggle statuses to a file.\n\n Returns:\n dict: The toggle statuses being stored.\n\n Example:\n stored_statuses = instance.store_toggle_statuses_to_file()\n \"\"\"\n return self.store_object_to_file(STATUSES_FILE, self._toggle_statuses)\n\n def store_wifi_networks_to_file(self):\n \"\"\"\n Store WiFi networks to a file.\n\n Returns:\n list: The WiFi networks being stored.\n\n Example:\n stored_networks = instance.store_wifi_networks_to_file()\n \"\"\"\n return self.store_object_to_file(NETWORKS_FILE, self._ap_array)\n\n def load_object_from_file(self, filename):\n \"\"\"\n Load a local object from a file using pickle.\n\n Args:\n filename (str): The name of the file to load the object from.\n\n Returns:\n object: The loaded object.\n\n Example:\n loaded_object = instance.load_object_from_file('data.pkl')\n \"\"\"\n try:\n local_obj = {}\n with open(filename, \"rb\") as obj_file:\n local_obj = pickle.load(obj_file)\n logger.info(f\"Loaded local object file: {filename}: {local_obj}\")\n obj_file.close()\n return local_obj\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n self.store_object_to_file(filename, local_obj)\n return local_obj\n\n def load_toggle_statuses_from_file(self):\n \"\"\"\n Load toggle statuses from a file and update the instance's _toggle_statuses attribute.\n \"\"\"\n self._toggle_statuses = self.load_object_from_file(STATUSES_FILE)\n\n def load_wifi_networks_from_file(self):\n \"\"\"\n Load WiFi networks from a file and update the instance's _ap_array attribute.\n \"\"\"\n self._ap_array = self.load_object_from_file(NETWORKS_FILE)\n\n def get_timezone(self):\n \"\"\"\n Get the system timezone.\n\n Returns:\n str: The system timezone.\n\n Example:\n timezone = instance.get_timezone()\n \"\"\"\n return str(time.tzname[time.daylight])\n\n def check_empty_toggle(self, valve):\n \"\"\"\n 
Check if a toggle status is empty for a specific valve and set a default value if it is.\n\n Args:\n valve (str): The name of the valve.\n\n Example:\n instance.check_empty_toggle(\"out1\")\n \"\"\"\n if self._toggle_statuses.get(valve) is None:\n self._toggle_statuses[valve] = 0\n self._toggle_statuses[valve] = self.set_gpio_outputs(self._toggle_statuses[valve], valve)\n\n def get_toggle_statuses(self):\n \"\"\"\n Get and update toggle statuses, system information, and store them to a file.\n\n Returns:\n dict: The updated toggle statuses.\n\n Example:\n updated_statuses = instance.get_toggle_statuses()\n \"\"\"\n if \"valves\" not in self._toggle_statuses:\n self.set_valves([])\n\n self.check_empty_toggle(\"out1\")\n self.check_empty_toggle(\"out2\")\n self.check_empty_toggle(\"out3\")\n self.check_empty_toggle(\"out4\")\n\n self._toggle_statuses[\"server_time\"] = str(datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n self._toggle_statuses[\"tz\"] = self.get_timezone()\n self._toggle_statuses[\"hw_id\"] = RPI_HW_ID\n\n logger.info(f\"Valves statuses:{self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n\n return self._toggle_statuses\n\n def set_gpio_outputs(self, status, valve):\n \"\"\"\n Set GPIO outputs for a specified valve.\n\n Args:\n status (int): The status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n int: The modified status.\n\n Example:\n modified_status = instance.set_gpio_outputs(1, \"out1\")\n \"\"\"\n status = bool(status in (1, 2))\n logger.info(f\"Set Output of Valve: {valve}::{status}\")\n if ARCH == \"arm\":\n if valve == \"out2\":\n logger.info(f\"===========> Setting PIN 11 GPIO.output...{status}\")\n # RuntimeError: Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)\n GPIO.output(11, status)\n logger.info(f\"===========> PIN 11 Status GPIO.input: {GPIO.input(11)}\")\n return 1 if status is True else 0\n\n def toggle(self, status, valve):\n \"\"\"\n Toggle a valve, set GPIO outputs, update toggle statuses, and store them to a file.\n\n Args:\n status (int): The new status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n str: A confirmation message.\n\n Example:\n confirmation = instance.toggle(1, \"out1\")\n \"\"\"\n status = self.set_gpio_outputs(status, valve)\n self._toggle_statuses[valve] = status\n logger.info(f\"Modified valves statuses: {self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n return \"OK\"\n\n @property\n def is_connected_to_inet(self):\n \"\"\"\n Get the current internet connection status.\n\n Returns:\n bool: True if connected, False otherwise.\n\n Example:\n connection_status = instance.is_connected_to_inet()\n \"\"\"\n return self._is_connected_to_inet\n\n @is_connected_to_inet.setter\n def is_connected_to_inet(self, value):\n \"\"\"\n Set the current internet connection status.\n\n Returns:\n None\n\n Example:\n instance.is_connected_to_inet = connection_status\n \"\"\"\n self._is_connected_to_inet = value\n\n def system_reboot(self):\n \"\"\"\n Reboot the system after a 2-second delay.\n \"\"\"\n logger.info(\"Rebooting in 2 seconds...\")\n time.sleep(2)\n try:\n subprocess.run([\"reboot\"], stdout=subprocess.PIPE, text=True, check=True)\n except Exception as e:\n logger.error(f\"Error rebooting: {e}\")\n\n def system_update(self):\n \"\"\"\n Update the system through git.\n \"\"\"\n logger.info(\"Git update code and restart...\")\n try:\n subprocess.run([\"/usr/bin/git\", \"pull\"], stdout=subprocess.PIPE, text=True, 
check=True)\n os.kill(os.getpid(), signal.SIGTERM)\n except Exception as e:\n logger.error(f\"Error updating git: {e}\")\n\n def checking_for_duplicate_ssids(self, ssid, ap_array):\n \"\"\"\n Check for duplicate SSIDs in the list of WiFi networks.\n\n Args:\n ssid (str): The SSID to check.\n ap_array (list): The list of WiFi networks.\n\n Returns:\n bool: True if a duplicate is found, False otherwise.\n\n Example:\n is_duplicate = instance.checking_for_duplicate_ssids(\"MyWiFi\", wifi_networks)\n \"\"\"\n for wifi in ap_array:\n if wifi[\"ssid\"] == ssid:\n return True\n return False\n\n def scan_rpi_wifi_networks(self, refresh=False):\n \"\"\"\n Scan for available WiFi networks and update the instance's _ap_array attribute.\n\n Args:\n refresh (bool): If True, force a refresh of the WiFi networks list.\n\n Returns:\n list: The updated list of WiFi networks.\n\n Example:\n wifi_networks = instance.scan_rpi_wifi_networks()\n \"\"\"\n self._ap_array = []\n index = 0\n if not os.path.exists(NETWORKS_FILE):\n refresh = True\n if refresh:\n if ARCH == \"arm\":\n with subprocess.Popen([\"iwlist\", \"scan\"], stdout=subprocess.PIPE) as iwlist_raw:\n ap_list, err = iwlist_raw.communicate()\n if err is not None:\n logger.error(f\"Popen error: {err}\")\n return self._ap_array\n logger.debug(f\"iwlist scan command output: {ap_list}\")\n for line in ap_list.decode(\"utf-8\").rsplit(\"\\n\"):\n logger.debug(f\"Line: {line}\")\n if \"ESSID\" in line:\n ap_ssid = line[27:-1]\n if ap_ssid != \"\" and not self.checking_for_duplicate_ssids(ap_ssid, self._ap_array):\n index += 1\n logger.info(f\"id = {index}, ssid = {ap_ssid}\")\n wifi_network = {\"id\": index, \"ssid\": str(ap_ssid)}\n self._ap_array.append(json.loads(json.dumps(wifi_network)))\n self.store_wifi_networks_to_file()\n else:\n self._ap_array = []\n else:\n self.load_wifi_networks_from_file()\n\n return self._ap_array\n\n def store_wpa_ssid_key(self, ssid, wifi_key):\n \"\"\"\n Store the WPA SSID and key, and update the WPA supplicant configuration.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if the update is successful, False otherwise.\n\n Example:\n success = instance.store_wpa_ssid_key(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n logger.info(f\"ssid: {ssid}, wifi_key: {wifi_key}\")\n return self.update_wpa_supplicant(ssid, wifi_key)\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def is_raspberry_pi_zero(self):\n \"\"\"\n Check whether we're hosted in an RPi Zero or not.\n \"\"\"\n try:\n with open(\"/proc/cpuinfo\", encoding=\"utf8\") as cpuinfo:\n for line in cpuinfo:\n if line.startswith(\"Model\"):\n model_info = line.strip().split(\":\")\n model_name = model_info[1].strip()\n return \"Raspberry Pi Zero\" in model_name\n return False\n except FileNotFoundError as fnfex:\n logger.error(f\"Error: {fnfex}\")\n return False\n\n def write_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Write the WPA supplicant configuration to a temporary file.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n \"\"\"\n with open(WPA_SUPL_CONF_TMP, \"w\", encoding=\"utf8\") as temp_conf_file:\n temp_conf_file.write(\"ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\\n\")\n temp_conf_file.write(\"update_config=1\\n\")\n temp_conf_file.write(\"\\n\")\n temp_conf_file.write(\"network={\\n\")\n temp_conf_file.write('\tssid=\"' + str(ssid) + '\"\\n')\n if wifi_key == 
\"\":\n temp_conf_file.write(\"\tkey_mgmt=NONE\\n\")\n else:\n temp_conf_file.write('\tpsk=\"' + str(wifi_key) + '\"\\n')\n temp_conf_file.write(\"}\\n\")\n temp_conf_file.close()\n\n def get_wireless_interface(self):\n \"\"\"\n Get the wireless interface name of the device.\n\n Returns:\n str: The wireless interface name.\n\n Example:\n interface_name = instance.get_wireless_interface()\n \"\"\"\n try:\n ifconfig_output = subprocess.check_output([\"ifconfig\"]).decode(\"utf-8\")\n wireless_interfaces = re.findall(r\"wlan[0-9]+\", ifconfig_output)\n if wireless_interfaces:\n return wireless_interfaces[0]\n except subprocess.CalledProcessError as ex:\n logger.error(f\"Error: {ex}\")\n raise\n return None\n\n def update_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Update the WPA supplicant configuration and check for internet connectivity.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if connected to the internet after the update, False otherwise.\n\n Example:\n connected = instance.update_wpa_supplicant(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n self._is_connected_to_inet = False\n if RUNNING_UNIT_TESTS and ssid == DUMMY_SSID and wifi_key == DUMMY_PASSKEY:\n return True\n # In case of Raspberry Pi Zero NetworkManager stucks. So let's go with the wap_supplicant\n # modification approach.\n if self.is_raspberry_pi_zero():\n self.write_wpa_supplicant(ssid, wifi_key)\n os.system(\n \"cp /etc/wpa_supplicant/wpa_supplicant.conf \\\n /etc/wpa_supplicant/wpa_supplicant.conf.bak\"\n )\n os.system(\"cp \" + WPA_SUPL_CONF_TMP + \" /etc/wpa_supplicant/wpa_supplicant.conf\")\n wpa_cli_cmd = \"sudo wpa_cli -i wlan0 reconfigure\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command {wpa_cli_cmd}:{output.decode('utf8')}\")\n else:\n wpa_cli_cmd = f\"sudo nmcli device wifi connect {ssid} password {wifi_key}\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}:{output.decode('utf8')}`\")\n\n wireless_interface = self.get_wireless_interface()\n logger.info(f\"wireless_interface `{wireless_interface}`\")\n wpa_cli_cmd = f\"wpa_cli -i {wireless_interface} status | grep state | cut -d'=' -f2\"\n logger.info(f\"Command to run: `{wpa_cli_cmd}`\")\n retries = 0\n while retries < 30:\n retries = retries + 1\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}`:{output.decode('utf8')}\")\n if str(output.decode(\"utf8\")) == \"COMPLETED\\n\":\n self._is_connected_to_inet = True\n else:\n time.sleep(2)\n\n logger.info(f\"Connected to internet: {self._is_connected_to_inet}\")\n return self._is_connected_to_inet\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def sleep_and_reboot_for_wpa(self):\n \"\"\"\n Sleep for a short period and then reboot the system.\n \"\"\"\n self.system_reboot()" }, { "identifier": "Services", "path": "app/raspi/services.py", "snippet": "class Services:\n \"\"\"\n The `Services` class provides various methods for managing and controlling\n services related to a Raspberry Pi device, such as turning on/off valves,\n storing and deleting program cycles, loading program cycles, discovering\n WiFi networks, and saving WiFi network configurations.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self._scheduler = BackgroundScheduler()\n self._scheduler_started = False\n\n @property\n def 
scheduler_started(self):\n \"\"\"getter\"\"\"\n return self._scheduler_started\n\n @scheduler_started.setter\n def scheduler_started(self, value):\n \"\"\"setter\"\"\"\n self._scheduler_started = value\n\n @property\n def scheduler(self):\n \"\"\"getter\"\"\"\n return self._scheduler\n\n @scheduler.setter\n def scheduler(self, value):\n \"\"\"setter\"\"\"\n self._scheduler = value\n\n def turn_on_from_program(self, valve):\n \"\"\"\n Turn on a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(2, \"out\" + str(valve))\n\n def turn_off_from_program(self, valve):\n \"\"\"\n Turn off a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(0, \"out\" + str(valve))\n\n def get_stop_datetime(self, day, start_hour, start_min, period):\n \"\"\"\n Calculate the stop time for a program cycle.\n\n Parameters:\n - day (str): The day of the week.\n - start_hour (int): The starting hour.\n - start_min (int): The starting minute.\n - period (int): The duration of the cycle in minutes.\n\n Returns:\n tuple: A tuple containing the stop day, stop hour, and stop minute.\n \"\"\"\n logger.debug(f\"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}\")\n stop_day_index = DAYS.index(day)\n logger.debug(f\"stop_day_index {stop_day_index}\")\n\n stop_min = (start_min + period) % 60\n logger.debug(f\"stop_min {stop_min}\")\n\n if stop_min < start_min:\n # should go to the next hour\n stop_hour = (start_hour + 1) % 24\n # should go to the next day\n if stop_hour < start_hour:\n stop_day_index = (stop_day_index + 1) % 7\n else:\n stop_hour = start_hour\n\n logger.debug(f\"stop_hour {stop_hour}\")\n\n stop_day = DAYS[stop_day_index]\n logger.debug(f\"stop_day: {stop_day}\")\n\n return stop_day, stop_hour, stop_min\n\n def store_program_cycles(self, json_data, store=False) -> None:\n \"\"\"\n Store program cycles and schedule them using the scheduler.\n\n Parameters:\n - json_data (dict): JSON data containing program information.\n - store (bool, optional): Whether to store the program information. Default is False.\n\n Returns:\n None\n \"\"\"\n try:\n triggers_to_start = []\n triggers_to_stop = []\n for day in json_data[\"days\"].split(\",\"):\n if day not in DAYS:\n raise DayValueException(f\"{day} is not correct! 
Accepted values: {DAYS}\")\n for cycle in json_data[\"cycles\"]:\n logger.info(f\"Cycle: {cycle}\")\n if int(cycle[\"min\"]) <= 0:\n logger.info(\"This cycle should not be considered to be in the program due to min <=0.\")\n continue\n start_hour = cycle[\"start\"].split(\":\")[0]\n start_min = cycle[\"start\"].split(\":\")[1]\n\n logger.info(f\"Start: {day} at {start_hour}:{start_min}\")\n triggers_to_start.append(CronTrigger(day_of_week=day, hour=int(start_hour), minute=int(start_min)))\n\n stop_day, stop_hour, stop_min = self.get_stop_datetime(day, int(start_hour), int(start_min), int(cycle[\"min\"]))\n logger.info(f\"Stop: {stop_day} at {stop_hour}:{stop_min}\")\n triggers_to_stop.append(CronTrigger(day_of_week=stop_day, hour=stop_hour, minute=stop_min))\n\n logger.info(f\"FINAL Triggers To Start to be in the program:{triggers_to_start}\")\n logger.info(f\"FINAL Triggers To Stop to be in the program: {triggers_to_stop}\")\n\n self._scheduler.add_job(self.turn_on_from_program, OrTrigger(triggers_to_start), args=[json_data[\"out\"]])\n self._scheduler.add_job(self.turn_off_from_program, OrTrigger(triggers_to_stop), args=[json_data[\"out\"]])\n\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n\n if store is True:\n file_path = PROGRAM + str(json_data[\"out\"]) + PROGRAM_EXT\n with open(file_path, \"w\", encoding=\"utf-8\") as outfile:\n json.dump(json_data, outfile)\n outfile.close()\n\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def delete_program(self, valve) -> bool:\n \"\"\"\n Delete a stored program for a specific valve.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n bool: True if the program was deleted, False otherwise.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Looking for {file_path} to delete!\")\n if path.exists(file_path):\n logger.info(f\"{file_path} exists! 
Deleting it...\")\n remove(file_path)\n return True\n return False\n\n def load_program_cycles_if_exists(self, valve):\n \"\"\"\n Load program cycles for a valve if a stored program exists.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n dict or None: The loaded JSON data or None if no program exists.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Loading {file_path} if exists!\")\n json_data = None\n if path.exists(file_path):\n logger.info(f\"{file_path} exists!\")\n with open(file_path, encoding=\"utf-8\") as json_file:\n json_data = json.load(json_file)\n self.store_program_cycles(json_data)\n json_file.close()\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n return json_data\n\n def split_json_into_chunks(self, selected_page, ap_array):\n \"\"\"\n Split a JSON array into chunks and create a response JSON.\n\n Parameters:\n - selected_page (int): The requested page number.\n - ap_array (list): The array to be split.\n\n Returns:\n dict: The response JSON containing the specified page and network information.\n \"\"\"\n selected_page = int(selected_page)\n json_response = {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"page\": selected_page,\n \"nets\": {},\n \"pages\": 0,\n }\n json_response_to_send = json_response.copy()\n\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n logger.debug(f\"Initial JSON response headers size: {headers_size} bytes\")\n\n pages = 1\n current_chunk_size = headers_size\n json_array = []\n\n for item in ap_array:\n json_response[\"pages\"] = pages\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n item_size = len(json.dumps(item).encode(\"utf-8\"))\n logger.debug(\n \"JSON item size: \"\n + f\"{item_size} bytes, \"\n + \"current_chunk_size: \"\n + f\"{current_chunk_size} bytes, \"\n + \"total: \"\n + f\"{current_chunk_size + item_size} bytes\"\n )\n if current_chunk_size + item_size >= MAX_NUM_OF_BYTES_CHUNK - MAX_NUM_OF_BUFFER_TO_ADD:\n pages += 1\n json_response[\"pages\"] = pages\n json_array = [item]\n json_response[\"nets\"] = json_array\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n current_chunk_size = headers_size + item_size + len(\", \")\n logger.debug(\n f\"Found total >= {MAX_NUM_OF_BYTES_CHUNK}: \"\n f\"Creating a new page: {pages}. \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n else:\n json_array.append(item)\n current_chunk_size += item_size + len(\", \")\n if selected_page == pages:\n json_response_to_send[\"nets\"] = json_array\n\n json_response_to_send[\"pages\"] = pages\n logger.debug(f\"JSON response size: {headers_size}\")\n logger.debug(\n f\"Nets array for this page ({pages}): {json_array}. \"\n f\"Current nets array size: {len(json.dumps(json_array).encode('utf-8'))} bytes, \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n\n if not json_response[\"nets\"]:\n json_response_to_send[\"nets\"] = json_array\n\n logger.debug(f\"JSON total size: {len(json.dumps(json_response_to_send).encode('utf-8'))}\")\n return json_response_to_send\n\n def discover_wifi_networks(self, chunked=0, page=1, refresh_networks_file=False):\n \"\"\"\n Discover available WiFi networks and return the information.\n\n Parameters:\n - chunked (int, optional): Whether to split the response into chunks. Default is 0.\n - page (int, optional): The requested page number. 
Default is 1.\n - refresh_networks_file (bool, optional): Whether to refresh the networks file. Default is False.\n\n Returns:\n str or dict: The JSON response containing WiFi network information.\n \"\"\"\n try:\n if page > 1:\n refresh_networks_file = False\n json_response = {}\n ap_array = []\n retries = 0\n while retries < 30:\n retries = retries + 1\n ap_array = Helpers().scan_rpi_wifi_networks(refresh_networks_file)\n if len(ap_array) != 0:\n break\n\n json_response = json.dumps(\n {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"ap_array\": ap_array,\n }\n )\n\n logger.info(f\"json_response: {json_response}\")\n if chunked == 0:\n return json_response\n logger.info(f\"Split array into chunks of {MAX_NUM_OF_BYTES_CHUNK} bytes...\")\n json_response = self.split_json_into_chunks(page, ap_array)\n return json_response\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network(self, ssid, wifi_key):\n \"\"\"\n Save WiFi network information.\n\n Parameters:\n - request_data (dict): The request data containing WiFi network information.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n if ssid and wifi_key:\n Helpers().store_wpa_ssid_key(ssid, wifi_key)\n return \"OK\"\n raise ValueError(\"Error: You need to provide ssid and wifi_keys in POST data\")\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network_with_wpa(self, wpa_enabled, wpa_key):\n \"\"\"\n Save WiFi network information with WPA settings.\n\n Parameters:\n - request_params (dict): The request parameters containing WPA settings.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n logger.info(f\"wpa_enabled: {wpa_enabled}, wpa_key: {wpa_key}\")\n if str(wpa_enabled) == \"1\":\n Helpers().update_wpa_supplicant(1, wpa_key)\n else:\n Helpers().update_wpa_supplicant(0, wpa_key)\n\n thread = Thread(target=Helpers().sleep_and_reboot_for_wpa)\n thread.start()\n return \"OK\"\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise" }, { "identifier": "Advertisement", "path": "app/ble/advertisement.py", "snippet": "class Advertisement(dbus.service.Object): # pylint: disable=too-many-instance-attributes\n \"\"\"Bluetooth Advertisement module.\"\"\"\n\n PATH_BASE = \"/org/bluez/raspirri/advertisement\"\n\n # Nine arguments are reasonable in this case.\n\n def __init__(self, index, advertising_type):\n self.path = f\"{self.PATH_BASE}{index}\"\n self.ble_tools = BleTools()\n self.bus = self.ble_tools.get_bus()\n self.ad_type = advertising_type\n self.local_name = None\n self.service_uuids = None\n self.solicit_uuids = None\n self.manufacturer_data = None\n self.service_data = None\n self.include_tx_power = None\n super().__init__(self.bus, self.path)\n\n def get_properties(self):\n \"\"\"Bluetooth Advertisement module properties.\"\"\"\n\n properties = {}\n properties[\"Type\"] = self.ad_type\n\n if self.local_name is not None:\n properties[\"LocalName\"] = dbus.String(self.local_name)\n\n if self.service_uuids is not None:\n properties[\"ServiceUUIDs\"] = dbus.Array(self.service_uuids, signature=\"s\")\n if self.solicit_uuids is not None:\n properties[\"SolicitUUIDs\"] = dbus.Array(self.solicit_uuids, 
signature=\"s\")\n if self.manufacturer_data is not None:\n properties[\"ManufacturerData\"] = dbus.Dictionary(self.manufacturer_data, signature=\"qv\")\n\n if self.service_data is not None:\n properties[\"ServiceData\"] = dbus.Dictionary(self.service_data, signature=\"sv\")\n if self.include_tx_power is not None:\n properties[\"IncludeTxPower\"] = dbus.Boolean(self.include_tx_power)\n\n if self.local_name is not None:\n properties[\"LocalName\"] = dbus.String(self.local_name)\n\n return {LE_ADVERTISEMENT_IFACE: properties}\n\n def get_path(self):\n \"\"\"path propertie.\"\"\"\n return dbus.ObjectPath(self.path)\n\n def add_service_uuid(self, uuid):\n \"\"\"add service uuid property.\"\"\"\n if self.service_uuids is None:\n self.service_uuids = []\n self.service_uuids.append(uuid)\n\n def add_solicit_uuid(self, uuid):\n \"\"\"add solicit uuid property.\"\"\"\n if self.solicit_uuids is None:\n self.solicit_uuids = []\n self.solicit_uuids.append(uuid)\n\n def add_manufacturer_data(self, manuf_code, data):\n \"\"\"add manufacturer data property.\"\"\"\n if self.manufacturer_data is None:\n self.manufacturer_data = dbus.Dictionary({}, signature=\"qv\")\n self.manufacturer_data[manuf_code] = dbus.Array(data, signature=\"y\")\n\n def add_service_data(self, uuid, data):\n \"\"\"add service data property.\"\"\"\n if self.service_data is None:\n self.service_data = dbus.Dictionary({}, signature=\"sv\")\n self.service_data[uuid] = dbus.Array(data, signature=\"y\")\n\n def add_local_name(self, name):\n \"\"\"add local name property.\"\"\"\n if self.local_name is None:\n self.local_name = \"\"\n self.local_name = dbus.String(name)\n\n @dbus.service.method(DBUS_PROP_IFACE, in_signature=\"s\", out_signature=\"a{sv}\")\n def GetAll(self, interface): # pylint: disable=invalid-name\n \"\"\"get all properties.\"\"\"\n if interface != LE_ADVERTISEMENT_IFACE:\n raise InvalidArgsException()\n\n return self.get_properties()[LE_ADVERTISEMENT_IFACE]\n\n @dbus.service.method(LE_ADVERTISEMENT_IFACE, in_signature=\"\", out_signature=\"\")\n def Release(self): # pylint: disable=invalid-name\n \"\"\"release ble.\"\"\"\n logger.info(f\"{self.path}: Released!\")\n\n def register_ad_callback(self):\n \"\"\"register ad callback.\"\"\"\n logger.info(f\"{self.path}: GATT advertisement registered\")\n\n def register_ad_error_callback(self):\n \"\"\"register ad error callback.\"\"\"\n logger.error(f\"{self.path}: Failed to register GATT advertisement\")\n\n def register(self):\n \"\"\"register.\"\"\"\n logger.info(f\"Bus found: {self.bus}\")\n adapter = self.ble_tools.find_adapter()\n logger.info(f\"Adapter found: {adapter}\")\n\n ad_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, adapter), LE_ADVERTISING_MANAGER_IFACE)\n logger.info(f\"ad_manager found: {ad_manager}\")\n\n ad_manager.RegisterAdvertisement(\n self.get_path(), {}, reply_handler=self.register_ad_callback, error_handler=self.register_ad_error_callback\n )" }, { "identifier": "Application", "path": "app/ble/service.py", "snippet": "class Application(dbus.service.Object):\n \"\"\"Bluetooth module.\"\"\"\n\n def __init__(self, path=\"/\"):\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n self.mainloop = GObject.MainLoop()\n self.ble_tools = BleTools()\n self.bus = self.ble_tools.get_bus()\n self.path = path\n self.services = []\n self.next_index = 0\n dbus.service.Object.__init__(self, self.bus, self.path)\n\n def get_path(self):\n \"\"\"Bluetooth module.\"\"\"\n return dbus.ObjectPath(self.path)\n\n def add_service(self, service):\n 
\"\"\"Bluetooth module.\"\"\"\n self.services.append(service)\n\n @dbus.service.method(DBUS_OM_IFACE, out_signature=\"a{oa{sa{sv}}}\")\n def GetManagedObjects(self): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n response = {}\n\n for service in self.services:\n response[service.get_path()] = service.get_properties()\n chrcs = service.get_characteristics()\n for chrc in chrcs:\n response[chrc.get_path()] = chrc.get_properties()\n descs = chrc.get_descriptors()\n for desc in descs:\n response[desc.get_path()] = desc.get_properties()\n\n return response\n\n def register_app_callback(self):\n \"\"\"Bluetooth module.\"\"\"\n logger.info(f\"GATT application registered. Path: {self.path}\")\n\n def register_app_error_callback(self, error):\n \"\"\"Bluetooth module.\"\"\"\n logger.info(f\"{self.path}: Failed to register application: {error}\")\n\n def register(self):\n \"\"\"Bluetooth module.\"\"\"\n adapter = self.ble_tools.find_adapter()\n logger.info(f\"{self.path}: Adapter: {adapter}\")\n\n service_manager = dbus.Interface(self.bus.get_object(BLUEZ_SERVICE_NAME, adapter), GATT_MANAGER_IFACE)\n logger.info(f\"{self.path}: Service Manager: {service_manager}\")\n\n service_manager.RegisterApplication(\n self.get_path(), {}, reply_handler=self.register_app_callback, error_handler=self.register_app_error_callback\n )\n logger.info(f\"{self.path}: Service Manager Registered...\")\n\n def run(self):\n \"\"\"Bluetooth module.\"\"\"\n self.mainloop.run()\n\n def quit(self):\n \"\"\"Bluetooth module.\"\"\"\n logger.info(\"\\nGATT application terminated\")\n self.mainloop.quit()" }, { "identifier": "Service", "path": "app/ble/service.py", "snippet": "class Service(dbus.service.Object):\n \"\"\"Bluetooth module.\"\"\"\n\n PATH_BASE = \"/org/bluez/raspirri/service\"\n\n def __init__(self, index, uuid, primary):\n self.ble_tools = BleTools()\n self.bus = self.ble_tools.get_bus()\n self.path = self.PATH_BASE + str(index)\n self.uuid = uuid\n self.primary = primary\n self.characteristics = set()\n self.index_counter = count()\n dbus.service.Object.__init__(self, self.bus, self.path)\n\n def get_properties(self):\n \"\"\"Bluetooth module.\"\"\"\n return {\n GATT_SERVICE_IFACE: {\n \"UUID\": self.uuid,\n \"Primary\": self.primary,\n \"Characteristics\": dbus.Array(self.get_characteristic_paths(), signature=\"o\"),\n }\n }\n\n def get_path(self):\n \"\"\"Bluetooth module.\"\"\"\n return dbus.ObjectPath(self.path)\n\n def add_characteristic(self, characteristic):\n \"\"\"Bluetooth module.\"\"\"\n self.characteristics.add(characteristic)\n\n def get_characteristic_paths(self):\n \"\"\"Bluetooth module.\"\"\"\n return [chrc.get_path() for chrc in self.characteristics]\n\n def get_characteristics(self):\n \"\"\"Bluetooth module.\"\"\"\n return self.characteristics\n\n def get_bus(self):\n \"\"\"Bluetooth module.\"\"\"\n return self.bus\n\n def get_next_index(self):\n \"\"\"Bluetooth module.\"\"\"\n return next(self.index_counter)\n\n @dbus.service.method(DBUS_PROP_IFACE, in_signature=\"s\", out_signature=\"a{sv}\")\n def GetAll(self, interface): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n if interface != GATT_SERVICE_IFACE:\n raise InvalidArgsException()\n\n return self.get_properties()[GATT_SERVICE_IFACE]" }, { "identifier": "Characteristic", "path": "app/ble/service.py", "snippet": "class Characteristic(dbus.service.Object):\n \"\"\"\n org.bluez.GattCharacteristic1 interface implementation\n \"\"\"\n\n def __init__(self, uuid, flags, service):\n index = 
service.get_next_index()\n self.path = service.path + \"/char\" + str(index)\n self.bus = service.get_bus()\n self.uuid = uuid\n self.service = service\n self.flags = flags\n self.descriptors = []\n self.next_index = 0\n dbus.service.Object.__init__(self, self.bus, self.path)\n\n def get_properties(self):\n \"\"\"Bluetooth module.\"\"\"\n\n return {\n GATT_CHRC_IFACE: {\n \"Service\": self.service.get_path(),\n \"UUID\": self.uuid,\n \"Flags\": self.flags,\n \"Descriptors\": dbus.Array(self.get_descriptor_paths(), signature=\"o\"),\n }\n }\n\n def get_path(self):\n \"\"\"Bluetooth module.\"\"\"\n return dbus.ObjectPath(self.path)\n\n def add_descriptor(self, descriptor):\n \"\"\"Bluetooth module.\"\"\"\n self.descriptors.append(descriptor)\n\n def get_descriptor_paths(self):\n \"\"\"Bluetooth module.\"\"\"\n result = []\n for desc in self.descriptors:\n result.append(desc.get_path())\n return result\n\n def get_descriptors(self):\n \"\"\"Bluetooth module.\"\"\"\n return self.descriptors\n\n @dbus.service.method(DBUS_PROP_IFACE, in_signature=\"s\", out_signature=\"a{sv}\")\n def GetAll(self, interface): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n if interface != GATT_CHRC_IFACE:\n raise InvalidArgsException()\n\n return self.get_properties()[GATT_CHRC_IFACE]\n\n @dbus.service.method(GATT_CHRC_IFACE, in_signature=\"a{sv}\", out_signature=\"ay\")\n def ReadValue(self, options): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n raise NotSupportedException(\"ReadValue is not supported for this characteristic.\")\n\n @dbus.service.method(GATT_CHRC_IFACE, in_signature=\"aya{sv}\")\n def WriteValue(self, values, options): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n raise NotSupportedException(\"WriteValue is not supported for this characteristic.\")\n\n @dbus.service.method(GATT_CHRC_IFACE)\n def StartNotify(self): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n raise NotSupportedException(\"StartNotify is not supported for this characteristic.\")\n\n @dbus.service.method(GATT_CHRC_IFACE)\n def StopNotify(self): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n raise NotSupportedException(\"StopNotify is not supported for this characteristic.\")\n\n @dbus.service.signal(DBUS_PROP_IFACE, signature=\"sa{sv}as\")\n def PropertiesChanged(self, interface, changed, invalidated): # pylint: disable=invalid-name\n \"\"\"Bluetooth module.\"\"\"\n logger.info(\n f\"{self.path}: Default propertiesChanged called, returning, \\\n {interface}, {changed}, {invalidated}\"\n )\n\n def get_bus(self):\n \"\"\"Bluetooth module.\"\"\"\n return self.bus\n\n def get_next_index(self):\n \"\"\"Bluetooth module.\"\"\"\n idx = self.next_index\n self.next_index += 1\n return idx\n\n def add_timeout(self, timeout, callback):\n \"\"\"Bluetooth module.\"\"\"\n logger.info(f\"{self.path}: Default add_timeout called\")\n GObject.timeout_add(timeout, callback)" } ]
import json

import dbus  # pylint: disable=import-error,useless-import-alias
from loguru import logger
from dbus.exceptions import DBusException

from app.raspi.helpers import Helpers
from app.raspi.services import Services
from app.ble.advertisement import Advertisement
from app.ble.service import Application, Service, Characteristic
12510
self.add_local_name("raspirriv1") self.include_tx_power = True logger.info( f"WifiNetworksAdvertisement initialized with index: {index}, \ advertising type: 'peripheral', local name: 'raspirriv1', and include_tx_power: True." ) def register_ad_callback(self): """Register ad callback.""" logger.info(f"{self.path}: GATT advertisement registered for WifiNetworksAdvertisement") def register_ad_error_callback(self): """Register ad error callback.""" logger.error(f"{self.path}: Failed to register GATT advertisement for WifiNetworksAdvertisement") class WifiNetworksService(Service): # pylint: disable=too-few-public-methods """Bluetooth module.""" SVC_UUID = "00000001-710e-4a5b-8d75-3e5b444bc3cf" def __init__(self, index, page=1, connected=0): self._page = page self._connected = connected super().__init__(index, self.SVC_UUID, True) self._add_characteristics() self._log_characteristics_added() def _add_characteristics(self): self.add_characteristic(WifiCharacteristic(self)) self.add_characteristic(ConnectivityCharacteristic(self)) def _log_characteristics_added(self): logger.info("Adding characteristics completed.") class WifiCharacteristic(Characteristic): """Bluetooth module.""" WIFI_CHARACTERISTIC_UUID = "00000003-710e-4a5b-8d75-3e5b444bc3cf" def __init__(self, service): super().__init__(self.WIFI_CHARACTERISTIC_UUID, ["read", "write", "write-without-response"], service) self._page_set = 1 self._refresh_set = False self._services = Services() logger.info("Adding WifiCharacteristic completed.") def WriteValue(self, values, options): # pylint: disable=unused-argument,invalid-name """Bluetooth module.""" command = "".join(str(value) for value in values) logger.debug(f"command: {command}") try: json_data = json.loads(command) if json_data.get("page"): self._page_set = int(json_data["page"]) logger.debug(f"Page set: {self._page_set}") elif json_data.get("refresh"): value = json_data["refresh"] if value.lower() == "true": self._refresh_set = True else: self._refresh_set = False logger.debug(f"refresh: {self._refresh_set}") elif json_data.get("ssid") and json_data.get("wifi_key"): connected = Helpers().store_wpa_ssid_key(json_data["ssid"], json_data["wifi_key"]) logger.info(f"Wifi changed: {json_data}. Connected: {connected}") else: raise ValueError(f"Missing data! You provided only: {json_data}") except json.JSONDecodeError as exception: logger.error(f"JSONDecodeError: {exception}") raise ValueError(f"{exception}") from exception except Exception as exception: logger.error(f"Error: {exception}") raise ValueError(f"{exception}") from exception def ReadValue(self, options): # pylint: disable=unused-argument,invalid-name """Bluetooth module.""" values = [] try: logger.debug(f"page set earlier: {self._page_set}") wifi_networks = self._services.discover_wifi_networks(1, self._page_set, self._refresh_set) logger.debug(f"wifi_networks: {wifi_networks}") if not wifi_networks: wifi_networks = "No wifi networks identified!" 
for char in str(wifi_networks): values.append(dbus.Byte(char.encode())) except DBusException as exception: logger.error(f"DBusException: {exception}") raise ValueError(f"{exception}") from exception except Exception as exception: logger.error(f"Error: {exception}") raise ValueError(f"{exception}") from exception return values class ConnectivityCharacteristic(Characteristic): # pylint: disable=too-few-public-methods """Bluetooth module.""" CONN_CHARACTERISTIC_UUID = "00000004-710e-4a5b-8d75-3e5b444bc3cf" def __init__(self, service): super().__init__(self.CONN_CHARACTERISTIC_UUID, ["read"], service) def ReadValue(self, options): # pylint: disable=invalid-name,unused-argument """Bluetooth module.""" values = [] logger.debug(f"connected: {Helpers().is_connected_to_inet}") if Helpers().is_connected_to_inet: values.append(dbus.Byte(b"1")) else: values.append(dbus.Byte(b"0")) logger.debug(f"values: {values}") return values def init_ble(): """Bluetooth module.""" logger.info("Initializing BLE module...")
"""Copyright (c) 2019, Douglas Otwell Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ GATT_CHRC_IFACE = "org.bluez.GattCharacteristic1" NOTIFY_TIMEOUT = 5000 class WifiNetworksAdvertisement(Advertisement): # pylint: disable=too-few-public-methods """Bluetooth module.""" def __init__(self, index): super().__init__(index, "peripheral") self.add_local_name("raspirriv1") self.include_tx_power = True logger.info( f"WifiNetworksAdvertisement initialized with index: {index}, \ advertising type: 'peripheral', local name: 'raspirriv1', and include_tx_power: True." ) def register_ad_callback(self): """Register ad callback.""" logger.info(f"{self.path}: GATT advertisement registered for WifiNetworksAdvertisement") def register_ad_error_callback(self): """Register ad error callback.""" logger.error(f"{self.path}: Failed to register GATT advertisement for WifiNetworksAdvertisement") class WifiNetworksService(Service): # pylint: disable=too-few-public-methods """Bluetooth module.""" SVC_UUID = "00000001-710e-4a5b-8d75-3e5b444bc3cf" def __init__(self, index, page=1, connected=0): self._page = page self._connected = connected super().__init__(index, self.SVC_UUID, True) self._add_characteristics() self._log_characteristics_added() def _add_characteristics(self): self.add_characteristic(WifiCharacteristic(self)) self.add_characteristic(ConnectivityCharacteristic(self)) def _log_characteristics_added(self): logger.info("Adding characteristics completed.") class WifiCharacteristic(Characteristic): """Bluetooth module.""" WIFI_CHARACTERISTIC_UUID = "00000003-710e-4a5b-8d75-3e5b444bc3cf" def __init__(self, service): super().__init__(self.WIFI_CHARACTERISTIC_UUID, ["read", "write", "write-without-response"], service) self._page_set = 1 self._refresh_set = False self._services = Services() logger.info("Adding WifiCharacteristic completed.") def WriteValue(self, values, options): # pylint: disable=unused-argument,invalid-name """Bluetooth module.""" command = "".join(str(value) for value in values) logger.debug(f"command: {command}") try: json_data = json.loads(command) if json_data.get("page"): self._page_set = int(json_data["page"]) logger.debug(f"Page set: {self._page_set}") elif json_data.get("refresh"): value = json_data["refresh"] if value.lower() == "true": self._refresh_set = True else: self._refresh_set = False logger.debug(f"refresh: {self._refresh_set}") elif json_data.get("ssid") and json_data.get("wifi_key"): connected = Helpers().store_wpa_ssid_key(json_data["ssid"], json_data["wifi_key"]) logger.info(f"Wifi changed: {json_data}. 
Connected: {connected}") else: raise ValueError(f"Missing data! You provided only: {json_data}") except json.JSONDecodeError as exception: logger.error(f"JSONDecodeError: {exception}") raise ValueError(f"{exception}") from exception except Exception as exception: logger.error(f"Error: {exception}") raise ValueError(f"{exception}") from exception def ReadValue(self, options): # pylint: disable=unused-argument,invalid-name """Bluetooth module.""" values = [] try: logger.debug(f"page set earlier: {self._page_set}") wifi_networks = self._services.discover_wifi_networks(1, self._page_set, self._refresh_set) logger.debug(f"wifi_networks: {wifi_networks}") if not wifi_networks: wifi_networks = "No wifi networks identified!" for char in str(wifi_networks): values.append(dbus.Byte(char.encode())) except DBusException as exception: logger.error(f"DBusException: {exception}") raise ValueError(f"{exception}") from exception except Exception as exception: logger.error(f"Error: {exception}") raise ValueError(f"{exception}") from exception return values class ConnectivityCharacteristic(Characteristic): # pylint: disable=too-few-public-methods """Bluetooth module.""" CONN_CHARACTERISTIC_UUID = "00000004-710e-4a5b-8d75-3e5b444bc3cf" def __init__(self, service): super().__init__(self.CONN_CHARACTERISTIC_UUID, ["read"], service) def ReadValue(self, options): # pylint: disable=invalid-name,unused-argument """Bluetooth module.""" values = [] logger.debug(f"connected: {Helpers().is_connected_to_inet}") if Helpers().is_connected_to_inet: values.append(dbus.Byte(b"1")) else: values.append(dbus.Byte(b"0")) logger.debug(f"values: {values}") return values def init_ble(): """Bluetooth module.""" logger.info("Initializing BLE module...")
app = Application()
3
2023-12-22 08:06:09+00:00
16k
shibing624/chatgpt-webui
src/models.py
[ { "identifier": "shared", "path": "src/shared.py", "snippet": "class State:\n def interrupt(self):\n def recover(self):\n def set_api_host(self, api_host: str):\n def reset_api_host(self):\n def reset_all(self):\n def set_api_key_queue(self, api_key_list):\n def switching_api_key(self, func):\n def wrapped(*args, **kwargs):" }, { "identifier": "config", "path": "src/config.py", "snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):" }, { "identifier": "BaseLLMModel", "path": "src/base_model.py", "snippet": "class BaseLLMModel:\n def __init__(\n self,\n model_name,\n system_prompt=INITIAL_SYSTEM_PROMPT,\n temperature=1.0,\n top_p=1.0,\n n_choices=1,\n stop=\"\",\n max_generation_token=None,\n presence_penalty=0,\n frequency_penalty=0,\n logit_bias=None,\n user=\"\",\n single_turn=False,\n ) -> None:\n self.history = []\n self.all_token_counts = []\n self.model_name = model_name\n self.model_type = ModelType.get_type(model_name)\n try:\n self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]\n except KeyError:\n self.token_upper_limit = DEFAULT_TOKEN_LIMIT\n self.interrupted = False\n self.system_prompt = system_prompt\n self.api_key = None\n self.need_api_key = False\n self.history_file_path = get_first_history_name(user)\n self.user_name = user\n self.chatbot = []\n\n self.default_single_turn = single_turn\n self.default_temperature = temperature\n self.default_top_p = top_p\n self.default_n_choices = n_choices\n self.default_stop_sequence = stop\n self.default_max_generation_token = max_generation_token\n self.default_presence_penalty = presence_penalty\n self.default_frequency_penalty = frequency_penalty\n self.default_logit_bias = logit_bias\n self.default_user_identifier = user\n\n self.single_turn = single_turn\n self.temperature = temperature\n self.top_p = top_p\n self.n_choices = n_choices\n self.stop_sequence = stop\n self.max_generation_token = max_generation_token\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.logit_bias = logit_bias\n self.user_identifier = user\n\n self.metadata = {}\n\n def get_answer_stream_iter(self):\n \"\"\"stream predict, need to be implemented\n conversations are stored in self.history, with the most recent question, in OpenAI format\n should return a generator, each time give the next word (str) in the answer\n \"\"\"\n logger.warning(\"stream predict not implemented, using at once predict instead\")\n response, _ = self.get_answer_at_once()\n yield response\n\n def get_answer_at_once(self):\n \"\"\"predict at once, need to be implemented\n conversations are stored in history, with the most recent question, in OpenAI format\n Should return:\n the answer (str)\n total token count (int)\n \"\"\"\n logger.warning(\"at once predict not implemented, using stream predict instead\")\n response_iter = self.get_answer_stream_iter()\n count = 0\n response = ''\n for response in response_iter:\n count += 1\n return response, sum(self.all_token_counts) + count\n\n def billing_info(self):\n \"\"\"get billing infomation, inplement if needed\"\"\"\n return BILLING_NOT_APPLICABLE_MSG\n\n def count_token(self, user_input):\n \"\"\"get token count from input, implement if needed\"\"\"\n return len(user_input)\n\n def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=\"\"):\n def get_return_value():\n return chatbot, status_text\n\n status_text = i18n(\"开始实时传输回答……\")\n if fake_input:\n chatbot.append((fake_input, \"\"))\n else:\n 
chatbot.append((inputs, \"\"))\n\n user_token_count = self.count_token(inputs)\n self.all_token_counts.append(user_token_count)\n logger.debug(f\"输入token计数: {user_token_count}\")\n\n stream_iter = self.get_answer_stream_iter()\n\n if display_append:\n display_append = (\n '\\n\\n<hr class=\"append-display no-in-raw\" />' + display_append\n )\n\n partial_text = \"\"\n token_increment = 1\n for partial_text in stream_iter:\n if type(partial_text) == tuple:\n partial_text, token_increment = partial_text\n chatbot[-1] = (chatbot[-1][0], partial_text + display_append)\n self.all_token_counts[-1] += token_increment\n status_text = self.token_message()\n yield get_return_value()\n if self.interrupted:\n self.recover()\n break\n self.history.append(construct_assistant(partial_text))\n\n def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=\"\"):\n if fake_input:\n chatbot.append((fake_input, \"\"))\n else:\n chatbot.append((inputs, \"\"))\n if fake_input is not None:\n user_token_count = self.count_token(fake_input)\n else:\n user_token_count = self.count_token(inputs)\n self.all_token_counts.append(user_token_count)\n ai_reply, total_token_count = self.get_answer_at_once()\n self.history.append(construct_assistant(ai_reply))\n if fake_input is not None:\n self.history[-2] = construct_user(fake_input)\n chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)\n if fake_input is not None:\n self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))\n else:\n self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)\n status_text = self.token_message()\n return chatbot, status_text\n\n def handle_file_upload(self, files, chatbot, language):\n \"\"\"if the model accepts modal input, implement this function\"\"\"\n status = gr.Markdown.update()\n if files:\n construct_index(self.api_key, files=files)\n status = i18n(\"索引构建完成\")\n return gr.Files.update(), chatbot, status\n\n def prepare_inputs(\n self, real_inputs, use_websearch,\n files, reply_language, chatbot,\n load_from_cache_if_possible=True,\n ):\n display_append = []\n limited_context = False\n if type(real_inputs) == list:\n fake_inputs = real_inputs[0][\"text\"]\n else:\n fake_inputs = real_inputs\n if files:\n from langchain.vectorstores.base import VectorStoreRetriever\n from langchain.retrievers import BM25Retriever, EnsembleRetriever\n limited_context = True\n msg = \"加载索引中……\"\n logger.info(msg)\n index, documents = construct_index(\n self.api_key,\n files=files,\n load_from_cache_if_possible=load_from_cache_if_possible,\n )\n assert index is not None, \"获取索引失败\"\n msg = \"索引获取成功,生成回答中……\"\n logger.info(msg)\n k = 3\n score_threshold = 0.6\n with retrieve_proxy():\n vec_retriever = VectorStoreRetriever(\n vectorstore=index,\n search_type=\"similarity_score_threshold\",\n search_kwargs={\"k\": k, \"score_threshold\": score_threshold}\n )\n bm25_retriever = BM25Retriever.from_documents(documents, preprocess_func=chinese_preprocessing_func)\n bm25_retriever.k = k\n ensemble_retriever = EnsembleRetriever(\n retrievers=[bm25_retriever, vec_retriever],\n weights=[0.5, 0.5],\n )\n try:\n relevant_documents = ensemble_retriever.get_relevant_documents(fake_inputs)\n except:\n return self.prepare_inputs(\n fake_inputs,\n use_websearch,\n files,\n reply_language,\n chatbot,\n load_from_cache_if_possible=False,\n )\n reference_results = [\n [d.page_content.strip(\"�\"), os.path.basename(d.metadata[\"source\"])]\n for d in relevant_documents\n ]\n reference_results = 
add_source_numbers(reference_results)\n display_append = add_details(reference_results)\n display_append = \"\\n\\n\" + \"\".join(display_append)\n if type(real_inputs) == list:\n real_inputs[0][\"text\"] = (\n replace_today(PROMPT_TEMPLATE)\n .replace(\"{query_str}\", fake_inputs)\n .replace(\"{context_str}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n real_inputs = (\n replace_today(PROMPT_TEMPLATE)\n .replace(\"{query_str}\", real_inputs)\n .replace(\"{context_str}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n elif use_websearch:\n from duckduckgo_search import DDGS\n search_results = []\n with DDGS() as ddgs:\n ddgs_gen = ddgs.text(fake_inputs, backend=\"lite\")\n for r in islice(ddgs_gen, 10):\n search_results.append(r)\n reference_results = []\n for idx, result in enumerate(search_results):\n logger.debug(f\"搜索结果{idx + 1}:{result}\")\n domain_name = urllib3.util.parse_url(result[\"href\"]).host\n reference_results.append([result[\"body\"], result[\"href\"]])\n display_append.append(\n # f\"{idx+1}. [{domain_name}]({result['href']})\\n\"\n f\"<a href=\\\"{result['href']}\\\" target=\\\"_blank\\\">{idx + 1}.&nbsp;{result['title']}</a>\"\n )\n reference_results = add_source_numbers(reference_results)\n # display_append = \"<ol>\\n\\n\" + \"\".join(display_append) + \"</ol>\"\n display_append = (\n '<div class = \"source-a\">' + \"\".join(display_append) + \"</div>\"\n )\n if type(real_inputs) == list:\n real_inputs[0][\"text\"] = (\n replace_today(WEBSEARCH_PTOMPT_TEMPLATE)\n .replace(\"{query}\", fake_inputs)\n .replace(\"{web_results}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n real_inputs = (\n replace_today(WEBSEARCH_PTOMPT_TEMPLATE)\n .replace(\"{query}\", fake_inputs)\n .replace(\"{web_results}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n display_append = \"\"\n return limited_context, fake_inputs, display_append, real_inputs, chatbot\n\n def predict(\n self,\n inputs,\n chatbot,\n stream=False,\n use_websearch=False,\n files=None,\n reply_language=\"中文\",\n should_check_token_count=True,\n ): # repetition_penalty, top_k\n\n status_text = \"开始生成回答……\"\n if type(inputs) == list:\n logger.info(\n \"用户\"\n + f\"{self.user_name}\"\n + \"的输入为:\"\n + \"(\"\n + str(len(inputs) - 1)\n + \" images) \"\n + f\"{inputs[0]['text']}\"\n )\n else:\n logger.info(\n \"用户\"\n + f\"{self.user_name}\"\n + \"的输入为:\"\n + f\"{inputs}\"\n )\n if should_check_token_count:\n if type(inputs) == list:\n yield chatbot + [(inputs[0][\"text\"], \"\")], status_text\n else:\n yield chatbot + [(inputs, \"\")], status_text\n if reply_language == \"跟随问题语言(不稳定)\":\n reply_language = \"the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch.\"\n\n limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(\n real_inputs=inputs,\n use_websearch=use_websearch,\n files=files,\n reply_language=reply_language,\n chatbot=chatbot\n )\n yield chatbot + [(fake_inputs, \"\")], status_text\n\n if (\n self.need_api_key and\n self.api_key is None\n and not shared.state.multi_api_key\n ):\n status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG\n logger.info(status_text)\n chatbot.append((inputs, \"\"))\n if len(self.history) == 0:\n self.history.append(construct_user(inputs))\n self.history.append(\"\")\n self.all_token_counts.append(0)\n else:\n self.history[-2] 
= construct_user(inputs)\n yield chatbot + [(inputs, \"\")], status_text\n return\n elif len(inputs.strip()) == 0:\n status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG\n logger.info(status_text)\n yield chatbot + [(inputs, \"\")], status_text\n return\n\n if self.single_turn:\n self.history = []\n self.all_token_counts = []\n if type(inputs) == list:\n self.history.append(inputs)\n else:\n self.history.append(construct_user(inputs))\n\n try:\n if stream:\n logger.debug(\"使用流式传输\")\n iter = self.stream_next_chatbot(\n inputs,\n chatbot,\n fake_input=fake_inputs,\n display_append=display_append,\n )\n for chatbot, status_text in iter:\n yield chatbot, status_text\n else:\n logger.debug(\"不使用流式传输\")\n chatbot, status_text = self.next_chatbot_at_once(\n inputs,\n chatbot,\n fake_input=fake_inputs,\n display_append=display_append,\n )\n yield chatbot, status_text\n except Exception as e:\n traceback.print_exc()\n status_text = STANDARD_ERROR_MSG + str(e)\n yield chatbot, status_text\n\n if len(self.history) > 1 and self.history[-1][\"content\"] != inputs:\n logger.info(\"回答为:\" + f\"{self.history[-1]['content']}\")\n\n if limited_context:\n self.history = []\n self.all_token_counts = []\n\n max_token = self.token_upper_limit - TOKEN_OFFSET\n\n if sum(self.all_token_counts) > max_token and should_check_token_count:\n count = 0\n while (\n sum(self.all_token_counts)\n > self.token_upper_limit * REDUCE_TOKEN_FACTOR\n and sum(self.all_token_counts) > 0\n ):\n count += 1\n del self.all_token_counts[0]\n del self.history[:2]\n logger.info(status_text)\n status_text = f\"为了防止token超限,模型忘记了早期的 {count} 轮对话\"\n yield chatbot, status_text\n\n def retry(\n self,\n chatbot,\n stream=False,\n use_websearch=False,\n files=None,\n reply_language=\"中文\",\n ):\n logger.debug(\"重试中……\")\n if len(self.history) > 1:\n inputs = self.history[-2][\"content\"]\n del self.history[-2:]\n if len(self.all_token_counts) > 0:\n self.all_token_counts.pop()\n elif len(chatbot) > 0:\n inputs = chatbot[-1][0]\n if '<div class=\"user-message\">' in inputs:\n inputs = inputs.split('<div class=\"user-message\">')[1]\n inputs = inputs.split(\"</div>\")[0]\n elif len(self.history) == 1:\n inputs = self.history[-1][\"content\"]\n del self.history[-1]\n else:\n yield chatbot, f\"{STANDARD_ERROR_MSG}上下文是空的\"\n return\n\n iter = self.predict(\n inputs,\n chatbot,\n stream=stream,\n use_websearch=use_websearch,\n files=files,\n reply_language=reply_language,\n )\n for x in iter:\n yield x\n logger.debug(\"重试完毕\")\n\n def interrupt(self):\n self.interrupted = True\n\n def recover(self):\n self.interrupted = False\n\n def set_token_upper_limit(self, new_upper_limit):\n self.token_upper_limit = new_upper_limit\n logger.info(f\"token上限设置为{new_upper_limit}\")\n self.auto_save()\n\n def set_temperature(self, new_temperature):\n self.temperature = new_temperature\n self.auto_save()\n\n def set_top_p(self, new_top_p):\n self.top_p = new_top_p\n self.auto_save()\n\n def set_n_choices(self, new_n_choices):\n self.n_choices = new_n_choices\n self.auto_save()\n\n def set_stop_sequence(self, new_stop_sequence: str):\n new_stop_sequence = new_stop_sequence.split(\",\")\n self.stop_sequence = new_stop_sequence\n self.auto_save()\n\n def set_max_tokens(self, new_max_tokens):\n self.max_generation_token = new_max_tokens\n self.auto_save()\n\n def set_presence_penalty(self, new_presence_penalty):\n self.presence_penalty = new_presence_penalty\n self.auto_save()\n\n def set_frequency_penalty(self, new_frequency_penalty):\n self.frequency_penalty = 
new_frequency_penalty\n self.auto_save()\n\n def set_logit_bias(self, logit_bias):\n self.logit_bias = logit_bias\n self.auto_save()\n\n def encoded_logit_bias(self):\n if self.logit_bias is None:\n return {}\n logit_bias = self.logit_bias.split()\n bias_map = {}\n encoding = tiktoken.get_encoding(\"cl100k_base\")\n for line in logit_bias:\n word, bias_amount = line.split(\":\")\n if word:\n for token in encoding.encode(word):\n bias_map[token] = float(bias_amount)\n return bias_map\n\n def set_user_identifier(self, new_user_identifier):\n self.user_identifier = new_user_identifier\n self.auto_save()\n\n def set_system_prompt(self, new_system_prompt):\n self.system_prompt = new_system_prompt\n self.auto_save()\n\n def set_key(self, new_access_key):\n self.api_key = new_access_key.strip()\n msg = i18n(\"API密钥更改为了\") + hide_middle_chars(self.api_key)\n logger.info(msg)\n return self.api_key, msg\n\n def set_single_turn(self, new_single_turn):\n self.single_turn = new_single_turn\n self.auto_save()\n\n def reset(self, remain_system_prompt=False):\n self.history = []\n self.all_token_counts = []\n self.interrupted = False\n self.history_file_path = new_auto_history_filename(self.user_name)\n history_name = self.history_file_path[:-5]\n choices = [history_name] + get_history_names(self.user_name)\n system_prompt = self.system_prompt if remain_system_prompt else \"\"\n\n self.single_turn = self.default_single_turn\n self.temperature = self.default_temperature\n self.top_p = self.default_top_p\n self.n_choices = self.default_n_choices\n self.stop_sequence = self.default_stop_sequence\n self.max_generation_token = self.default_max_generation_token\n self.presence_penalty = self.default_presence_penalty\n self.frequency_penalty = self.default_frequency_penalty\n self.logit_bias = self.default_logit_bias\n self.user_identifier = self.default_user_identifier\n\n return (\n [],\n self.token_message([0]),\n gr.Radio.update(choices=choices, value=history_name),\n system_prompt,\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n self.stop_sequence,\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n\n def delete_first_conversation(self):\n if self.history:\n del self.history[:2]\n del self.all_token_counts[0]\n return self.token_message()\n\n def delete_last_conversation(self, chatbot):\n if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:\n msg = \"由于包含报错信息,只删除chatbot记录\"\n chatbot = chatbot[:-1]\n return chatbot, self.history\n if len(self.history) > 0:\n self.history = self.history[:-2]\n if len(chatbot) > 0:\n msg = \"删除了一组chatbot对话\"\n chatbot = chatbot[:-1]\n if len(self.all_token_counts) > 0:\n msg = \"删除了一组对话的token计数记录\"\n self.all_token_counts.pop()\n msg = \"删除了一组对话\"\n self.chatbot = chatbot\n self.auto_save(chatbot)\n return chatbot, msg\n\n def token_message(self, token_lst=None):\n if token_lst is None:\n token_lst = self.all_token_counts\n token_sum = 0\n for i in range(len(token_lst)):\n token_sum += sum(token_lst[: i + 1])\n return (\n i18n(\"Token 计数: \")\n + f\"{sum(token_lst)}\"\n + i18n(\",本次对话累计消耗了 \")\n + f\"{token_sum} tokens\"\n )\n\n def rename_chat_history(self, filename, chatbot):\n if filename == \"\":\n return gr.update()\n if not filename.endswith(\".json\"):\n filename += \".json\"\n self.delete_chat_history(self.history_file_path)\n # 命名重复检测\n repeat_file_index = 2\n full_path = os.path.join(HISTORY_DIR, self.user_name, filename)\n while 
os.path.exists(full_path):\n full_path = os.path.join(\n HISTORY_DIR, self.user_name, f\"{repeat_file_index}_{filename}\"\n )\n repeat_file_index += 1\n filename = os.path.basename(full_path)\n\n self.history_file_path = filename\n save_file(filename, self, chatbot)\n return init_history_list(self.user_name)\n\n def auto_name_chat_history(\n self, name_chat_method, user_question, chatbot, single_turn_checkbox\n ):\n if len(self.history) == 2 and not single_turn_checkbox:\n user_question = self.history[0][\"content\"]\n if type(user_question) == list:\n user_question = user_question[0][\"text\"]\n filename = replace_special_symbols(user_question)[:16] + \".json\"\n return self.rename_chat_history(filename, chatbot)\n else:\n return gr.update()\n\n def auto_save(self, chatbot=None):\n if chatbot is None:\n chatbot = self.chatbot\n save_file(self.history_file_path, self, chatbot)\n\n def export_markdown(self, filename, chatbot):\n if filename == \"\":\n return\n if not filename.endswith(\".md\"):\n filename += \".md\"\n save_file(filename, self, chatbot)\n\n def load_chat_history(self, new_history_file_path=None):\n logger.debug(f\"{self.user_name} 加载对话历史中……\")\n if new_history_file_path is not None:\n if type(new_history_file_path) != str:\n # copy file from new_history_file_path.name to os.path.join(HISTORY_DIR, self.user_name)\n new_history_file_path = new_history_file_path.name\n shutil.copyfile(\n new_history_file_path,\n os.path.join(\n HISTORY_DIR,\n self.user_name,\n os.path.basename(new_history_file_path),\n ),\n )\n self.history_file_path = os.path.basename(new_history_file_path)\n else:\n self.history_file_path = new_history_file_path\n try:\n if self.history_file_path == os.path.basename(self.history_file_path):\n history_file_path = os.path.join(\n HISTORY_DIR, self.user_name, self.history_file_path\n )\n else:\n history_file_path = self.history_file_path\n if not self.history_file_path.endswith(\".json\"):\n history_file_path += \".json\"\n saved_json = {}\n if os.path.exists(history_file_path):\n with open(history_file_path, \"r\", encoding=\"utf-8\") as f:\n saved_json = json.load(f)\n try:\n if type(saved_json[\"history\"][0]) == str:\n logger.info(\"历史记录格式为旧版,正在转换……\")\n new_history = []\n for index, item in enumerate(saved_json[\"history\"]):\n if index % 2 == 0:\n new_history.append(construct_user(item))\n else:\n new_history.append(construct_assistant(item))\n saved_json[\"history\"] = new_history\n logger.info(new_history)\n except:\n pass\n if len(saved_json[\"chatbot\"]) < len(saved_json[\"history\"]) // 2:\n logger.info(\"Trimming corrupted history...\")\n saved_json[\"history\"] = saved_json[\"history\"][-len(saved_json[\"chatbot\"]):]\n logger.info(f\"Trimmed history: {saved_json['history']}\")\n logger.debug(f\"{self.user_name} 加载对话历史完毕\")\n self.history = saved_json[\"history\"]\n self.single_turn = saved_json.get(\"single_turn\", self.single_turn)\n self.temperature = saved_json.get(\"temperature\", self.temperature)\n self.top_p = saved_json.get(\"top_p\", self.top_p)\n self.n_choices = saved_json.get(\"n_choices\", self.n_choices)\n self.stop_sequence = list(saved_json.get(\"stop_sequence\", self.stop_sequence))\n self.token_upper_limit = saved_json.get(\n \"token_upper_limit\", self.token_upper_limit\n )\n self.max_generation_token = saved_json.get(\n \"max_generation_token\", self.max_generation_token\n )\n self.presence_penalty = saved_json.get(\n \"presence_penalty\", self.presence_penalty\n )\n self.frequency_penalty = saved_json.get(\n 
\"frequency_penalty\", self.frequency_penalty\n )\n self.logit_bias = saved_json.get(\"logit_bias\", self.logit_bias)\n self.user_identifier = saved_json.get(\"user_identifier\", self.user_name)\n self.metadata = saved_json.get(\"metadata\", self.metadata)\n self.chatbot = saved_json[\"chatbot\"]\n return (\n os.path.basename(self.history_file_path)[:-5],\n saved_json[\"system\"],\n saved_json[\"chatbot\"],\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n \",\".join(self.stop_sequence),\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n except:\n # 没有对话历史或者对话历史解析失败\n logger.info(f\"没有找到对话历史记录 {self.history_file_path}\")\n self.reset()\n return (\n os.path.basename(self.history_file_path),\n \"\",\n [],\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n \",\".join(self.stop_sequence),\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n\n def delete_chat_history(self, filename):\n if filename == \"CANCELED\":\n return gr.update(), gr.update(), gr.update()\n if filename == \"\":\n return i18n(\"你没有选择任何对话历史\"), gr.update(), gr.update()\n if not filename.endswith(\".json\"):\n filename += \".json\"\n if filename == os.path.basename(filename):\n history_file_path = os.path.join(HISTORY_DIR, self.user_name, filename)\n else:\n history_file_path = filename\n md_history_file_path = history_file_path[:-5] + \".md\"\n try:\n os.remove(history_file_path)\n os.remove(md_history_file_path)\n return i18n(\"删除对话历史成功\"), get_history_list(self.user_name), []\n except:\n logger.info(f\"删除对话历史失败 {history_file_path}\")\n return (\n i18n(\"对话历史\") + filename + i18n(\"已经被删除啦\"),\n get_history_list(self.user_name),\n [],\n )\n\n def auto_load(self):\n filepath = get_history_filepath(self.user_name)\n if not filepath:\n self.history_file_path = new_auto_history_filename(self.user_name)\n else:\n self.history_file_path = filepath\n return self.load_chat_history()\n\n def like(self):\n \"\"\"like the last response, implement if needed\"\"\"\n return gr.update()\n\n def dislike(self):\n \"\"\"dislike the last response, implement if needed\"\"\"\n return gr.update()\n\n def deinitialize(self):\n \"\"\"deinitialize the model, implement if needed\"\"\"\n pass" }, { "identifier": "ModelType", "path": "src/base_model.py", "snippet": "class ModelType(Enum):\n Unknown = -1\n OpenAI = 0\n ChatGLM = 1\n OpenAIInstruct = 2\n OpenAIVision = 3\n Claude = 4\n Qwen = 5\n LLaMA = 6\n\n @classmethod\n def get_type(cls, model_name: str):\n model_name_lower = model_name.lower()\n if \"gpt\" in model_name_lower:\n if \"instruct\" in model_name_lower:\n model_type = ModelType.OpenAIInstruct\n elif \"vision\" in model_name_lower:\n model_type = ModelType.OpenAIVision\n else:\n model_type = ModelType.OpenAI\n elif \"chatglm\" in model_name_lower:\n model_type = ModelType.ChatGLM\n elif \"llama\" in model_name_lower or \"alpaca\" in model_name_lower or \"yi\" in model_name_lower:\n model_type = ModelType.LLaMA\n else:\n model_type = ModelType.Unknown\n return model_type" }, { "identifier": "ChatGLMClient", "path": "src/chatglm.py", "snippet": "class ChatGLMClient(BaseLLMModel):\n def __init__(self, model_name, user_name=\"\"):\n super().__init__(model_name=model_name, user=user_name)\n import torch\n from transformers import AutoModel, AutoTokenizer\n global CHATGLM_TOKENIZER, 
CHATGLM_MODEL\n self.deinitialize()\n if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None:\n system_name = platform.system()\n logger.info(f\"Loading model from {model_name}\")\n if model_name in LOCAL_MODELS:\n model_path = LOCAL_MODELS[model_name]\n else:\n model_path = model_name\n CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n quantified = False\n if \"int4\" in model_name:\n quantified = True\n model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map='auto', torch_dtype='auto')\n if torch.cuda.is_available():\n logger.info(\"CUDA is available, using CUDA\")\n model = model.half().cuda()\n # mps加速还存在一些问题,暂时不使用\n elif system_name == \"Darwin\" and model_path is not None and not quantified:\n logger.info(\"Running on macOS, using MPS\")\n # running on macOS and model already downloaded\n model = model.half().to(\"mps\")\n else:\n logger.info(\"GPU is not available, using CPU\")\n model = model.float()\n model = model.eval()\n logger.info(f\"Model loaded from {model_path}\")\n CHATGLM_MODEL = model\n\n def _get_glm3_style_input(self):\n history = self.history\n query = history.pop()[\"content\"]\n return history, query\n\n def _get_glm2_style_input(self):\n history = [x[\"content\"] for x in self.history]\n query = history.pop()\n logger.debug(f\"{history}\")\n assert len(history) % 2 == 0, f\"History should be even length. current history is: {history}\"\n history = [[history[i], history[i + 1]]\n for i in range(0, len(history), 2)]\n return history, query\n\n def _get_glm_style_input(self):\n if \"glm2\" in self.model_name:\n return self._get_glm2_style_input()\n else:\n return self._get_glm3_style_input()\n\n def get_answer_at_once(self):\n history, query = self._get_glm_style_input()\n response, _ = CHATGLM_MODEL.chat(\n CHATGLM_TOKENIZER, query, history=history)\n return response, len(response)\n\n def get_answer_stream_iter(self):\n history, query = self._get_glm_style_input()\n for response, history in CHATGLM_MODEL.stream_chat(\n CHATGLM_TOKENIZER,\n query,\n history,\n max_length=self.token_upper_limit,\n top_p=self.top_p,\n temperature=self.temperature,\n ):\n yield response\n\n def deinitialize(self):\n import gc\n import torch\n # 释放显存\n global CHATGLM_MODEL, CHATGLM_TOKENIZER\n CHATGLM_MODEL = None\n CHATGLM_TOKENIZER = None\n gc.collect()\n torch.cuda.empty_cache()\n logger.info(\"ChatGLM model deinitialized\")" }, { "identifier": "LLaMAClient", "path": "src/llama.py", "snippet": "class LLaMAClient(BaseLLMModel):\n def __init__(self, model_name, user_name=\"\"):\n super().__init__(model_name=model_name, user=user_name)\n from transformers import AutoModelForCausalLM, AutoTokenizer\n self.max_generation_token = 1000\n logger.info(f\"Loading model from {model_name}\")\n if model_name in LOCAL_MODELS:\n model_path = LOCAL_MODELS[model_name]\n else:\n model_path = model_name\n self.tokenizer = AutoTokenizer.from_pretrained(model_path, legacy=True, use_fast=False)\n self.model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype='auto').eval()\n logger.info(f\"Model loaded from {model_path}\")\n self.stop_str = self.tokenizer.eos_token or \"</s>\"\n\n def _get_chat_input(self):\n messages = []\n for conv in self.history:\n if conv[\"role\"] == \"system\":\n messages.append({'role': 'system', 'content': conv[\"content\"]})\n elif conv[\"role\"] == \"user\":\n messages.append({'role': 'user', 'content': conv[\"content\"]})\n else:\n messages.append({'role': 'assistant', 'content': 
conv[\"content\"]})\n input_ids = self.tokenizer.apply_chat_template(\n conversation=messages,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors='pt'\n )\n\n return input_ids.to(self.model.device)\n\n def get_answer_at_once(self):\n input_ids = self._get_chat_input()\n output_ids = self.model.generate(\n input_ids,\n max_new_tokens=self.max_generation_token,\n top_p=self.top_p,\n temperature=self.temperature,\n )\n response = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)\n\n return response, len(response)\n\n def get_answer_stream_iter(self):\n from transformers import TextIteratorStreamer\n from threading import Thread\n input_ids = self._get_chat_input()\n streamer = TextIteratorStreamer(\n self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True\n )\n thread = Thread(\n target=self.model.generate,\n kwargs={\"input_ids\": input_ids,\n \"max_new_tokens\": self.max_generation_token,\n \"top_p\": self.top_p,\n \"temperature\": self.temperature,\n \"streamer\": streamer}\n )\n thread.start()\n generated_text = \"\"\n for new_text in streamer:\n stop = False\n pos = new_text.find(self.stop_str)\n if pos != -1:\n new_text = new_text[:pos]\n stop = True\n generated_text += new_text\n yield generated_text\n if stop:\n break" }, { "identifier": "INITIAL_SYSTEM_PROMPT", "path": "src/presets.py", "snippet": "INITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"" }, { "identifier": "TIMEOUT_ALL", "path": "src/presets.py", "snippet": "TIMEOUT_ALL = 200 # 非流式对话时的超时时间" }, { "identifier": "TIMEOUT_STREAMING", "path": "src/presets.py", "snippet": "TIMEOUT_STREAMING = 60 # 流式对话时的超时时间" }, { "identifier": "STANDARD_ERROR_MSG", "path": "src/presets.py", "snippet": "STANDARD_ERROR_MSG = i18n(\"☹️发生了错误:\") # 错误信息的标准前缀" }, { "identifier": "CONNECTION_TIMEOUT_MSG", "path": "src/presets.py", "snippet": "CONNECTION_TIMEOUT_MSG = i18n(\"连接超时,无法获取对话。\") # 连接超时" }, { "identifier": "READ_TIMEOUT_MSG", "path": "src/presets.py", "snippet": "READ_TIMEOUT_MSG = i18n(\"读取超时,无法获取对话。\") # 读取超时" }, { "identifier": "ERROR_RETRIEVE_MSG", "path": "src/presets.py", "snippet": "ERROR_RETRIEVE_MSG = i18n(\"请检查网络连接,或者API-Key是否有效。\")" }, { "identifier": "GENERAL_ERROR_MSG", "path": "src/presets.py", "snippet": "GENERAL_ERROR_MSG = i18n(\"获取对话时发生错误,请查看后台日志\")" }, { "identifier": "CHAT_COMPLETION_URL", "path": "src/presets.py", "snippet": "CHAT_COMPLETION_URL = \"https://api.openai.com/v1/chat/completions\"" }, { "identifier": "SUMMARY_CHAT_SYSTEM_PROMPT", "path": "src/presets.py", "snippet": "SUMMARY_CHAT_SYSTEM_PROMPT = \"\"\"\\\nPlease summarize the following conversation for a chat topic.\nNo more than 16 characters.\nNo special characters.\nPunctuation mark is banned.\nNot including '.' ':' '?' '!' 
'“' '*' '<' '>'.\nReply in user's language.\n\"\"\"" }, { "identifier": "hide_middle_chars", "path": "src/utils.py", "snippet": " class DataframeData(TypedDict):\nclass ConfigType(Enum):\nclass ConfigItem:\nclass SetupWizard:\ndef predict(current_model, *args):\ndef billing_info(current_model):\ndef set_key(current_model, *args):\ndef load_chat_history(current_model, *args):\ndef delete_chat_history(current_model, *args):\ndef interrupt(current_model, *args):\ndef reset(current_model, *args):\ndef retry(current_model, *args):\ndef delete_first_conversation(current_model, *args):\ndef delete_last_conversation(current_model, *args):\ndef set_system_prompt(current_model, *args):\ndef rename_chat_history(current_model, *args):\ndef auto_name_chat_history(current_model, *args):\ndef export_markdown(current_model, *args):\ndef upload_chat_history(current_model, *args):\ndef set_token_upper_limit(current_model, *args):\ndef set_temperature(current_model, *args):\ndef set_top_p(current_model, *args):\ndef set_n_choices(current_model, *args):\ndef set_stop_sequence(current_model, *args):\ndef set_max_tokens(current_model, *args):\ndef set_presence_penalty(current_model, *args):\ndef set_frequency_penalty(current_model, *args):\ndef set_logit_bias(current_model, *args):\ndef set_user_identifier(current_model, *args):\ndef set_single_turn(current_model, *args):\ndef handle_file_upload(current_model, *args):\ndef handle_summarize_index(current_model, *args):\ndef like(current_model, *args):\ndef dislike(current_model, *args):\ndef count_token(input_str):\ndef markdown_to_html_with_syntax_highlight(md_str): # deprecated\n def replacer(match):\ndef normalize_markdown(md_text: str) -> str: # deprecated\ndef convert_mdtext(md_text): # deprecated\ndef clip_rawtext(chat_message, need_escape=True):\ndef convert_bot_before_marked(chat_message):\ndef convert_user_before_marked(chat_message):\ndef escape_markdown(text):\ndef convert_asis(userinput): # deprecated\ndef detect_converted_mark(userinput): # deprecated\ndef detect_language(code): # deprecated\ndef construct_text(role, text):\ndef construct_user(text):\ndef construct_system(text):\ndef construct_assistant(text):\ndef save_file(filename, model, chatbot):\ndef sorted_by_pinyin(list):\ndef sorted_by_last_modified_time(list, dir):\ndef get_file_names_by_type(dir, filetypes=[\".json\"]):\ndef get_file_names_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_dropdown_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_by_last_modified_time(dir, filetypes=[\".json\"]):\ndef get_history_names(user_name=\"\"):\ndef get_first_history_name(user_name=\"\"):\ndef get_history_list(user_name=\"\"):\ndef init_history_list(user_name=\"\"):\ndef filter_history(user_name, keyword):\ndef load_template(filename, mode=0):\ndef get_template_names():\ndef get_template_dropdown():\ndef get_template_content(templates, selection, original_system_prompt):\ndef reset_textbox():\ndef reset_default():\ndef change_api_host(host):\ndef change_proxy(proxy):\ndef hide_middle_chars(s):\ndef submit_key(key):\ndef replace_today(prompt):\ndef get_geoip():\n def fetch_ip():\ndef find_n(lst, max_num):\ndef start_outputing():\ndef end_outputing():\ndef cancel_outputing():\ndef transfer_input(inputs):\ndef update_chuanhu():\ndef add_source_numbers(lst, source_name=\"Source\", use_source=True):\ndef add_details(lst):\ndef sheet_to_string(sheet, sheet_name=None):\ndef excel_to_string(file_path):\ndef get_last_day_of_month(any_day):\ndef get_model_source(model_name, 
alternative_source):\ndef refresh_ui_elements_on_load(current_model, selected_model_name, user_name):\ndef toggle_like_btn_visibility(selected_model_name):\ndef get_corresponding_file_type_by_model_name(selected_model_name):\ndef new_auto_history_filename(username):\ndef get_history_filepath(username):\ndef beautify_err_msg(err_msg):\ndef auth_from_conf(username, password):\ndef get_files_hash(file_src=None, file_paths=None):\ndef myprint(**args):\ndef replace_special_symbols(string, replace_string=\" \"):\n def __init__(self, key, name, default=None, type=ConfigType.String) -> None:\ndef generate_prompt_string(config_item):\ndef generate_result_string(config_item, config_value):\n def __init__(self, file_path=config_file) -> None:\n def set(self, config_items: List[ConfigItem], prompt: str):\n def set_users(self):\n def __setitem__(self, setting_key: str, value):\n def __getitem__(self, setting_key: str):\n def save(self):\ndef setup_wizard():\ndef save_pkl(data, file_path):\ndef load_pkl(file_path):\ndef chinese_preprocessing_func(text: str) -> List[str]:\nSERVER_GEO_IP_MSG = None\nFETCHING_IP = False\n SERVER_GEO_IP_MSG = i18n(\"你可以使用聊天功能。\")\n SERVER_GEO_IP_MSG = \"**您的IP区域:中国。**\"\n SERVER_GEO_IP_MSG = i18n(\"您的IP区域:\") + f\"{country}。\"\n FETCHING_IP = False\n FETCHING_IP = True" } ]
import base64
import datetime
import json
import os
import colorama
import gradio as gr
import requests
import traceback
from io import BytesIO
from PIL import Image
from loguru import logger

from src import shared, config
from src.base_model import BaseLLMModel, ModelType
from src.chatglm import ChatGLMClient
from src.llama import LLaMAClient
from src.presets import (
    INITIAL_SYSTEM_PROMPT,
    TIMEOUT_ALL,
    TIMEOUT_STREAMING,
    STANDARD_ERROR_MSG,
    CONNECTION_TIMEOUT_MSG,
    READ_TIMEOUT_MSG,
    ERROR_RETRIEVE_MSG,
    GENERAL_ERROR_MSG,
    CHAT_COMPLETION_URL,
    SUMMARY_CHAT_SYSTEM_PROMPT
)
from src.utils import (
    hide_middle_chars,
    count_token,
    construct_system,
    construct_user,
    get_last_day_of_month,
    i18n,
    replace_special_symbols,
)
11,594
system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, user_name="", ) -> None: super().__init__( model_name=model_name, temperature=temperature, top_p=top_p, system_prompt=system_prompt, user=user_name, ) self.api_key = api_key self.need_api_key = True self._refresh_header() def get_answer_stream_iter(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response(stream=True) if response is not None: iter = self._decode_chat_response(response) partial_text = "" for i in iter: partial_text += i yield partial_text else: yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG def get_answer_at_once(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response() response = json.loads(response.text) content = response["choices"][0]["message"]["content"] total_token_count = response["usage"]["total_tokens"] return content, total_token_count def count_token(self, user_input): input_token_count = count_token(construct_user(user_input)) if self.system_prompt is not None and len(self.all_token_counts) == 0: system_prompt_token_count = count_token( construct_system(self.system_prompt) ) return input_token_count + system_prompt_token_count return input_token_count def billing_info(self): try: curr_time = datetime.datetime.now() last_day_of_month = get_last_day_of_month( curr_time).strftime("%Y-%m-%d") first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" try: usage_data = self._get_billing_data(usage_url) except Exception as e: logger.warning(f"获取API使用情况失败:" + str(e)) return i18n("**获取API使用情况失败**") rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" except requests.exceptions.ConnectTimeout: status_text = ( STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG ) return status_text except requests.exceptions.ReadTimeout: status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG return status_text except Exception as e: traceback.print_exc() logger.error(i18n("获取API使用情况失败:") + str(e)) return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG def set_token_upper_limit(self, new_upper_limit): pass @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 def _get_response(self, stream=False): openai_api_key = self.api_key system_prompt = self.system_prompt history = self.history logger.debug(f"{history}") headers = { "Authorization": f"Bearer {openai_api_key}", "Content-Type": "application/json", } if system_prompt is not None: history = [construct_system(system_prompt), *history] payload = { "model": self.model_name, "messages": history, "temperature": self.temperature, "top_p": self.top_p, "n": self.n_choices, "stream": stream, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, } if self.max_generation_token is not None: payload["max_tokens"] = self.max_generation_token if self.stop_sequence is not None: payload["stop"] = self.stop_sequence if self.logit_bias is not None: payload["logit_bias"] = self.logit_bias if self.user_identifier is not None: payload["user"] = self.user_identifier if stream: timeout = TIMEOUT_STREAMING else: timeout = TIMEOUT_ALL # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}")
# -*- coding: utf-8 -*- """ Get model client from model name """ class OpenAIClient(BaseLLMModel): def __init__( self, model_name, api_key, system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, user_name="", ) -> None: super().__init__( model_name=model_name, temperature=temperature, top_p=top_p, system_prompt=system_prompt, user=user_name, ) self.api_key = api_key self.need_api_key = True self._refresh_header() def get_answer_stream_iter(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response(stream=True) if response is not None: iter = self._decode_chat_response(response) partial_text = "" for i in iter: partial_text += i yield partial_text else: yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG def get_answer_at_once(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response() response = json.loads(response.text) content = response["choices"][0]["message"]["content"] total_token_count = response["usage"]["total_tokens"] return content, total_token_count def count_token(self, user_input): input_token_count = count_token(construct_user(user_input)) if self.system_prompt is not None and len(self.all_token_counts) == 0: system_prompt_token_count = count_token( construct_system(self.system_prompt) ) return input_token_count + system_prompt_token_count return input_token_count def billing_info(self): try: curr_time = datetime.datetime.now() last_day_of_month = get_last_day_of_month( curr_time).strftime("%Y-%m-%d") first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" try: usage_data = self._get_billing_data(usage_url) except Exception as e: logger.warning(f"获取API使用情况失败:" + str(e)) return i18n("**获取API使用情况失败**") rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" except requests.exceptions.ConnectTimeout: status_text = ( STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG ) return status_text except requests.exceptions.ReadTimeout: status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG return status_text except Exception as e: traceback.print_exc() logger.error(i18n("获取API使用情况失败:") + str(e)) return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG def set_token_upper_limit(self, new_upper_limit): pass @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 def _get_response(self, stream=False): openai_api_key = self.api_key system_prompt = self.system_prompt history = self.history logger.debug(f"{history}") headers = { "Authorization": f"Bearer {openai_api_key}", "Content-Type": "application/json", } if system_prompt is not None: history = [construct_system(system_prompt), *history] payload = { "model": self.model_name, "messages": history, "temperature": self.temperature, "top_p": self.top_p, "n": self.n_choices, "stream": stream, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, } if self.max_generation_token is not None: payload["max_tokens"] = self.max_generation_token if self.stop_sequence is not None: payload["stop"] = self.stop_sequence if self.logit_bias is not None: payload["logit_bias"] = self.logit_bias if self.user_identifier is not None: payload["user"] = self.user_identifier if stream: timeout = TIMEOUT_STREAMING else: timeout = TIMEOUT_ALL # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: 
logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}")
with config.retrieve_proxy():
1
2023-12-27 12:14:26+00:00
16k
camenduru/AnyDoor-online-hf
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
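The p_sample_ddim method in the context snippet above reduces, per step, to a closed-form update on the noisy sample. Below is a minimal sketch of that update, assuming eps-parameterization and that a_t, a_prev and sigma_t have already been gathered for the current index as tensors broadcastable against x (the function name is illustrative, not part of the repository):

import torch

def ddim_step(x, e_t, a_t, a_prev, sigma_t):
    # Current prediction for x_0 from the noise estimate (eps-parameterization).
    pred_x0 = (x - (1. - a_t).sqrt() * e_t) / a_t.sqrt()
    # Direction term pointing towards x_t.
    dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
    # Stochastic part; sigma_t == 0 gives the deterministic DDIM update.
    noise = sigma_t * torch.randn_like(x)
    x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
    return x_prev, pred_x0

With sigma_t set to zero this is the deterministic DDIM update; the sampler in the snippet additionally applies temperature scaling and optional dropout to the stochastic term.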
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
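The unconditional-guidance branch of p_sample_ddim above can be summarised independently of the sampler. Here is a minimal sketch, assuming a single tensor conditioning (the dict/list handling in the snippet is omitted) and a generic denoiser callable standing in for self.model.apply_model:

import torch

def guided_eps(denoiser, x, t, cond, uncond, guidance_scale):
    # Run the conditional and unconditional branches in one batched forward pass,
    # then recombine: eps = eps_uncond + scale * (eps_cond - eps_uncond).
    x_in = torch.cat([x, x])
    t_in = torch.cat([t, t])
    c_in = torch.cat([uncond, cond])
    eps_uncond, eps_cond = denoiser(x_in, t_in, c_in).chunk(2)
    return eps_uncond + guidance_scale * (eps_cond - eps_uncond)

A guidance_scale of 1.0 collapses the expression to the conditional prediction, which is why the snippet skips the batched path in that case.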
12309
else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]:
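The cropped p_sample_loop above interleaves the learned reverse step with a mask-based paste of the known region. A minimal, self-contained sketch of that loop follows, with p_sample and q_sample passed in as callables standing in for the class methods:

import torch

@torch.no_grad()
def masked_sample_loop(p_sample, q_sample, shape, num_timesteps, device,
                       x0=None, mask=None):
    # Start from pure noise and walk the chain backwards.
    img = torch.randn(shape, device=device)
    for i in reversed(range(num_timesteps)):
        ts = torch.full((shape[0],), i, device=device, dtype=torch.long)
        img = p_sample(img, ts)
        if mask is not None:
            # Re-noise the known content to the current timestep and paste it
            # over the sample: img = img_orig * mask + (1 - mask) * img.
            img_orig = q_sample(x0, ts)
            img = img_orig * mask + (1. - mask) * img
    return img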
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): #c 1,3,224,224 if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): #1,1,1024 c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def 
get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not 
self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): #t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() t = self.time_steps.reshape( (x.shape[0],) ).to(self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False) #boundary = self.boundary.to(loss_simple.device) #boundary = F.interpolate(boundary, size = (64,64)) * 5 + 1.0 #16,1,64,64 #print(loss_simple.shape) #16,4,64,64 loss_simple = loss_simple.mean([1, 2, 3]) #.mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) #print(self.parameterization, self.learn_logvar, self.original_elbo_weight, self.lvlb_weights[t]) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... 
-> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]:
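register_schedule, q_sample and get_v in the file above encode the core forward-diffusion arithmetic. A minimal sketch of those pieces, assuming the sqrt-space linear beta schedule used by make_beta_schedule's "linear" option:

import torch

def linear_schedule(timesteps=1000, linear_start=1e-4, linear_end=2e-2):
    # beta_t linear in sqrt-space, then alpha_bar_t = prod(1 - beta_s).
    betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, timesteps) ** 2
    return betas, torch.cumprod(1. - betas, dim=0)

def q_sample(x_start, t, alphas_cumprod, noise):
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    a = alphas_cumprod[t].view(-1, 1, 1, 1)
    return a.sqrt() * x_start + (1. - a).sqrt() * noise

def get_v(x_start, noise, t, alphas_cumprod):
    # v-parameterization target: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
    a = alphas_cumprod[t].view(-1, 1, 1, 1)
    return a.sqrt() * noise - (1. - a).sqrt() * x_start

The .view(-1, 1, 1, 1) reshape plays the role of extract_into_tensor for image-shaped batches.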
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
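p_losses in the LatentDiffusion class above combines a per-sample simple loss with the learned log-variance weighting before adding the ELBO term. A minimal sketch of that weighting for image batches (names are illustrative):

import torch
import torch.nn.functional as F

def weighted_simple_loss(model_out, target, logvar, t, l_simple_weight=1.0):
    # Per-sample MSE, averaged over channel and spatial dims.
    loss_simple = F.mse_loss(model_out, target, reduction="none").mean(dim=[1, 2, 3])
    # Learned-variance weighting: loss = loss_simple / exp(logvar_t) + logvar_t.
    logvar_t = logvar[t]
    loss = loss_simple / torch.exp(logvar_t) + logvar_t
    return l_simple_weight * loss.mean()

In the source, the vlb term weighted by original_elbo_weight is added on top of this value.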
0
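The DDPM class above keeps an exponential moving average of the UNet weights via LitEma and swaps them in inside ema_scope for validation and logging. A minimal sketch of the underlying update rule (a generic helper, not the LitEma API):

import torch

@torch.no_grad()
def ema_update(shadow_params, model_params, decay=0.9999):
    # shadow <- decay * shadow + (1 - decay) * current weights
    for shadow, param in zip(shadow_params, model_params):
        shadow.mul_(decay).add_(param, alpha=1.0 - decay)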
2023-12-25 04:48:34+00:00
16k
smonsays/modular-hyperteacher
metax/data/imitation.py
[ { "identifier": "Environment", "path": "metax/data/envs/base.py", "snippet": "class Environment(abc.ABC):\n @abc.abstractproperty\n def num_actions(self) -> int:\n \"\"\" Number of possible actions.\"\"\"\n\n @abc.abstractproperty\n def observation_shape(self):\n \"\"\"The shape of the observation array\"\"\"\n\n @abc.abstractmethod\n def observe(self, env_state: EnvironmentState):\n \"\"\"Returns the observation from the environment state.\"\"\"\n\n @abc.abstractmethod\n def reset(self, rng: PRNGKey, goal: Array = None) -> Tuple[Any, EnvironmentInteraction]:\n \"\"\"Resets the environment to an initial state.\"\"\"\n\n @abc.abstractmethod\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n \"\"\"Resets the environment goal.\"\"\"\n\n def step(\n self, rng: PRNGKey, env_state: EnvironmentState, action: Array\n ) -> Tuple[EnvironmentState, EnvironmentInteraction]:\n \"\"\"Run one timestep of the environment's dynamics. Returns the Transition and the Environment state.\"\"\"\n\n # return self._step(rng, env_state, action)\n def empty_step(rng, state, action):\n \"\"\"\n Only update time and give no reward.\n \"\"\"\n new_timestep = state.timestep + 1\n new_state = state.replace(timestep=new_timestep)\n new_emission = EnvironmentInteraction(\n observation=self.observe(state),\n reward=0.0,\n done=state.done,\n timestep=new_timestep,\n )\n return new_state, new_emission\n\n # Only run env step if not already done\n return jax.lax.cond(\n env_state.done,\n empty_step,\n self._step,\n rng,\n env_state,\n action,\n )\n\n @abc.abstractmethod\n def _step(\n self, rng: PRNGKey, env_state: EnvironmentState, action: Array\n ) -> Tuple[EnvironmentState, EnvironmentInteraction]:\n \"\"\"Run one timestep of the environment's dynamics. Returns the Transition and the Environment state.\"\"\"" }, { "identifier": "CompositionalGrid", "path": "metax/data/envs/grid.py", "snippet": "class CompositionalGrid(Environment):\n def __init__(\n self,\n grid_size: int,\n num_interactions: int,\n num_mazes: int,\n num_objects: int,\n num_distractors: int,\n frac_ood: float,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n assert grid_size > 5, \"grid_size must be greater than 5\"\n\n self.grid_size = grid_size\n self.num_interactions = num_interactions\n self.num_directions = 4 # split grid into 4 quadrants for the goal position\n self.num_objects = num_objects\n self.num_mazes = num_mazes\n self.num_distractors = num_distractors\n self.frac_ood = frac_ood\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n self.num_factors = 4 # direction, interaction, maze, object\n\n # Static matrices\n self._delta_position = jnp.concatenate((\n jnp.array([[-1, 0], [0, 1], [1, 0], [0, -1]]), # up, right, down, left\n jnp.zeros((self.num_interactions, 2), dtype=jnp.int32), # no movement for interaction\n ))\n size_low, size_high = grid_size // 2, (grid_size // 2) + grid_size % 2\n self._quadrants = jnp.stack((\n np.block([\n [np.ones((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.ones((size_high, size_low))],\n [np.zeros((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n [np.ones((size_low, size_high)), np.zeros((size_low, size_low))]\n ]),\n np.block([\n [np.zeros((size_high, size_high)), np.zeros((size_high, size_low))],\n 
[np.zeros((size_low, size_high)), np.ones((size_low, size_low))]\n ]),\n ))\n\n # Pregenerate possible goals and randomly split into in/out of distribution\n self.tasks_all = np.array(list(itertools.product(\n range(self.num_directions),\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n if self.task_support == \"non_compositional\":\n # in/out split with non-compositional support\n self.tasks_in_dist = np.array(list(itertools.product(\n range(self.num_directions - 1), # hold out one goal quadrant from in_dist\n range(self.num_interactions),\n range(self.num_mazes),\n range(self.num_objects),\n )))\n\n @partial(np.vectorize, signature=\"(k),(n,k)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n self.tasks_out_dist = self.tasks_all[~elem_in_array(self.tasks_all, self.tasks_in_dist)]\n\n elif \"_hot\" in self.task_support:\n num_hot = int(self.task_support.split(\"_\")[0])\n mask = jnp.sum(self.tasks_all > 0, axis=1) <= num_hot\n self.tasks_in_dist = jnp.array(self.tasks_all[mask])\n self.tasks_out_dist = jnp.array(self.tasks_all[~mask])\n\n elif self.task_support == \"random\":\n self.tasks_all = jax.random.permutation(self.rng, self.tasks_all)\n self.num_ood = int(len(self.tasks_all) * self.frac_ood)\n self.tasks_in_dist = jnp.array(self.tasks_all[: -self.num_ood])\n self.tasks_out_dist = jnp.array(self.tasks_all[-self.num_ood:])\n\n # Make sure all features for every factor are present in the in-distribution tasks\n assert len(jnp.unique(self.tasks_in_dist[:, 0])) == self.num_directions\n assert len(jnp.unique(self.tasks_in_dist[:, 1])) == self.num_interactions\n assert len(jnp.unique(self.tasks_in_dist[:, 2])) == self.num_mazes\n assert len(jnp.unique(self.tasks_in_dist[:, 3])) == self.num_objects\n else:\n raise ValueError(f\"Invalid task support: {self.task_support}\")\n\n assert len(self.tasks_in_dist) > 0\n assert len(self.tasks_out_dist) > 0\n\n # Create random mazes\n if self.num_mazes > 0:\n self.mazes = jnp.stack([\n self.generate_random_maze(self.grid_size, seed=self.seed + i)\n for i in range(self.num_mazes)\n ])\n else:\n self.mazes = jnp.zeros((1, self.grid_size, self.grid_size))\n\n # Precompute optimal paths, this is potentially expensive for large grid sizes\n optimal_paths, shortest_paths = list(\n zip(*[self._precompute_optimal_paths(m) for m in self.mazes])\n )\n self.optimal_paths, shortest_paths = jnp.stack(optimal_paths), jnp.stack(shortest_paths)\n self.valid_goal_dist = shortest_paths >= self.grid_size\n\n @property\n def num_actions(self) -> int:\n return 4 + self.num_interactions\n\n @property\n def observation_shape(self) -> Tuple[int]:\n # encodes positions of agent, objects and walls\n return (self.grid_size, self.grid_size, self.num_objects + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n assert mode in [\"ood\", \"test\", \"train\"]\n if mode == \"ood\":\n task_code = jax.random.choice(rng, self.tasks_out_dist)\n else:\n task_code = jax.random.choice(rng, self.tasks_in_dist)\n\n task_id = jnp.ravel_multi_index(\n task_code,\n dims=(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects),\n mode=\"wrap\",\n )\n emb_dim = max(self.num_directions, self.num_interactions, self.num_mazes, self.num_objects)\n embedding = jax.nn.one_hot(task_code, emb_dim)\n\n return CompositionalGridGoal(*task_code), {\"task_id\": task_id, \"embedding\": embedding}\n\n def reset(\n self, rng: PRNGKey, goal: Optional[CompositionalGridGoal] = None\n ) -> 
Tuple[CompositionalGridState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_distractor, rng_pos1, rng_pos2, rng_pos3, rng_goal = jax.random.split(rng, 5)\n\n if goal is None:\n # Sample a goal from train distribution if None specified\n goal, _ = self.reset_goal(rng_goal, mode=\"train\")\n\n # Sample distractor objects distinct from goal object\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.num_objects,\n shape=(self.num_distractors,),\n replace=True,\n p=1.0 - (jnp.arange(self.num_objects) == goal.object)\n )\n\n # Sample distinct, random positions for agent, distractors and the goal respecting direction\n position_goal = jax.random.choice(\n key=rng_pos2,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]) * self._quadrants[goal.direction]).reshape(-1),\n )\n goal_coord = self._coord_to_idx(position_goal[0][0], position_goal[0][1])\n position_agent = jax.random.choice(\n key=rng_pos1,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(1, ),\n p=((1.0 - self.mazes[goal.maze]).reshape(-1) * self.valid_goal_dist[goal.maze][goal_coord]),\n )\n positions_distractors = jax.random.choice(\n key=rng_pos3,\n a=np.array(list(itertools.product(range(self.grid_size), repeat=2))),\n shape=(self.num_distractors, ),\n replace=False,\n p=1.0 - self.mazes[goal.maze].reshape(-1),\n )\n\n positions = jnp.concatenate([position_goal, positions_distractors, position_agent])\n\n env_state = CompositionalGridState(\n done=False, timestep=0, distractors=distractors, positions=positions, goal=goal\n )\n emission = EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n\n return env_state, emission\n\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> Tuple[CompositionalGridState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1, :]\n\n # Check if agent reached goal (positive reward)\n goal_reached = jnp.logical_and(\n action == (len(MOVES) + env_state.goal.interaction),\n jnp.all(pos_agent == env_state.positions[0, :]),\n )\n reward = 1.0 * goal_reached\n\n # Move the agent to new position and check if valid\n pos_new = self._delta_position[action] + pos_agent\n pos_invalid = jnp.logical_or(\n jnp.logical_or(jnp.any(pos_new < 0), jnp.any(pos_new >= self.grid_size)), # in grid?\n self.mazes[env_state.goal.maze][pos_new[0], pos_new[1]], # in wall?\n )\n pos_new = jnp.where(pos_invalid, pos_agent, pos_new)\n\n # Update state\n positions = env_state.positions.at[-1].set(pos_new)\n env_state = CompositionalGridState(\n done=goal_reached,\n timestep=env_state.timestep + 1,\n distractors=env_state.distractors,\n positions=positions,\n goal=env_state.goal,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: CompositionalGridState) -> Array:\n \"\"\"\n Encode the environment state as an asrray of shape (grid_size, grid_size, num_factors * num_objects + 1).\n For each position in the grid, the code word has the following structure:\n [factor_0_feature_0, ..., factor_0_feature_n, ..., factor_n_feature_0, ..., factor_n_feature_n, wall?, agent?]\n \"\"\"\n objects = jnp.concatenate([jnp.array([env_state.goal.object]), env_state.distractors])\n objects_hot = jax.nn.one_hot(objects, num_classes=self.num_objects)\n 
pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros(self.observation_shape)\n grid = grid.at[\n jnp.expand_dims(pos_objects[:, 0], axis=1),\n jnp.expand_dims(pos_objects[:, 1], axis=1),\n :-2,\n ].set(jnp.expand_dims(objects_hot, axis=1))\n grid = grid.at[:, :, -2].set(self.mazes[env_state.goal.maze]) # walls encoded in penultimate channel\n grid = grid.at[pos_agent[0], pos_agent[1], -1].set(1.0) # agent encoded in last channel\n\n return grid\n\n def _features_to_idx(self, features: Array) -> Array:\n \"\"\"Converts features to a unique feature index\"\"\"\n idx = [factor * self.num_objects + feature for factor, feature in enumerate(features)]\n return jnp.array(idx)\n\n def _coord_to_idx(self, x, y):\n \"\"\"Converts coordinates to a unique grid index\"\"\"\n return x * self.grid_size + y\n\n def _idx_to_coord(self, idx):\n \"\"\"Converts a grid index to grid coordinates\"\"\"\n return idx // self.grid_size, idx % self.grid_size\n\n def demonstrate(\n self, rng: PRNGKey, env_state: CompositionalGridState\n ) -> EnvironmentInteraction:\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n pos_agent, pos_goal = env_state.positions[-1, :], env_state.positions[0, :]\n idx_agent, idx_goal = self._coord_to_idx(*pos_agent), self._coord_to_idx(*pos_goal)\n optimal_actions = self.optimal_paths[env_state.goal.maze][idx_agent, idx_goal]\n\n # Fill placeholder actions with correct interaction\n mask_pad = (optimal_actions == -1)\n optimal_actions *= ~mask_pad\n optimal_actions += (len(MOVES) + env_state.goal.interaction) * mask_pad\n\n def env_step(carry, action):\n rng, env_state = carry\n rng, rng_step = jax.random.split(rng)\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), emission\n\n _, trajectory = jax.lax.scan(env_step, (rng, env_state), optimal_actions)\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission, trajectory\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, optimal_actions\n\n def _precompute_optimal_paths(self, maze: Array):\n \"\"\"Precompute the optimal trajectories for all possible states.\"\"\"\n # Create an array that encodes the graph structure of the grid to compute all shortest paths\n coordinates, no_walls_coords = [], np.argwhere(maze == 0)\n for x, y in no_walls_coords:\n edges = []\n if x > 0 and not maze[x - 1, y]:\n edges.append([x - 1, y])\n if x < self.grid_size - 1 and not maze[x + 1, y]:\n edges.append([x + 1, y])\n if y > 0 and not maze[x, y - 1]:\n edges.append([x, y - 1])\n if y < self.grid_size - 1 and not maze[x, y + 1]:\n edges.append([x, y + 1])\n\n idx_curr = self._coord_to_idx(x, y)\n coordinates += [(idx_curr, self._coord_to_idx(i, k)) for (i, k) in edges]\n\n coordinates = np.array(coordinates)\n connectivity = np.zeros((self.grid_size**2, self.grid_size**2))\n connectivity[coordinates[:, 0], coordinates[:, 1]] = 1.0\n shortest_paths, predecessors = shortest_path(connectivity, return_predecessors=True)\n max_num_actions = (self.grid_size**2) - 1\n\n def get_path(predecessors, start, end):\n \"\"\"Get the full path from the predecessor matrix.\"\"\"\n path = [end]\n while path[-1] != start:\n path.append(predecessors[start, 
path[-1]])\n return path[::-1]\n\n def path_to_actions(path):\n \"\"\"Convert path to actions.\"\"\"\n # Pad with placeholder actions, need to be overwritten with correct interaction in self.demonstrate()\n actions = np.full((max_num_actions), -1)\n for i in range(len(path) - 1):\n x1, y1 = self._idx_to_coord(path[i])\n x2, y2 = self._idx_to_coord(path[i + 1])\n action = np.array([x2 - x1, y2 - y1])\n action = np.where(np.all(self._delta_position == action, axis=1))[0][0]\n actions[i] = action\n return np.array(actions)\n\n # Precompute optimal paths for all possible positions\n optimal_paths = -1 * np.ones(\n (self.grid_size**2, self.grid_size**2, max_num_actions), dtype=int\n )\n for start in no_walls_coords:\n for goal in no_walls_coords:\n start_idx, goal_idx = self._coord_to_idx(*start), self._coord_to_idx(*goal)\n path = get_path(predecessors, start_idx, goal_idx)\n actions = path_to_actions(path)\n optimal_paths[start_idx, goal_idx, :] = actions\n\n return jnp.array(optimal_paths), jnp.array(shortest_paths)\n\n @staticmethod\n def generate_random_maze(\n grid_size: int, complexity: float = 0.75, density: float = 0.75, seed: int = 0\n ):\n \"\"\"\n Generate a random maze array.\n Walls are encoded as 1 and free space as 0.\n\n Adapted from https://github.com/zuoxingdong/mazelab/blob/master/mazelab/generators/random_maze.py\n which is based on https://en.wikipedia.org/wiki/Maze_generation_algorithm\n \"\"\"\n assert grid_size % 2 == 1, \"Maze size must be odd\"\n grid_size_pad = grid_size + 2\n np_rng = np.random.default_rng(seed)\n\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (grid_size_pad + grid_size_pad)))\n density = int(density * ((grid_size_pad // 2) * (grid_size_pad // 2)))\n\n # Fill borders\n grid = np.zeros((grid_size_pad, grid_size_pad), dtype=bool)\n grid[0, :] = grid[-1, :] = 1\n grid[:, 0] = grid[:, -1] = 1\n\n # Make aisles\n for _ in range(density):\n x, y = (\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n np_rng.integers(0, grid_size_pad // 2 + 1) * 2,\n )\n grid[y, x] = 1\n for j in range(complexity):\n neighbours = []\n if x > 1:\n neighbours.append((y, x - 2))\n if x < grid_size_pad - 2:\n neighbours.append((y, x + 2))\n if y > 1:\n neighbours.append((y - 2, x))\n if y < grid_size_pad - 2:\n neighbours.append((y + 2, x))\n if len(neighbours):\n y_, x_ = neighbours[np_rng.integers(0, len(neighbours))]\n if grid[y_, x_] == 0:\n grid[y_, x_] = 1\n grid[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n x, y = x_, y_\n\n return grid.astype(int)[1:-1, 1:-1]" }, { "identifier": "CompositionalPreference", "path": "metax/data/envs/preference.py", "snippet": "class CompositionalPreference(Environment):\n # _layout = \"\"\"\\\n # wwwwwwwwwwwww\n # w w w\n # w w w\n # w w\n # w w w\n # w w w\n # ww wwww w\n # w www www\n # w w w\n # w w w\n # w w\n # w w w\n # wwwwwwwwwwwww\n # \"\"\"\n _layout = \"\"\"\\\nwwwwwww\nw w w\nw w w\nww ww\nw w w\nw w w\nwwwwwww\n\"\"\"\n _delta_position = jnp.array(\n [\n [0, 0], # NOTHING\n [-1, 0], # UP\n [0, 1], # RIGHT\n [1, 0], # DOWN\n [0, -1], # LEFT\n ]\n )\n\n def __init__(\n self,\n num_preferences: int, # ~=num_experts\n num_features: int, # ~=dim layer weight\n num_objects: int,\n num_hot: int, # ~= num_hot\n continuous_combinations: bool,\n discount: float,\n frac_ood: float,\n timelimit: int,\n task_support: str,\n seed: int,\n ) -> None:\n super().__init__()\n self.num_preferences = num_preferences\n self.num_features = num_features\n self.num_objects = num_objects\n self.num_hot = 
num_hot\n self.continuous_combinations = continuous_combinations\n self.discount = discount\n self.frac_ood = frac_ood\n self.timelimit = timelimit\n self.task_support = task_support\n self.seed = seed\n self.rng = jax.random.PRNGKey(seed)\n\n # We assume a fixed grid.\n self.grid = jnp.array(\n [list(map(lambda c: 0 if c == \" \" else 1, line)) for line in self._layout.splitlines()]\n )\n self.free_coord = jnp.array([(x, y) for (x, y) in zip(*np.where(self.grid == 0))])\n grid_idx_to_coord_matrix = jax.nn.one_hot(\n self.free_coord[:, 0] * self.grid.shape[1] + self.free_coord[:, 1],\n self.grid.shape[0] * self.grid.shape[1],\n )\n self.coord_matrix_to_grid_idx = jnp.argmax(grid_idx_to_coord_matrix.T, axis=-1)\n self.grid_idx_to_coord_matrix = jnp.argmax(grid_idx_to_coord_matrix, axis=-1)\n self.num_free_coord = self.free_coord.shape[0]\n self.num_available_distractors_config = 2**self.num_objects\n self.num_states = self.num_free_coord * self.num_available_distractors_config\n\n self.preference_basis = jax.random.normal(\n self.rng, (self.num_preferences, self.num_features)\n )\n\n # Generate all possible combinations of 1:num_hot experts (num_experts choose num_hot)\n preference_combin_all = []\n for h in range(1, self.num_hot + 1):\n perms = itertools.combinations(range(self.num_preferences), h)\n preference_idx = np.array(list(perms)).reshape(-1, h)\n preference_combin_all_k_hot = self.k_hot(preference_idx)\n preference_combin_all.append(preference_combin_all_k_hot)\n\n preference_combin_all = jnp.concatenate(preference_combin_all)\n\n if self.task_support == \"connected\" or self.task_support == \"disconnected\":\n assert self.num_hot == 2\n assert self.num_preferences > 4 and self.num_preferences % 2 == 0\n # connected: 0 1 2 3 4 5 6 7 01 12 23 34 45 56 67 70 02 13 24 35 46 57 60 71\n preference_combin = [self.k_hot(np.arange(self.num_preferences)[:, None])] # one-hots\n preference_combin.append(self.k_hot(np.stack(( # two-hots 01 12 23 34 45 56 67 70\n np.arange(self.num_preferences),\n (np.arange(self.num_preferences) + 1) % self.num_preferences)).T\n ))\n preference_combin.append(self.k_hot(np.stack(( # two-hots 02 13 24 35 46 57 60 71\n np.arange(self.num_preferences),\n (np.arange(self.num_preferences) + 2) % self.num_preferences)).T\n ))\n preference_combin_connected = np.concatenate(preference_combin)\n\n @partial(np.vectorize, signature=\"(n),(m,n)->()\")\n def elem_in_array(elem, array):\n return np.any(np.all(elem == array, axis=1))\n\n mask_connected = elem_in_array(preference_combin_all, preference_combin_connected)\n\n # disconnected: 1 and 2 hots out of (0,1,2,3) U 1 and 2 hots out of (4,5,6,7)\n mask_1_hot = jnp.sum(preference_combin_all, axis=-1) == 1\n mask_2_hot = jnp.sum(preference_combin_all, axis=-1) == 2\n mask_preference_combin_1 = jnp.all(preference_combin_all[:, :self.num_preferences // 2] == 0, axis=1)\n mask_preference_combin_2 = jnp.all(preference_combin_all[:, self.num_preferences // 2:] == 0, axis=1)\n\n mask_disconnected = (\n (mask_1_hot & mask_preference_combin_1) | (mask_1_hot & mask_preference_combin_2) | (\n mask_2_hot & mask_preference_combin_1) | (mask_2_hot & mask_preference_combin_2)\n )\n\n if self.task_support == \"connected\":\n mask_in_dist = mask_connected\n elif self.task_support == \"disconnected\":\n mask_in_dist = mask_disconnected\n\n mask_out_dist = ~(mask_connected | mask_disconnected)\n\n self.preference_in_dist = jnp.array(preference_combin_all[mask_in_dist])\n self.preference_out_dist = 
jnp.array(preference_combin_all[mask_out_dist])\n\n elif self.task_support == \"non_compositional\":\n # Non-compositional task support holds-out the last expert in the last layer\n mask_last_expert = preference_combin_all[:, -1] == 1\n self.preference_in_dist = jnp.array(preference_combin_all[~mask_last_expert])\n self.preference_out_dist = jnp.array(preference_combin_all[mask_last_expert])\n\n elif self.task_support == \"random\":\n # Randomly split task experts into in and out distribution tasks\n preference_combin_all = jax.random.permutation(self.rng, preference_combin_all)\n self.num_ood = int(len(preference_combin_all) * self.frac_ood)\n self.preference_in_dist = jnp.array(preference_combin_all[: -self.num_ood])\n self.preference_out_dist = jnp.array(preference_combin_all[-self.num_ood:])\n\n assert len(self.preference_in_dist) > 0\n assert len(self.preference_out_dist) > 0\n\n self.objects_all = jax.random.permutation(self.rng, np.arange(self.num_features))\n\n @partial(jnp.vectorize, excluded=(0,), signature=\"(n)->(m)\")\n def k_hot(self, ind):\n \"\"\"\n Convert a vector of indeces to a k-hot vector.\n Repeating an index does not change the result.\n \"\"\"\n return (jnp.sum(jax.nn.one_hot(ind, self.num_preferences), axis=0) > 0) * 1.0\n\n @property\n def num_actions(self) -> int:\n return len(ACTIONS)\n\n @property\n def observation_shape(self) -> Tuple[int]:\n return (*self.grid.shape, self.num_features + 2)\n\n def reset_goal(self, rng: PRNGKey, mode: str) -> Array:\n # Copied from hyperteacher\n rng_tasks, rng_weights = jax.random.split(rng)\n if mode in [\"test\", \"train\", \"ood\"]:\n task_experts = self.preference_out_dist if mode == \"ood\" else self.preference_in_dist\n task_ids = jax.random.choice(rng_tasks, len(task_experts), shape=())\n embeddings = task_experts[task_ids]\n\n if mode == \"ood\":\n task_ids += len(self.preference_in_dist)\n elif \"ood_\" in mode:\n hotness = int(mode.split(\"_\")[1])\n if hotness <= self.num_hot:\n # Filter the existing task_experts_out_dist for the given hotness\n task_ids = jax.random.choice(\n key=rng_tasks,\n a=len(self.preference_out_dist),\n p=1.0 * jnp.all(\n jnp.sum(self.preference_out_dist, axis=-1) == hotness, axis=-1\n ),\n shape=(),\n )\n embeddings = self.preference_out_dist[task_ids]\n elif hotness <= self.num_preferences:\n # Randomly sample task_experts - everything is ood here\n expert_indeces = jax.random.choice(rng_tasks, self.num_preferences, replace=False, shape=(hotness, ))\n embeddings = self.k_hot(expert_indeces)\n task_ids = -1 * jnp.ones(()) # No unique task IDs available here\n else:\n raise ValueError(f\"Invalid hotness {hotness}\")\n\n if self.continuous_combinations:\n # Sample weights uniformly from simplex (see Willms, 2021)\n weights = jax.random.exponential(rng_weights, shape=embeddings.shape)\n weights = weights * embeddings\n weights = weights / (jnp.sum(weights, axis=-1, keepdims=True) + 1)\n\n # Shift nonzero embeddings to the range [0.5, 1.0] to prevent adding further sparsity\n embeddings = (0.5 * weights + 0.5) * embeddings\n\n return embeddings, {\"task_id\": task_ids, \"embedding\": embeddings[None, :]}\n\n @partial(jax.jit, static_argnums=(0))\n def reset(\n self, rng: PRNGKey, goal: Array = None\n ) -> Tuple[PreferenceState, EnvironmentInteraction]:\n \"\"\"Resets the environment to a random, initial state\"\"\"\n rng_preference, rng_distractor, rng_pos = jax.random.split(rng, 3)\n\n if goal is None:\n # Sample a preference from train distribution if None specified\n goal, _ = 
self.reset_goal(rng_preference, mode=\"train\")\n\n preference = goal\n\n # Sample distractors\n distractors = jax.random.choice(\n key=rng_distractor,\n a=self.objects_all,\n shape=(self.num_objects,),\n replace=True,\n )\n\n positions = jax.random.choice(\n rng_pos, self.free_coord, shape=(self.num_objects + 1,), replace=False\n )\n\n env_state = PreferenceState(\n done=False,\n timestep=0,\n positions=positions,\n features=distractors,\n available_distractors=jnp.ones((self.num_objects,)),\n preference=preference,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state), reward=0.0, done=False, timestep=0\n )\n return env_state, emission\n\n @partial(jax.jit, static_argnums=(0))\n def _step(\n self, rng: PRNGKey, env_state, action: Array\n ) -> Tuple[PreferenceState, EnvironmentInteraction]:\n pos_agent = env_state.positions[-1][0], env_state.positions[-1][1]\n distractors_pos = env_state.positions[:-1]\n features = env_state.features\n available_distractors = env_state.available_distractors\n\n preference = env_state.preference\n\n next_pos_agent, next_available_distractors, reward = self._move(\n pos_agent, features, available_distractors, distractors_pos, preference, action\n )\n next_timestep = env_state.timestep + 1\n # Update state\n env_state = PreferenceState(\n # If NOTHING is performed, the environment immediately terminates.\n done=jnp.logical_or(next_timestep > self.timelimit, action == ACTIONS.NOTHING.value),\n timestep=next_timestep,\n positions=env_state.positions.at[-1].set(next_pos_agent),\n features=env_state.features,\n available_distractors=next_available_distractors,\n preference=env_state.preference,\n )\n\n emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=reward,\n done=env_state.done,\n timestep=env_state.timestep,\n )\n\n return env_state, emission\n\n def observe(self, env_state: PreferenceState) -> Array:\n distractor_idx = env_state.features\n pos_objects, pos_agent = env_state.positions[0:-1, :], env_state.positions[-1, :]\n\n # Build the grid\n grid = jnp.zeros((*self.grid.shape, self.num_features + 2))\n\n grid = grid.at[\n (pos_objects[:, 0]),\n (pos_objects[:, 1]),\n distractor_idx,\n ].set(env_state.available_distractors)\n grid = grid.at[pos_agent[0], pos_agent[1], -2].set(\n 1.0\n ) # agent encoded in penultimate channel\n grid = grid.at[:, :, -1].set(self.grid) # walls encoded in last channel\n\n return grid\n\n def _idx_to_state(self, idx):\n grid_idx = idx // self.num_available_distractors_config\n distractor_config_idx = idx % self.num_available_distractors_config\n coord_packed = self.grid_idx_to_coord_matrix[grid_idx]\n coord = coord_packed // self.grid.shape[1], coord_packed % self.grid.shape[1]\n return coord, (((distractor_config_idx & (1 << np.arange(self.num_objects)))) > 0).astype(\n int\n )\n\n def _state_to_idx(self, coord, available_distractors):\n coord_packed = coord[0] * self.grid.shape[1] + coord[1]\n grid_idx = self.coord_matrix_to_grid_idx[coord_packed]\n distractor_config_idx = available_distractors @ (2 ** jnp.arange(self.num_objects))\n return (grid_idx * self.num_available_distractors_config + distractor_config_idx).astype(\n int\n )\n\n def _move(\n self, pos_agent, features, available_distractors, distractors_pos, preference, action\n ):\n delta_position = self._delta_position[action]\n next_position = pos_agent[0] + delta_position[0], pos_agent[1] + delta_position[1]\n # TODO(@simon): Remove boundary walls to save some input dim and check if within grid size bounds 
instead\n next_pos_grid = (\n jax.nn.one_hot(next_position[0], self.grid.shape[0])[..., None]\n * jax.nn.one_hot(next_position[1], self.grid.shape[1])[..., None].T\n )\n hit_wall = (self.grid * next_pos_grid).sum()\n next_position = jax.lax.cond(hit_wall, lambda _: pos_agent, lambda _: next_position, None)\n picked_distractor = (next_position[0] == distractors_pos[:, 0]) * (\n next_position[1] == distractors_pos[:, 1]\n )\n\n return (\n next_position,\n available_distractors * (1 - picked_distractor),\n (\n (picked_distractor * available_distractors)\n @ jax.nn.one_hot(features, self.num_features)\n @ self.preference_basis.T\n @ preference\n ),\n )\n\n @partial(jax.jit, static_argnums=(0))\n def demonstrate(self, rng, env_state):\n \"\"\"Given a state, compute the optimal trajectory to the goal.\"\"\"\n action_value_init = jnp.zeros((self.num_states, self.num_actions))\n\n def next_idx_and_reward(idx, action):\n coord, available_distractors = self._idx_to_state(idx)\n next_coord, next_available_feature, reward = self._move(\n coord,\n env_state.features,\n available_distractors,\n env_state.positions[:-1],\n env_state.preference,\n action,\n )\n next_idx = self._state_to_idx(next_coord, next_available_feature)\n # Return the maximum value\n return next_idx, reward\n\n transition_map, reward_map = jax.vmap(\n jax.vmap(next_idx_and_reward, in_axes=(None, 0)), in_axes=(0, None)\n )(jnp.arange(self.num_states), jnp.arange(self.num_actions))\n\n def bellman_backup(action_value, t):\n def next_value(idx, action):\n next_idx = transition_map[idx, action]\n reward = reward_map[idx, action]\n # Return the maximum value\n return self.discount * action_value[next_idx].max() + reward\n\n next_action_value = jax.vmap(\n jax.vmap(next_value, in_axes=(None, 0)), in_axes=(0, None)\n )(jnp.arange(self.num_states), jnp.arange(self.num_actions))\n return next_action_value, None\n\n action_value, _ = jax.lax.scan(\n bellman_backup, action_value_init, jnp.arange(self.timelimit)\n )\n\n def env_step(carry, t):\n rng, env_state = carry\n rng, rng_step = jax.random.split(rng)\n pos_agent = env_state.positions[-1]\n idx = self._state_to_idx(pos_agent, env_state.available_distractors)\n action = jnp.argmax(action_value[idx])\n env_state, emission = self.step(rng_step, env_state, action)\n return (rng, env_state), (emission, action_value[idx])\n\n (_, _), (trajectory, action_values) = jax.lax.scan(\n env_step, (rng, env_state), jnp.arange(self.timelimit)\n )\n\n # Append initial emission and remove last emission from trajectory\n initial_emission = EnvironmentInteraction(\n observation=self.observe(env_state),\n reward=0.0,\n done=False,\n timestep=0,\n )\n trajectory = jtu.tree_map(\n lambda x, y: jnp.concatenate((jnp.expand_dims(x, axis=0), y)),\n initial_emission,\n trajectory,\n )\n trajectory = jtu.tree_map(lambda x: x[:-1], trajectory)\n\n return trajectory, action_values" }, { "identifier": "Dataloader", "path": "metax/data/base.py", "snippet": "class Dataloader(abc.ABC):\n def __init__(self, input_shape: Tuple[int], output_dim: int):\n self.input_shape = input_shape\n self.output_dim = output_dim\n\n @abc.abstractproperty\n def __len__(self):\n pass\n\n @abc.abstractproperty\n def sample_input(self):\n # Sample input should include batch dimension\n pass\n\n @abc.abstractmethod\n def __iter__(self):\n pass" }, { "identifier": "MetaDataset", "path": "metax/data/base.py", "snippet": "class MetaDataset(NamedTuple):\n train: Union[Dataset, MultitaskDataset]\n test: Union[Dataset, MultitaskDataset]" }, { 
"identifier": "MultitaskDataset", "path": "metax/data/base.py", "snippet": "class MultitaskDataset(NamedTuple):\n x: Array\n y: Array\n task_id: Array\n info: Dict = dict()" } ]
from functools import partial
from typing import Optional

from chex import PRNGKey

from metax.data.envs.base import Environment
from metax.data.envs.grid import CompositionalGrid
from metax.data.envs.preference import CompositionalPreference

from .base import Dataloader, MetaDataset, MultitaskDataset

import jax
import jax.numpy as jnp
import jax.tree_util as jtu
11048
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class ImitationMetaDataloader(Dataloader): def __init__( self, env: Environment, num_tasks: int, shots_train: int, shots_test: int, meta_batch_size: int, mode: str, train_test_split: bool, rng: PRNGKey, ): super().__init__(input_shape=env.observation_shape, output_dim=env.num_actions) self.env = env self.num_tasks = num_tasks self.shots_train = shots_train self.shots_test = shots_test self.meta_batch_size = meta_batch_size self.mode = mode self.train_test_split = train_test_split self.fixed_rng = rng assert num_tasks % meta_batch_size == 0, "num_tasks must be divisible by meta_batch_size" self.num_steps = num_tasks // meta_batch_size @property def sample_input(self): return jnp.zeros((1,) + self.env.observation_shape) def __len__(self): return self.num_steps def __iter__(self): for rng in jax.random.split(self.fixed_rng, self.num_steps): # Sample batch and wrap as MetaDataset rngs_batch = jax.random.split(rng, self.meta_batch_size) yield self.sample_metatask(rngs_batch) @partial(jax.jit, static_argnames="self") @partial(jax.vmap, in_axes=(None, 0)) def sample_metatask(self, rng: PRNGKey) -> MetaDataset: rng_goal, rng_task = jax.random.split(rng, 2) goal, info = self.env.reset_goal(rng_goal, mode=self.mode) @jax.vmap def sample_task(rng): rng_reset, rng_demo = jax.random.split(rng, 2) env_state, _ = self.env.reset(rng_reset, goal=goal) trajectory, actions = self.env.demonstrate(rng_demo, env_state) return MultitaskDataset( x=trajectory.observation, y=actions, task_id=jnp.full(actions.shape[:1], info["task_id"]), info={ "mask": ~trajectory.done, "embeddings": jnp.repeat(info["embedding"][None, :], actions.shape[0], axis=0), }, ) rngs_task = jax.random.split(rng_task, self.shots_train + self.shots_test) train_and_test_task = sample_task(rngs_task) if self.train_test_split: # Split into train and test set return MetaDataset( train=jtu.tree_map( lambda x: x[:self.shots_train].reshape(-1, *x.shape[2:]), train_and_test_task ), test=jtu.tree_map( lambda x: x[self.shots_train:].reshape(-1, *x.shape[2:]), train_and_test_task ), ) else: # No train_test split means, meta.train == meta.test set return MetaDataset( train=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), test=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), ) def create_imitation_metaloader( name, meta_batch_size, shots_train, shots_test, train_test_split, num_tasks_train, num_tasks_test, num_tasks_valid, num_tasks_ood: 
Optional[int] = None, seed=None, **kwargs, ): ood_sets_hot = None if name == "compositional_grid":
""" Copyright (c) Simon Schug All rights reserved. MIT License Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class ImitationMetaDataloader(Dataloader): def __init__( self, env: Environment, num_tasks: int, shots_train: int, shots_test: int, meta_batch_size: int, mode: str, train_test_split: bool, rng: PRNGKey, ): super().__init__(input_shape=env.observation_shape, output_dim=env.num_actions) self.env = env self.num_tasks = num_tasks self.shots_train = shots_train self.shots_test = shots_test self.meta_batch_size = meta_batch_size self.mode = mode self.train_test_split = train_test_split self.fixed_rng = rng assert num_tasks % meta_batch_size == 0, "num_tasks must be divisible by meta_batch_size" self.num_steps = num_tasks // meta_batch_size @property def sample_input(self): return jnp.zeros((1,) + self.env.observation_shape) def __len__(self): return self.num_steps def __iter__(self): for rng in jax.random.split(self.fixed_rng, self.num_steps): # Sample batch and wrap as MetaDataset rngs_batch = jax.random.split(rng, self.meta_batch_size) yield self.sample_metatask(rngs_batch) @partial(jax.jit, static_argnames="self") @partial(jax.vmap, in_axes=(None, 0)) def sample_metatask(self, rng: PRNGKey) -> MetaDataset: rng_goal, rng_task = jax.random.split(rng, 2) goal, info = self.env.reset_goal(rng_goal, mode=self.mode) @jax.vmap def sample_task(rng): rng_reset, rng_demo = jax.random.split(rng, 2) env_state, _ = self.env.reset(rng_reset, goal=goal) trajectory, actions = self.env.demonstrate(rng_demo, env_state) return MultitaskDataset( x=trajectory.observation, y=actions, task_id=jnp.full(actions.shape[:1], info["task_id"]), info={ "mask": ~trajectory.done, "embeddings": jnp.repeat(info["embedding"][None, :], actions.shape[0], axis=0), }, ) rngs_task = jax.random.split(rng_task, self.shots_train + self.shots_test) train_and_test_task = sample_task(rngs_task) if self.train_test_split: # Split into train and test set return MetaDataset( train=jtu.tree_map( lambda x: x[:self.shots_train].reshape(-1, *x.shape[2:]), train_and_test_task ), test=jtu.tree_map( lambda x: x[self.shots_train:].reshape(-1, *x.shape[2:]), train_and_test_task ), ) else: # No train_test split means, meta.train == meta.test set return MetaDataset( train=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), test=jtu.tree_map(lambda x: x.reshape(-1, *x.shape[2:]), train_and_test_task), ) def create_imitation_metaloader( name, meta_batch_size, shots_train, shots_test, train_test_split, num_tasks_train, num_tasks_test, num_tasks_valid, num_tasks_ood: 
Optional[int] = None, seed=None, **kwargs, ): ood_sets_hot = None if name == "compositional_grid":
env = CompositionalGrid(
1
2023-12-22 16:35:49+00:00
16k
AContesini/Convert_PDF_to_DOCX_or_vice-versa
venv/Lib/site-packages/tqdm/cli.py
[ { "identifier": "TqdmKeyError", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class TqdmKeyError(KeyError):\n pass" }, { "identifier": "TqdmTypeError", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class TqdmTypeError(TypeError):\n pass" }, { "identifier": "tqdm", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class tqdm(Comparable):\n \"\"\"\n Decorate an iterable object, returning an iterator which acts exactly\n like the original iterable, but prints a dynamically updating\n progressbar every time a value is requested.\n\n Parameters\n ----------\n iterable : iterable, optional\n Iterable to decorate with a progressbar.\n Leave blank to manually manage the updates.\n desc : str, optional\n Prefix for the progressbar.\n total : int or float, optional\n The number of expected iterations. If unspecified,\n len(iterable) is used if possible. If float(\"inf\") or as a last\n resort, only basic progress statistics are displayed\n (no ETA, no progressbar).\n If `gui` is True and this parameter needs subsequent updating,\n specify an initial arbitrary large positive number,\n e.g. 9e9.\n leave : bool, optional\n If [default: True], keeps all traces of the progressbar\n upon termination of iteration.\n If `None`, will leave only if `position` is `0`.\n file : `io.TextIOWrapper` or `io.StringIO`, optional\n Specifies where to output the progress messages\n (default: sys.stderr). Uses `file.write(str)` and `file.flush()`\n methods. For encoding, see `write_bytes`.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes the progressbar to stay within this bound.\n If unspecified, attempts to use environment width. The\n fallback is a meter width of 10 and no limit for the counter and\n statistics. If 0, will not print any meter (only stats).\n mininterval : float, optional\n Minimum progress display update interval [default: 0.1] seconds.\n maxinterval : float, optional\n Maximum progress display update interval [default: 10] seconds.\n Automatically adjusts `miniters` to correspond to `mininterval`\n after long display update lag. Only works if `dynamic_miniters`\n or monitor thread is enabled.\n miniters : int or float, optional\n Minimum progress display update interval, in iterations.\n If 0 and `dynamic_miniters`, will automatically adjust to equal\n `mininterval` (more CPU efficient, good for tight loops).\n If > 0, will skip display of specified number of iterations.\n Tweak this and `mininterval` to get very efficient loops.\n If your progress is erratic with both fast and slow iterations\n (network, skipping items, etc) you should set miniters=1.\n ascii : bool or str, optional\n If unspecified or False, use unicode (smooth blocks) to fill\n the meter. The fallback is to use ASCII characters \" 123456789#\".\n disable : bool, optional\n Whether to disable the entire progressbar wrapper\n [default: False]. If set to None, disable on non-TTY.\n unit : str, optional\n String that will be used to define the unit of each iteration\n [default: it].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be reduced/scaled\n automatically and a metric prefix following the\n International System of Units standard will be added\n (kilo, mega, etc.) [default: False]. 
If any other non-zero\n number, will scale `total` and `n`.\n dynamic_ncols : bool, optional\n If set, constantly alters `ncols` and `nrows` to the\n environment (allowing for window resizes) [default: False].\n smoothing : float, optional\n Exponential moving average smoothing factor for speed estimates\n (ignored in GUI mode). Ranges from 0 (average speed) to 1\n (current/instantaneous speed) [default: 0.3].\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n initial : int or float, optional\n The initial counter value. Useful when restarting a progress\n bar [default: 0]. If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n position : int, optional\n Specify the line offset to print this bar (starting from 0)\n Automatic if unspecified.\n Useful to manage multiple bars at once (eg, from threads).\n postfix : dict or *, optional\n Specify additional stats to display at the end of the bar.\n Calls `set_postfix(**postfix)` if possible (dict).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n write_bytes : bool, optional\n Whether to write bytes. If (default: False) will write unicode.\n lock_args : tuple, optional\n Passed to `refresh` for intermediate output\n (initialisation, iterating, and updating).\n nrows : int, optional\n The screen height. If specified, hides nested bars outside this\n bound. If unspecified, attempts to use environment height.\n The fallback is 20.\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n delay : float, optional\n Don't display until [default: 0] seconds have elapsed.\n gui : bool, optional\n WARNING: internal parameter - do not use.\n Use tqdm.gui.tqdm(...) instead. 
If set, will attempt to use\n matplotlib animations for a graphical output [default: False].\n\n Returns\n -------\n out : decorated iterator.\n \"\"\"\n\n monitor_interval = 10 # set to 0 to disable the thread\n monitor = None\n _instances = WeakSet()\n\n @staticmethod\n def format_sizeof(num, suffix='', divisor=1000):\n \"\"\"\n Formats a number (greater than unity) with SI Order of Magnitude\n prefixes.\n\n Parameters\n ----------\n num : float\n Number ( >= 1) to format.\n suffix : str, optional\n Post-postfix [default: ''].\n divisor : float, optional\n Divisor between prefixes [default: 1000].\n\n Returns\n -------\n out : str\n Number with Order of Magnitude SI unit postfix.\n \"\"\"\n for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 999.5:\n if abs(num) < 99.95:\n if abs(num) < 9.995:\n return '{0:1.2f}'.format(num) + unit + suffix\n return '{0:2.1f}'.format(num) + unit + suffix\n return '{0:3.0f}'.format(num) + unit + suffix\n num /= divisor\n return '{0:3.1f}Y'.format(num) + suffix\n\n @staticmethod\n def format_interval(t):\n \"\"\"\n Formats a number of seconds as a clock time, [H:]MM:SS\n\n Parameters\n ----------\n t : int\n Number of seconds.\n\n Returns\n -------\n out : str\n [H:]MM:SS\n \"\"\"\n mins, s = divmod(int(t), 60)\n h, m = divmod(mins, 60)\n if h:\n return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)\n else:\n return '{0:02d}:{1:02d}'.format(m, s)\n\n @staticmethod\n def format_num(n):\n \"\"\"\n Intelligent scientific notation (.3g).\n\n Parameters\n ----------\n n : int or float or Numeric\n A Number.\n\n Returns\n -------\n out : str\n Formatted number.\n \"\"\"\n f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')\n n = str(n)\n return f if len(f) < len(n) else n\n\n @staticmethod\n def status_printer(file):\n \"\"\"\n Manage the printing and in-place updating of a line of characters.\n Note that if the string is longer than a line, then in-place\n updating may not work (it will print a new line at each refresh).\n \"\"\"\n fp = file\n fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover\n if fp in (sys.stderr, sys.stdout):\n getattr(sys.stderr, 'flush', lambda: None)()\n getattr(sys.stdout, 'flush', lambda: None)()\n\n def fp_write(s):\n fp.write(str(s))\n fp_flush()\n\n last_len = [0]\n\n def print_status(s):\n len_s = disp_len(s)\n fp_write('\\r' + s + (' ' * max(last_len[0] - len_s, 0)))\n last_len[0] = len_s\n\n return print_status\n\n @staticmethod\n def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',\n unit_scale=False, rate=None, bar_format=None, postfix=None,\n unit_divisor=1000, initial=0, colour=None, **extra_kwargs):\n \"\"\"\n Return a string-based progress bar given some parameters\n\n Parameters\n ----------\n n : int or float\n Number of finished iterations.\n total : int or float\n The expected total number of iterations. If meaningless (None),\n only basic progress statistics are displayed (no ETA).\n elapsed : float\n Number of seconds passed since start.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes `{bar}` to stay within this bound\n [default: None]. If `0`, will not print any bar (only stats).\n The fallback is `{bar:10}`.\n prefix : str, optional\n Prefix message (included in total width) [default: ''].\n Use as {desc} in bar_format string.\n ascii : bool, optional or str, optional\n If not set, use unicode (smooth blocks) to fill the meter\n [default: False]. 
The fallback is to use ASCII characters\n \" 123456789#\".\n unit : str, optional\n The iteration unit [default: 'it'].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be printed with an\n appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)\n [default: False]. If any other non-zero number, will scale\n `total` and `n`.\n rate : float, optional\n Manual override for iteration rate.\n If [default: None], uses n/elapsed.\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n postfix : *, optional\n Similar to `prefix`, but placed at the end\n (e.g. for additional stats).\n Note: postfix is usually a string (not a dict) for this method,\n and will if possible be set to postfix = ', ' + postfix.\n However other types are supported (#382).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n initial : int or float, optional\n The initial counter value [default: 0].\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n\n Returns\n -------\n out : Formatted meter and stats, ready to display.\n \"\"\"\n\n # sanity check: total\n if total and n >= (total + 0.5): # allow float imprecision (#849)\n total = None\n\n # apply custom scale if necessary\n if unit_scale and unit_scale not in (True, 1):\n if total:\n total *= unit_scale\n n *= unit_scale\n if rate:\n rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt\n unit_scale = False\n\n elapsed_str = tqdm.format_interval(elapsed)\n\n # if unspecified, attempt to use rate = average speed\n # (we allow manual override since predicting time is an arcane art)\n if rate is None and elapsed:\n rate = (n - initial) / elapsed\n inv_rate = 1 / rate if rate else None\n format_sizeof = tqdm.format_sizeof\n rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else\n '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'\n rate_inv_fmt = (\n (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))\n if inv_rate else '?') + 's/' + unit\n rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt\n\n if unit_scale:\n n_fmt = format_sizeof(n, divisor=unit_divisor)\n total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'\n else:\n n_fmt = str(n)\n total_fmt = str(total) if total is not None else '?'\n\n try:\n postfix = ', ' + postfix if postfix else ''\n except TypeError:\n pass\n\n remaining = (total - n) / rate if rate and total else 0\n remaining_str = tqdm.format_interval(remaining) if rate else '?'\n try:\n eta_dt = (datetime.now() + timedelta(seconds=remaining)\n if rate and total else datetime.utcfromtimestamp(0))\n except OverflowError:\n eta_dt = datetime.max\n\n # format the stats displayed to the left and right sides of the bar\n if prefix:\n # old prefix setup work around\n bool_prefix_colon_already = (prefix[-2:] == \": \")\n l_bar = prefix if bool_prefix_colon_already else prefix + \": \"\n else:\n l_bar = ''\n\n r_bar = f'| 
{n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'\n\n # Custom bar formatting\n # Populate a dict with all available progress indicators\n format_dict = {\n # slight extension of self.format_dict\n 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,\n 'elapsed': elapsed_str, 'elapsed_s': elapsed,\n 'ncols': ncols, 'desc': prefix or '', 'unit': unit,\n 'rate': inv_rate if inv_rate and inv_rate > 1 else rate,\n 'rate_fmt': rate_fmt, 'rate_noinv': rate,\n 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,\n 'rate_inv_fmt': rate_inv_fmt,\n 'postfix': postfix, 'unit_divisor': unit_divisor,\n 'colour': colour,\n # plus more useful definitions\n 'remaining': remaining_str, 'remaining_s': remaining,\n 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,\n **extra_kwargs}\n\n # total is known: we can predict some stats\n if total:\n # fractional and percentage progress\n frac = n / total\n percentage = frac * 100\n\n l_bar += '{0:3.0f}%|'.format(percentage)\n\n if ncols == 0:\n return l_bar[:-1] + r_bar[1:]\n\n format_dict.update(l_bar=l_bar)\n if bar_format:\n format_dict.update(percentage=percentage)\n\n # auto-remove colon for empty `{desc}`\n if not prefix:\n bar_format = bar_format.replace(\"{desc}: \", '')\n else:\n bar_format = \"{l_bar}{bar}{r_bar}\"\n\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar # no `{bar}`; nothing else to do\n\n # Formatting progress bar space available for bar's display\n full_bar = Bar(frac,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,\n colour=colour)\n if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):\n bar_format = str(bar_format)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n\n elif bar_format:\n # user-specified bar_format but no total\n l_bar += '|'\n format_dict.update(l_bar=l_bar, percentage=0)\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar\n full_bar = Bar(0,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.BLANK, colour=colour)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n else:\n # no total: no progressbar, ETA, just progress stats\n return (f'{(prefix + \": \") if prefix else \"\"}'\n f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')\n\n def __new__(cls, *_, **__):\n instance = object.__new__(cls)\n with cls.get_lock(): # also constructs lock if non-existent\n cls._instances.add(instance)\n # create monitoring thread\n if cls.monitor_interval and (cls.monitor is None\n or not cls.monitor.report()):\n try:\n cls.monitor = TMonitor(cls, cls.monitor_interval)\n except Exception as e: # pragma: nocover\n warn(\"tqdm:disabling monitor support\"\n \" (monitor_interval = 0) due to:\\n\" + str(e),\n TqdmMonitorWarning, stacklevel=2)\n cls.monitor_interval = 0\n return instance\n\n @classmethod\n def _get_free_pos(cls, instance=None):\n \"\"\"Skips specified instance.\"\"\"\n positions = {abs(inst.pos) for inst in cls._instances\n if inst is not instance and hasattr(inst, \"pos\")}\n return min(set(range(len(positions) + 1)).difference(positions))\n\n @classmethod\n def _decr_instances(cls, instance):\n \"\"\"\n Remove from list and reposition another unfixed bar\n to fill the new gap.\n\n This means that by default (where all nested bars are 
unfixed),\n order is not maintained but screen flicker/blank space is minimised.\n (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)\n \"\"\"\n with cls._lock:\n try:\n cls._instances.remove(instance)\n except KeyError:\n # if not instance.gui: # pragma: no cover\n # raise\n pass # py2: maybe magically removed already\n # else:\n if not instance.gui:\n last = (instance.nrows or 20) - 1\n # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)\n instances = list(filter(\n lambda i: hasattr(i, \"pos\") and last <= i.pos,\n cls._instances))\n # set first found to current `pos`\n if instances:\n inst = min(instances, key=lambda i: i.pos)\n inst.clear(nolock=True)\n inst.pos = abs(instance.pos)\n\n @classmethod\n def write(cls, s, file=None, end=\"\\n\", nolock=False):\n \"\"\"Print a message via tqdm (without overlap with bars).\"\"\"\n fp = file if file is not None else sys.stdout\n with cls.external_write_mode(file=file, nolock=nolock):\n # Write the message\n fp.write(s)\n fp.write(end)\n\n @classmethod\n @contextmanager\n def external_write_mode(cls, file=None, nolock=False):\n \"\"\"\n Disable tqdm within context and refresh tqdm when exits.\n Useful when writing to standard output stream\n \"\"\"\n fp = file if file is not None else sys.stdout\n\n try:\n if not nolock:\n cls.get_lock().acquire()\n # Clear all bars\n inst_cleared = []\n for inst in getattr(cls, '_instances', []):\n # Clear instance if in the target output file\n # or if write output + tqdm output are both either\n # sys.stdout or sys.stderr (because both are mixed in terminal)\n if hasattr(inst, \"start_t\") and (inst.fp == fp or all(\n f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):\n inst.clear(nolock=True)\n inst_cleared.append(inst)\n yield\n # Force refresh display of bars we cleared\n for inst in inst_cleared:\n inst.refresh(nolock=True)\n finally:\n if not nolock:\n cls._lock.release()\n\n @classmethod\n def set_lock(cls, lock):\n \"\"\"Set the global lock.\"\"\"\n cls._lock = lock\n\n @classmethod\n def get_lock(cls):\n \"\"\"Get the global lock. 
Construct it if it does not exist.\"\"\"\n if not hasattr(cls, '_lock'):\n cls._lock = TqdmDefaultWriteLock()\n return cls._lock\n\n @classmethod\n def pandas(cls, **tqdm_kwargs):\n \"\"\"\n Registers the current `tqdm` class with\n pandas.core.\n ( frame.DataFrame\n | series.Series\n | groupby.(generic.)DataFrameGroupBy\n | groupby.(generic.)SeriesGroupBy\n ).progress_apply\n\n A new instance will be created every time `progress_apply` is called,\n and each instance will automatically `close()` upon completion.\n\n Parameters\n ----------\n tqdm_kwargs : arguments for the tqdm instance\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> from tqdm import tqdm\n >>> from tqdm.gui import tqdm as tqdm_gui\n >>>\n >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))\n >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc\n >>> # Now you can use `progress_apply` instead of `apply`\n >>> df.groupby(0).progress_apply(lambda x: x**2)\n\n References\n ----------\n <https://stackoverflow.com/questions/18603270/\\\n progress-indicator-during-pandas-operations-python>\n \"\"\"\n from warnings import catch_warnings, simplefilter\n\n from pandas.core.frame import DataFrame\n from pandas.core.series import Series\n try:\n with catch_warnings():\n simplefilter(\"ignore\", category=FutureWarning)\n from pandas import Panel\n except ImportError: # pandas>=1.2.0\n Panel = None\n Rolling, Expanding = None, None\n try: # pandas>=1.0.0\n from pandas.core.window.rolling import _Rolling_and_Expanding\n except ImportError:\n try: # pandas>=0.18.0\n from pandas.core.window import _Rolling_and_Expanding\n except ImportError: # pandas>=1.2.0\n try: # pandas>=1.2.0\n from pandas.core.window.expanding import Expanding\n from pandas.core.window.rolling import Rolling\n _Rolling_and_Expanding = Rolling, Expanding\n except ImportError: # pragma: no cover\n _Rolling_and_Expanding = None\n try: # pandas>=0.25.0\n from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy\n from pandas.core.groupby.generic import DataFrameGroupBy\n except ImportError: # pragma: no cover\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy\n except ImportError:\n from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import GroupBy\n except ImportError: # pragma: no cover\n from pandas.core.groupby import GroupBy\n\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import PanelGroupBy\n except ImportError:\n try:\n from pandas.core.groupby import PanelGroupBy\n except ImportError: # pandas>=0.25.0\n PanelGroupBy = None\n\n tqdm_kwargs = tqdm_kwargs.copy()\n deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]\n\n def inner_generator(df_function='apply'):\n def inner(df, func, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n df : (DataFrame|Series)[GroupBy]\n Data (may be grouped).\n func : function\n To be applied on the (grouped) data.\n **kwargs : optional\n Transmitted to `df.apply()`.\n \"\"\"\n\n # Precompute total iterations\n total = tqdm_kwargs.pop(\"total\", getattr(df, 'ngroups', None))\n if total is None: # not grouped\n if df_function == 'applymap':\n total = df.size\n elif isinstance(df, Series):\n total = len(df)\n elif (_Rolling_and_Expanding is None or\n not isinstance(df, _Rolling_and_Expanding)):\n # DataFrame or Panel\n axis = kwargs.get('axis', 0)\n if axis == 'index':\n axis = 0\n elif axis == 'columns':\n axis = 1\n # when axis=0, 
total is shape[axis1]\n total = df.size // df.shape[axis]\n\n # Init bar\n if deprecated_t[0] is not None:\n t = deprecated_t[0]\n deprecated_t[0] = None\n else:\n t = cls(total=total, **tqdm_kwargs)\n\n if len(args) > 0:\n # *args intentionally not supported (see #244, #299)\n TqdmDeprecationWarning(\n \"Except func, normal arguments are intentionally\" +\n \" not supported by\" +\n \" `(DataFrame|Series|GroupBy).progress_apply`.\" +\n \" Use keyword arguments instead.\",\n fp_write=getattr(t.fp, 'write', sys.stderr.write))\n\n try: # pandas>=1.3.0\n from pandas.core.common import is_builtin_func\n except ImportError:\n is_builtin_func = df._is_builtin_func\n try:\n func = is_builtin_func(func)\n except TypeError:\n pass\n\n # Define bar updating wrapper\n def wrapper(*args, **kwargs):\n # update tbar correctly\n # it seems `pandas apply` calls `func` twice\n # on the first column/row to decide whether it can\n # take a fast or slow code path; so stop when t.total==t.n\n t.update(n=1 if not t.total or t.n < t.total else 0)\n return func(*args, **kwargs)\n\n # Apply the provided function (in **kwargs)\n # on the df using our wrapper (which provides bar updating)\n try:\n return getattr(df, df_function)(wrapper, **kwargs)\n finally:\n t.close()\n\n return inner\n\n # Monkeypatch pandas to provide easy methods\n # Enable custom tqdm progress in pandas!\n Series.progress_apply = inner_generator()\n SeriesGroupBy.progress_apply = inner_generator()\n Series.progress_map = inner_generator('map')\n SeriesGroupBy.progress_map = inner_generator('map')\n\n DataFrame.progress_apply = inner_generator()\n DataFrameGroupBy.progress_apply = inner_generator()\n DataFrame.progress_applymap = inner_generator('applymap')\n\n if Panel is not None:\n Panel.progress_apply = inner_generator()\n if PanelGroupBy is not None:\n PanelGroupBy.progress_apply = inner_generator()\n\n GroupBy.progress_apply = inner_generator()\n GroupBy.progress_aggregate = inner_generator('aggregate')\n GroupBy.progress_transform = inner_generator('transform')\n\n if Rolling is not None and Expanding is not None:\n Rolling.progress_apply = inner_generator()\n Expanding.progress_apply = inner_generator()\n elif _Rolling_and_Expanding is not None:\n _Rolling_and_Expanding.progress_apply = inner_generator()\n\n # override defaults via env vars\n @envwrap(\"TQDM_\", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,\n 'position': int, 'nrows': int})\n def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,\n ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,\n ascii=None, disable=False, unit='it', unit_scale=False,\n dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,\n position=None, postfix=None, unit_divisor=1000, write_bytes=False,\n lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,\n **kwargs):\n \"\"\"see tqdm.tqdm for arguments\"\"\"\n if file is None:\n file = sys.stderr\n\n if write_bytes:\n # Despite coercing unicode into bytes, py2 sys.std* streams\n # should have bytes written to them.\n file = SimpleTextIOWrapper(\n file, encoding=getattr(file, 'encoding', None) or 'utf-8')\n\n file = DisableOnWriteError(file, tqdm_instance=self)\n\n if disable is None and hasattr(file, \"isatty\") and not file.isatty():\n disable = True\n\n if total is None and iterable is not None:\n try:\n total = len(iterable)\n except (TypeError, AttributeError):\n total = None\n if total == float(\"inf\"):\n # Infinite iterations, behave same as unknown\n total = None\n\n if 
disable:\n self.iterable = iterable\n self.disable = disable\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n self.n = initial\n self.total = total\n self.leave = leave\n return\n\n if kwargs:\n self.disable = True\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n raise (\n TqdmDeprecationWarning(\n \"`nested` is deprecated and automated.\\n\"\n \"Use `position` instead for manual control.\\n\",\n fp_write=getattr(file, 'write', sys.stderr.write))\n if \"nested\" in kwargs else\n TqdmKeyError(\"Unknown argument(s): \" + str(kwargs)))\n\n # Preprocess the arguments\n if (\n (ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))\n ) or dynamic_ncols: # pragma: no cover\n if dynamic_ncols:\n dynamic_ncols = _screen_shape_wrapper()\n if dynamic_ncols:\n ncols, nrows = dynamic_ncols(file)\n else:\n _dynamic_ncols = _screen_shape_wrapper()\n if _dynamic_ncols:\n _ncols, _nrows = _dynamic_ncols(file)\n if ncols is None:\n ncols = _ncols\n if nrows is None:\n nrows = _nrows\n\n if miniters is None:\n miniters = 0\n dynamic_miniters = True\n else:\n dynamic_miniters = False\n\n if mininterval is None:\n mininterval = 0\n\n if maxinterval is None:\n maxinterval = 0\n\n if ascii is None:\n ascii = not _supports_unicode(file)\n\n if bar_format and ascii is not True and not _is_ascii(ascii):\n # Convert bar format into unicode since terminal uses unicode\n bar_format = str(bar_format)\n\n if smoothing is None:\n smoothing = 0\n\n # Store the arguments\n self.iterable = iterable\n self.desc = desc or ''\n self.total = total\n self.leave = leave\n self.fp = file\n self.ncols = ncols\n self.nrows = nrows\n self.mininterval = mininterval\n self.maxinterval = maxinterval\n self.miniters = miniters\n self.dynamic_miniters = dynamic_miniters\n self.ascii = ascii\n self.disable = disable\n self.unit = unit\n self.unit_scale = unit_scale\n self.unit_divisor = unit_divisor\n self.initial = initial\n self.lock_args = lock_args\n self.delay = delay\n self.gui = gui\n self.dynamic_ncols = dynamic_ncols\n self.smoothing = smoothing\n self._ema_dn = EMA(smoothing)\n self._ema_dt = EMA(smoothing)\n self._ema_miniters = EMA(smoothing)\n self.bar_format = bar_format\n self.postfix = None\n self.colour = colour\n self._time = time\n if postfix:\n try:\n self.set_postfix(refresh=False, **postfix)\n except TypeError:\n self.postfix = postfix\n\n # Init the iterations counters\n self.last_print_n = initial\n self.n = initial\n\n # if nested, at initial sp() call we replace '\\r' by '\\n' to\n # not overwrite the outer progress bar\n with self._lock:\n # mark fixed positions as negative\n self.pos = self._get_free_pos(self) if position is None else -position\n\n if not gui:\n # Initialize the screen printer\n self.sp = self.status_printer(self.fp)\n if delay <= 0:\n self.refresh(lock_args=self.lock_args)\n\n # Init the time counter\n self.last_print_t = self._time()\n # NB: Avoid race conditions by setting start_t at the very end of init\n self.start_t = self.last_print_t\n\n def __bool__(self):\n if self.total is not None:\n return self.total > 0\n if self.iterable is None:\n raise TypeError('bool() undefined when iterable == total == None')\n return bool(self.iterable)\n\n def __len__(self):\n return (\n self.total if self.iterable is None\n else self.iterable.shape[0] if hasattr(self.iterable, \"shape\")\n else len(self.iterable) if hasattr(self.iterable, \"__len__\")\n else self.iterable.__length_hint__() if 
hasattr(self.iterable, \"__length_hint__\")\n else getattr(self, \"total\", None))\n\n def __reversed__(self):\n try:\n orig = self.iterable\n except AttributeError:\n raise TypeError(\"'tqdm' object is not reversible\")\n else:\n self.iterable = reversed(self.iterable)\n return self.__iter__()\n finally:\n self.iterable = orig\n\n def __contains__(self, item):\n contains = getattr(self.iterable, '__contains__', None)\n return contains(item) if contains is not None else item in self.__iter__()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n self.close()\n except AttributeError:\n # maybe eager thread cleanup upon external error\n if (exc_type, exc_value, traceback) == (None, None, None):\n raise\n warn(\"AttributeError ignored\", TqdmWarning, stacklevel=2)\n\n def __del__(self):\n self.close()\n\n def __str__(self):\n return self.format_meter(**self.format_dict)\n\n @property\n def _comparable(self):\n return abs(getattr(self, \"pos\", 1 << 31))\n\n def __hash__(self):\n return id(self)\n\n def __iter__(self):\n \"\"\"Backward-compatibility to use: for x in tqdm(iterable)\"\"\"\n\n # Inlining instance variables as locals (speed optimisation)\n iterable = self.iterable\n\n # If the bar is disabled, then just walk the iterable\n # (note: keep this check outside the loop for performance)\n if self.disable:\n for obj in iterable:\n yield obj\n return\n\n mininterval = self.mininterval\n last_print_t = self.last_print_t\n last_print_n = self.last_print_n\n min_start_t = self.start_t + self.delay\n n = self.n\n time = self._time\n\n try:\n for obj in iterable:\n yield obj\n # Update and possibly print the progressbar.\n # Note: does not call self.update(1) for speed optimisation.\n n += 1\n\n if n - last_print_n >= self.miniters:\n cur_t = time()\n dt = cur_t - last_print_t\n if dt >= mininterval and cur_t >= min_start_t:\n self.update(n - last_print_n)\n last_print_n = self.last_print_n\n last_print_t = self.last_print_t\n finally:\n self.n = n\n self.close()\n\n def update(self, n=1):\n \"\"\"\n Manually update the progress bar, useful for streams\n such as reading files.\n E.g.:\n >>> t = tqdm(total=filesize) # Initialise\n >>> for current_buffer in stream:\n ... ...\n ... t.update(len(current_buffer))\n >>> t.close()\n The last line is highly recommended, but possibly not necessary if\n `t.update()` will be called in such a way that `filesize` will be\n exactly reached and printed.\n\n Parameters\n ----------\n n : int or float, optional\n Increment to add to the internal counter of iterations\n [default: 1]. 
If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n\n Returns\n -------\n out : bool or None\n True if a `display()` was triggered.\n \"\"\"\n if self.disable:\n return\n\n if n < 0:\n self.last_print_n += n # for auto-refresh logic to work\n self.n += n\n\n # check counter first to reduce calls to time()\n if self.n - self.last_print_n >= self.miniters:\n cur_t = self._time()\n dt = cur_t - self.last_print_t\n if dt >= self.mininterval and cur_t >= self.start_t + self.delay:\n cur_t = self._time()\n dn = self.n - self.last_print_n # >= n\n if self.smoothing and dt and dn:\n # EMA (not just overall average)\n self._ema_dn(dn)\n self._ema_dt(dt)\n self.refresh(lock_args=self.lock_args)\n if self.dynamic_miniters:\n # If no `miniters` was specified, adjust automatically to the\n # maximum iteration rate seen so far between two prints.\n # e.g.: After running `tqdm.update(5)`, subsequent\n # calls to `tqdm.update()` will only cause an update after\n # at least 5 more iterations.\n if self.maxinterval and dt >= self.maxinterval:\n self.miniters = dn * (self.mininterval or self.maxinterval) / dt\n elif self.smoothing:\n # EMA miniters update\n self.miniters = self._ema_miniters(\n dn * (self.mininterval / dt if self.mininterval and dt\n else 1))\n else:\n # max iters between two prints\n self.miniters = max(self.miniters, dn)\n\n # Store old values for next call\n self.last_print_n = self.n\n self.last_print_t = cur_t\n return True\n\n def close(self):\n \"\"\"Cleanup and (if leave=False) close the progressbar.\"\"\"\n if self.disable:\n return\n\n # Prevent multiple closures\n self.disable = True\n\n # decrement instance pos and remove from internal set\n pos = abs(self.pos)\n self._decr_instances(self)\n\n if self.last_print_t < self.start_t + self.delay:\n # haven't ever displayed; nothing to clear\n return\n\n # GUI mode\n if getattr(self, 'sp', None) is None:\n return\n\n # annoyingly, _supports_unicode isn't good enough\n def fp_write(s):\n self.fp.write(str(s))\n\n try:\n fp_write('')\n except ValueError as e:\n if 'closed' in str(e):\n return\n raise # pragma: no cover\n\n leave = pos == 0 if self.leave is None else self.leave\n\n with self._lock:\n if leave:\n # stats for overall rate (no weighted average)\n self._ema_dt = lambda: None\n self.display(pos=0)\n fp_write('\\n')\n else:\n # clear previous display\n if self.display(msg='', pos=pos) and not pos:\n fp_write('\\r')\n\n def clear(self, nolock=False):\n \"\"\"Clear current bar display.\"\"\"\n if self.disable:\n return\n\n if not nolock:\n self._lock.acquire()\n pos = abs(self.pos)\n if pos < (self.nrows or 20):\n self.moveto(pos)\n self.sp('')\n self.fp.write('\\r') # place cursor back at the beginning of line\n self.moveto(-pos)\n if not nolock:\n self._lock.release()\n\n def refresh(self, nolock=False, lock_args=None):\n \"\"\"\n Force refresh the display of this bar.\n\n Parameters\n ----------\n nolock : bool, optional\n If `True`, does not lock.\n If [default: `False`]: calls `acquire()` on internal lock.\n lock_args : tuple, optional\n Passed to internal lock's `acquire()`.\n If specified, will only `display()` if `acquire()` returns `True`.\n \"\"\"\n if self.disable:\n return\n\n if not nolock:\n if lock_args:\n if not self._lock.acquire(*lock_args):\n return False\n else:\n self._lock.acquire()\n self.display()\n if not nolock:\n self._lock.release()\n return True\n\n def unpause(self):\n \"\"\"Restart tqdm timer from last print time.\"\"\"\n if self.disable:\n 
return\n cur_t = self._time()\n self.start_t += cur_t - self.last_print_t\n self.last_print_t = cur_t\n\n def reset(self, total=None):\n \"\"\"\n Resets to 0 iterations for repeated use.\n\n Consider combining with `leave=True`.\n\n Parameters\n ----------\n total : int or float, optional. Total to use for the new bar.\n \"\"\"\n self.n = 0\n if total is not None:\n self.total = total\n if self.disable:\n return\n self.last_print_n = 0\n self.last_print_t = self.start_t = self._time()\n self._ema_dn = EMA(self.smoothing)\n self._ema_dt = EMA(self.smoothing)\n self._ema_miniters = EMA(self.smoothing)\n self.refresh()\n\n def set_description(self, desc=None, refresh=True):\n \"\"\"\n Set/modify description of the progress bar.\n\n Parameters\n ----------\n desc : str, optional\n refresh : bool, optional\n Forces refresh [default: True].\n \"\"\"\n self.desc = desc + ': ' if desc else ''\n if refresh:\n self.refresh()\n\n def set_description_str(self, desc=None, refresh=True):\n \"\"\"Set/modify description without ': ' appended.\"\"\"\n self.desc = desc or ''\n if refresh:\n self.refresh()\n\n def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):\n \"\"\"\n Set/modify postfix (additional stats)\n with automatic formatting based on datatype.\n\n Parameters\n ----------\n ordered_dict : dict or OrderedDict, optional\n refresh : bool, optional\n Forces refresh [default: True].\n kwargs : dict, optional\n \"\"\"\n # Sort in alphabetical order to be more deterministic\n postfix = OrderedDict([] if ordered_dict is None else ordered_dict)\n for key in sorted(kwargs.keys()):\n postfix[key] = kwargs[key]\n # Preprocess stats according to datatype\n for key in postfix.keys():\n # Number: limit the length of the string\n if isinstance(postfix[key], Number):\n postfix[key] = self.format_num(postfix[key])\n # Else for any other type, try to get the string conversion\n elif not isinstance(postfix[key], str):\n postfix[key] = str(postfix[key])\n # Else if it's a string, don't need to preprocess anything\n # Stitch together to get the final postfix\n self.postfix = ', '.join(key + '=' + postfix[key].strip()\n for key in postfix.keys())\n if refresh:\n self.refresh()\n\n def set_postfix_str(self, s='', refresh=True):\n \"\"\"\n Postfix without dictionary expansion, similar to prefix handling.\n \"\"\"\n self.postfix = str(s)\n if refresh:\n self.refresh()\n\n def moveto(self, n):\n # TODO: private method\n self.fp.write('\\n' * n + _term_move_up() * -n)\n getattr(self.fp, 'flush', lambda: None)()\n\n @property\n def format_dict(self):\n \"\"\"Public API for read-only member access.\"\"\"\n if self.disable and not hasattr(self, 'unit'):\n return defaultdict(lambda: None, {\n 'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})\n if self.dynamic_ncols:\n self.ncols, self.nrows = self.dynamic_ncols(self.fp)\n return {\n 'n': self.n, 'total': self.total,\n 'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,\n 'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,\n 'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,\n 'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,\n 'bar_format': self.bar_format, 'postfix': self.postfix,\n 'unit_divisor': self.unit_divisor, 'initial': self.initial,\n 'colour': self.colour}\n\n def display(self, msg=None, pos=None):\n \"\"\"\n Use `self.sp` to display `msg` in the specified `pos`.\n\n Consider overloading this function when inheriting to use e.g.:\n 
`self.some_frontend(**self.format_dict)` instead of `self.sp`.\n\n Parameters\n ----------\n msg : str, optional. What to display (default: `repr(self)`).\n pos : int, optional. Position to `moveto`\n (default: `abs(self.pos)`).\n \"\"\"\n if pos is None:\n pos = abs(self.pos)\n\n nrows = self.nrows or 20\n if pos >= nrows - 1:\n if pos >= nrows:\n return False\n if msg or msg is None: # override at `nrows - 1`\n msg = \" ... (more hidden) ...\"\n\n if not hasattr(self, \"sp\"):\n raise TqdmDeprecationWarning(\n \"Please use `tqdm.gui.tqdm(...)`\"\n \" instead of `tqdm(..., gui=True)`\\n\",\n fp_write=getattr(self.fp, 'write', sys.stderr.write))\n\n if pos:\n self.moveto(pos)\n self.sp(self.__str__() if msg is None else msg)\n if pos:\n self.moveto(-pos)\n return True\n\n @classmethod\n @contextmanager\n def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):\n \"\"\"\n stream : file-like object.\n method : str, \"read\" or \"write\". The result of `read()` and\n the first argument of `write()` should have a `len()`.\n\n >>> with tqdm.wrapattr(file_obj, \"read\", total=file_obj.size) as fobj:\n ... while True:\n ... chunk = fobj.read(chunk_size)\n ... if not chunk:\n ... break\n \"\"\"\n with cls(total=total, **tqdm_kwargs) as t:\n if bytes:\n t.unit = \"B\"\n t.unit_scale = True\n t.unit_divisor = 1024\n yield CallbackIOWrapper(t.update, stream, method)" }, { "identifier": "__version__", "path": "venv/Lib/site-packages/tqdm/version.py", "snippet": "" } ]
import logging
import re
import sys
from ast import literal_eval as numeric
from .std import TqdmKeyError, TqdmTypeError, tqdm
from .version import __version__
from importlib import resources
from os import path
from shutil import copyfile
13763
fout : binary file with `write` (and optionally `flush`) methods. callback : function(float), e.g.: `tqdm.update` callback_len : If (default: True) do `callback(len(buffer))`. Otherwise, do `callback(data) for data in buffer.split(delim)`. """ fp_write = fout.write if not delim: while True: tmp = fin.read(buf_size) # flush at EOF if not tmp: getattr(fout, 'flush', lambda: None)() return fp_write(tmp) callback(len(tmp)) # return buf = b'' len_delim = len(delim) # n = 0 while True: tmp = fin.read(buf_size) # flush at EOF if not tmp: if buf: fp_write(buf) if callback_len: # n += 1 + buf.count(delim) callback(1 + buf.count(delim)) else: for i in buf.split(delim): callback(i) getattr(fout, 'flush', lambda: None)() return # n while True: i = tmp.find(delim) if i < 0: buf += tmp break fp_write(buf + tmp[:i + len(delim)]) # n += 1 callback(1 if callback_len else (buf + tmp[:i])) buf = b'' tmp = tmp[i + len_delim:] # ((opt, type), ... ) RE_OPTS = re.compile(r'\n {4}(\S+)\s{2,}:\s*([^,]+)') # better split method assuming no positional args RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(\s+|=|$)') # TODO: add custom support for some of the following? UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file') # The 8 leading spaces are required for consistency CLI_EXTRA_DOC = r""" Extra CLI Options ----------------- name : type, optional TODO: find out why this is needed. delim : chr, optional Delimiting character [default: '\n']. Use '\0' for null. N.B.: on Windows systems, Python converts '\n' to '\r\n'. buf_size : int, optional String buffer size in bytes [default: 256] used when `delim` is specified. bytes : bool, optional If true, will count bytes, ignore `delim`, and default `unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'. tee : bool, optional If true, passes `stdin` to both `stderr` and `stdout`. update : bool, optional If true, will treat input as newly elapsed iterations, i.e. numbers to pass to `update()`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. update_to : bool, optional If true, will treat input as total elapsed iterations, i.e. numbers to assign to `self.n`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. null : bool, optional If true, will discard input (no stdout). manpath : str, optional Directory in which to install tqdm man pages. comppath : str, optional Directory in which to place tqdm completion. log : str, optional CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET. """ def main(fp=sys.stderr, argv=None): """ Parameters (internal use only) --------- fp : file-like object for tqdm argv : list (default: sys.argv[1:]) """ if argv is None: argv = sys.argv[1:] try: log_idx = argv.index('--log') except ValueError: for i in argv: if i.startswith('--log='): logLevel = i[len('--log='):] break else: logLevel = 'INFO' else: # argv.pop(log_idx) # logLevel = argv.pop(log_idx) logLevel = argv[log_idx + 1] logging.basicConfig(level=getattr(logging, logLevel), format="%(levelname)s:%(module)s:%(lineno)d:%(message)s")
""" Module version for monitoring CLI pipes (`... | python -m tqdm | ...`). """ __all__ = ["main"] log = logging.getLogger(__name__) def cast(val, typ): log.debug((val, typ)) if " or " in typ: for t in typ.split(" or "): try: return cast(val, t) except TqdmTypeError: pass raise TqdmTypeError(val + ' : ' + typ) # sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n') if typ == 'bool': if (val == 'True') or (val == ''): return True elif val == 'False': return False else: raise TqdmTypeError(val + ' : ' + typ) try: return eval(typ + '("' + val + '")') except Exception: if typ == 'chr': return chr(ord(eval('"' + val + '"'))).encode() else: raise TqdmTypeError(val + ' : ' + typ) def posix_pipe(fin, fout, delim=b'\\n', buf_size=256, callback=lambda float: None, callback_len=True): """ Params ------ fin : binary file with `read(buf_size : int)` method fout : binary file with `write` (and optionally `flush`) methods. callback : function(float), e.g.: `tqdm.update` callback_len : If (default: True) do `callback(len(buffer))`. Otherwise, do `callback(data) for data in buffer.split(delim)`. """ fp_write = fout.write if not delim: while True: tmp = fin.read(buf_size) # flush at EOF if not tmp: getattr(fout, 'flush', lambda: None)() return fp_write(tmp) callback(len(tmp)) # return buf = b'' len_delim = len(delim) # n = 0 while True: tmp = fin.read(buf_size) # flush at EOF if not tmp: if buf: fp_write(buf) if callback_len: # n += 1 + buf.count(delim) callback(1 + buf.count(delim)) else: for i in buf.split(delim): callback(i) getattr(fout, 'flush', lambda: None)() return # n while True: i = tmp.find(delim) if i < 0: buf += tmp break fp_write(buf + tmp[:i + len(delim)]) # n += 1 callback(1 if callback_len else (buf + tmp[:i])) buf = b'' tmp = tmp[i + len_delim:] # ((opt, type), ... ) RE_OPTS = re.compile(r'\n {4}(\S+)\s{2,}:\s*([^,]+)') # better split method assuming no positional args RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(\s+|=|$)') # TODO: add custom support for some of the following? UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file') # The 8 leading spaces are required for consistency CLI_EXTRA_DOC = r""" Extra CLI Options ----------------- name : type, optional TODO: find out why this is needed. delim : chr, optional Delimiting character [default: '\n']. Use '\0' for null. N.B.: on Windows systems, Python converts '\n' to '\r\n'. buf_size : int, optional String buffer size in bytes [default: 256] used when `delim` is specified. bytes : bool, optional If true, will count bytes, ignore `delim`, and default `unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'. tee : bool, optional If true, passes `stdin` to both `stderr` and `stdout`. update : bool, optional If true, will treat input as newly elapsed iterations, i.e. numbers to pass to `update()`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. update_to : bool, optional If true, will treat input as total elapsed iterations, i.e. numbers to assign to `self.n`. Note that this is slow (~2e5 it/s) since every input must be decoded as a number. null : bool, optional If true, will discard input (no stdout). manpath : str, optional Directory in which to install tqdm man pages. comppath : str, optional Directory in which to place tqdm completion. log : str, optional CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET. 
""" def main(fp=sys.stderr, argv=None): """ Parameters (internal use only) --------- fp : file-like object for tqdm argv : list (default: sys.argv[1:]) """ if argv is None: argv = sys.argv[1:] try: log_idx = argv.index('--log') except ValueError: for i in argv: if i.startswith('--log='): logLevel = i[len('--log='):] break else: logLevel = 'INFO' else: # argv.pop(log_idx) # logLevel = argv.pop(log_idx) logLevel = argv[log_idx + 1] logging.basicConfig(level=getattr(logging, logLevel), format="%(levelname)s:%(module)s:%(lineno)d:%(message)s")
d = tqdm.__doc__ + CLI_EXTRA_DOC
2
2023-12-24 15:46:18+00:00
16k
pkariz/grin-explorer
backend/api/views.py
[ { "identifier": "fetch_and_store_block", "path": "backend/api/bootstrap.py", "snippet": "def fetch_and_store_block(blockchain, block_height, prefetch=True):\n # initialize node api\n node_api = NodeV2API(blockchain.node)\n if block_height < 0:\n # no such block height\n raise NodeBlockNotFoundException()\n if prefetch:\n block_data = get_prefetched_header_and_block_data(blockchain.node, block_height)\n else:\n block_data = node_api.get_block(height=block_height)\n header_data = block_data['header']\n timestamp = parse_datetime(header_data['timestamp'])\n hash = header_data['hash']\n # create header instance\n cuckoo_solution = ','.join(map(str, header_data['cuckoo_solution']))\n with transaction.atomic():\n header, header_created = BlockHeader.objects.get_or_create(\n blockchain=blockchain,\n cuckoo_solution=cuckoo_solution,\n kernel_root=header_data['kernel_root'],\n defaults={\n 'version': header_data['version'],\n 'output_root': header_data['output_root'],\n 'range_proof_root': header_data['range_proof_root'],\n 'kernel_mmr_size': header_data['kernel_mmr_size'],\n 'output_mmr_size': header_data['output_mmr_size'],\n 'nonce': str(header_data['nonce']),\n 'edge_bits': header_data['edge_bits'],\n 'secondary_scaling': header_data['secondary_scaling'],\n 'total_difficulty': header_data['total_difficulty'],\n 'total_kernel_offset': header_data['total_kernel_offset'],\n }\n )\n # create block instance\n try:\n block, block_created = Block.objects.get_or_create(\n blockchain=blockchain,\n hash=hash,\n height=block_height,\n timestamp=timestamp,\n header=header,\n prev_hash=block_data['header']['previous'],\n reorg=None,\n nr_inputs=len(block_data['inputs']),\n nr_outputs=len(block_data['outputs']),\n nr_kernels=len(block_data['kernels']),\n )\n except IntegrityError as e:\n # race condition so it's a duplicate. 
We can skip creation process\n # and just return the block that we already have\n return Block.objects.get(blockchain=blockchain, hash=hash)\n\n if not block_created:\n # we have already fetched all the data since it's done in an atomic\n # transaction, so skip unnecessary work\n return block\n\n # bulk create kernels\n kernels = []\n for kernel_data in block_data['kernels']:\n kernels.append(\n Kernel(\n block=block,\n features=kernel_data['features'],\n fee=kernel_data['fee'],\n fee_shift=kernel_data['fee_shift'],\n lock_height=kernel_data['lock_height'],\n excess=kernel_data['excess'],\n excess_sig=kernel_data['excess_sig'],\n )\n )\n Kernel.objects.bulk_create(kernels)\n\n inputs = []\n # create input instances\n outputs_data = Output.objects\\\n .filter(\n commitment__in=block_data['inputs'],\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\\\n .values('id', 'commitment')\n outputs_mapper = { output_data['commitment'] : output_data['id'] for output_data in outputs_data }\n for input_data in block_data['inputs']:\n inputs.append(\n Input(\n block=block,\n commitment=input_data,\n output_id=outputs_mapper.get(input_data),\n )\n )\n Input.objects.bulk_create(inputs)\n # mark the corresponding outputs as spent, but only on the main chain so\n # that we don't corrupt the reorged data\n Output.objects.filter(pk__in=outputs_mapper.values()).update(spent=True)\n\n # create output instances\n outputs = []\n inputs = Input.objects\\\n .filter(\n commitment__in=list(map(lambda x: x['commit'], block_data['outputs'])),\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\n inputs_mapper = { input.commitment : input for input in inputs }\n for output_data in block_data['outputs']:\n outputs.append(\n Output(\n block=block,\n output_type=output_data['output_type'],\n commitment=output_data['commit'],\n spent=output_data['spent'],\n proof=output_data['proof'],\n proof_hash=output_data['proof_hash'],\n merkle_proof=output_data['merkle_proof'],\n mmr_index=output_data['mmr_index'],\n )\n )\n outputs = Output.objects.bulk_create(outputs)\n # link inputs to created outputs, but only on the main chain so that we\n # don't corrupt the reorged data\n fixed_inputs = []\n for output in outputs:\n matching_input = inputs_mapper.get(output.commitment)\n if matching_input:\n matching_input.output = output\n fixed_inputs.append(matching_input)\n Input.objects.bulk_update(fixed_inputs, ['output'])\n return block" }, { "identifier": "update_blockchain_progress", "path": "backend/api/bootstrap.py", "snippet": "def update_blockchain_progress(blockchain):\n try:\n start_height, end_height = blockchain.get_bootstrap_heights()\n except Exception as e:\n logger.warning(\n 'Failed to get bootstrap heights',\n extra={ 'blockchain': blockchain.slug },\n )\n raise UpdateBlockchainProgressError(blockchain.slug)\n expected_heights = set(range(start_height, end_height + 1))\n existing_heights = set(list(\n blockchain.blocks\\\n .filter(reorg__isnull=True)\\\n .values_list('height', flat=True)\n ))\n missing_heights = expected_heights - existing_heights\n update_load_progress(\n blockchain, \n len(missing_heights),\n end_height - start_height + 1,\n 1,\n 1,\n 2,\n verbose=True\n )" }, { "identifier": "UpdateBlockchainProgressError", "path": "backend/api/exceptions.py", "snippet": "class UpdateBlockchainProgressError(Exception):\n pass" }, { "identifier": "get_filter_backends", "path": "backend/api/helpers.py", "snippet": "def get_filter_backends(replacements):\n \"\"\"\n Returns a tuple of 
filter backends where default ones, from DefaultMixin,\n are replaced with the given replacements.\n\n Args:\n replacements: dict where key is an existing filter backend class's\n __name__ and value is its replacement filter backend class\n \"\"\"\n current_filters = DefaultMixin.filter_backends\n return tuple([\n filter if filter.__name__ not in replacements else replacements[filter.__name__]\n for filter in list(current_filters)\n ])" }, { "identifier": "load_data_from_redis", "path": "backend/api/helpers.py", "snippet": "def load_data_from_redis(redis_key):\n r = redis.Redis(host='redis')\n data = r.get(redis_key)\n if data is None:\n return\n return json.loads(data)" }, { "identifier": "BlockFilter", "path": "backend/api/filters.py", "snippet": "class BlockFilter(filters.FilterSet):\n class Meta:\n model = Block\n fields = ('blockchain', 'height', 'hash')" }, { "identifier": "CustomBlockSearchFilter", "path": "backend/api/filters.py", "snippet": "class CustomBlockSearchFilter(DRFfilters.SearchFilter):\n \"\"\"\n Alongside the given search_fields this filter filters also by:\n - keyword 'reorgs' --> return only blocks where reorgs happened\n - ['inputs', 'outputs', 'kernels'] ['=', '<', '>', '<=', '>='] [value] -->\n return only blocks matching this computation, eg: 'inputs > 2'\n You cannot combine different types of search (eg. 'reorgs' + 'computation')\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n queryset = super().filter_queryset(request, queryset, view)\n blockchain_slug = view.kwargs['blockchain_slug']\n original_search_terms = self.get_search_terms(request)\n search_terms = self._get_normalized_search_terms(original_search_terms)\n if len(search_terms) == 0:\n # searches:\n # - height --> add filter reorg=None\n # - hash --> nothing to add\n # - outputhash --> add filter reorg=None\n # - block-detail --> nothing to add\n # - block-list --> add filter reorg=None\n if len(original_search_terms) > 1:\n raise APIException('Too many standard search terms')\n if not original_search_terms:\n # it's either an unfiltered block-list or block-detail\n if view.action == 'list':\n queryset = queryset.filter(reorg=None)\n else:\n # there's only 1 original search term, figure out which one\n if len(original_search_terms[0]) != 64:\n # it's not block hash but either block height or output hash\n # in both cases we need to filter out reorgs\n queryset = queryset.filter(reorg=None)\n return queryset\n searched_types = set(map(lambda x: x['type'], search_terms))\n if len(searched_types) > 1:\n raise APIException('Cannot combine different types of searches')\n if searched_types == { 'reorgs' }:\n return self._get_reorgs_qs(blockchain_slug)\n elif searched_types == { 'computation' }:\n return self._get_computations_qs(search_terms, blockchain_slug)\n elif searched_types == { 'hash' }:\n return self._get_hash_qs(search_terms[0]['value'], blockchain_slug, queryset)\n elif searched_types == { 'height' }:\n return self._get_height_qs(search_terms[0]['value'], blockchain_slug)\n elif searched_types == { 'kernel_or_output' }:\n return self._get_kernel_or_output_qs(\n search_terms[0]['value'], blockchain_slug)\n else:\n logger.exception(\n 'Invalid search terms',\n exc_info=e,\n extra={'search_terms': search_terms}\n )\n raise APIException('Invalid search terms')\n\n def _get_normalized_search_terms(self, search_terms):\n \"\"\"\n Search terms of format ['outputs>1'] are not supported. Instead, the\n operators should be surrounded by spaces, eg. 
['outputs', '>', '1'].\n Supported operators are ['=', '>', '<', '<=', '>=']\n \"\"\"\n supported_operators = ['=', '>', '<', '<=', '>=']\n normalized_terms = []\n i = 0\n while i <= len(search_terms) - 1:\n if isinstance(search_terms[i], str) and search_terms[i].lower() in ['inputs', 'outputs', 'kernels']:\n operator = search_terms[i+1]\n if operator not in supported_operators:\n raise APIException('Invalid search operator')\n value = int(search_terms[i+2])\n if value < 0:\n raise APIException('Invalid search computation')\n normalized_terms.append({\n 'type': 'computation',\n 'source': search_terms[i],\n 'op': operator,\n 'value': value,\n })\n i += 3\n elif isinstance(search_terms[i], str) and search_terms[i].lower() == 'reorgs':\n normalized_terms.append({ 'type': 'reorgs' })\n i += 1\n elif len(search_terms[i]) == 64:\n # hash\n normalized_terms.append({\n 'type': 'hash',\n 'value': search_terms[i],\n })\n i += 1\n elif len(search_terms[i]) == 66:\n # kernel excess or output commitment\n normalized_terms.append({\n 'type': 'kernel_or_output',\n 'value': search_terms[i],\n })\n i += 1\n else:\n try:\n value = int(search_terms[i])\n except ValueError:\n value = None\n if value >= 0:\n normalized_terms.append({\n 'type': 'height',\n 'value': value,\n })\n i += 1\n else:\n # term which is not for this custom search, eg. block hash\n i += 1\n return normalized_terms\n\n def _get_reorgs_qs(self, blockchain_slug):\n # NOTE: we first filter, then calculate reorg_len on filtered data and\n # then filter on annotated data that we've calculated\n reorg_heights = list(Reorg.objects\\\n .select_related('start_main_block')\\\n .filter(\n blockchain__slug=blockchain_slug,\n start_main_block__reorg=None,\n )\\\n .annotate(reorg_len=F('end_reorg_block__height') - F('start_reorg_block__height') + 1)\\\n .filter(reorg_len__gte=settings.MIN_REORG_LEN)\\\n .values_list('start_main_block__height', flat=True)\n )\n queryset = Block.objects\\\n .filter(\n blockchain__slug=blockchain_slug,\n reorg=None,\n height__in=reorg_heights,\n )\\\n .order_by('-height')\n return queryset\n\n def _get_hash_qs(self, hash, blockchain_slug, queryset):\n return queryset.filter(\n blockchain__slug=blockchain_slug,\n hash=hash,\n )\n\n def _get_height_qs(self, height, blockchain_slug):\n return Block.objects.filter(\n blockchain__slug=blockchain_slug,\n height=height,\n )\n\n def _get_kernel_or_output_qs(self, kernel_or_output, blockchain_slug):\n kernel = Kernel.objects.filter(\n excess=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if kernel:\n return Block.objects.filter(hash=kernel.block.hash)\n output = Output.objects.filter(\n commitment=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if output:\n return Block.objects.filter(hash=output.block.hash)\n return Block.objects.none()\n\n def _get_computations_qs(self, search_terms, blockchain_slug):\n operator_mapping = {\n '=': '',\n '>': '__gt',\n '<': '__lt',\n '<=': '__lte',\n '>=': '__gte',\n }\n possible_sources = ['inputs', 'outputs', 'kernels']\n searched_sources = set(map(lambda x: x['source'], search_terms))\n op_searched_types = set(possible_sources) & set(searched_sources)\n op_qs = Blockchain.objects.get(slug=blockchain_slug).blocks.all()\n for search_term in search_terms:\n filters = {\n 'blockchain__slug': blockchain_slug,\n 'reorg': None,\n }\n op_map = operator_mapping[search_term['op']]\n filters[f'nr_{search_term[\"source\"]}{op_map}'] = search_term['value']\n op_qs = 
op_qs.filter(**filters).order_by('-height')\n return op_qs" }, { "identifier": "NodeFilter", "path": "backend/api/filters.py", "snippet": "class NodeFilter(filters.FilterSet):\n class Meta:\n model = Node\n fields = ('name', 'slug', 'archive')" }, { "identifier": "NodeGroupFilter", "path": "backend/api/filters.py", "snippet": "class NodeGroupFilter(filters.FilterSet):\n class Meta:\n model = NodeGroup\n fields = ('name', 'slug')" }, { "identifier": "CustomModelViewSet", "path": "backend/api/mixins.py", "snippet": "class CustomModelViewSet(\n DefaultMixin,\n viewsets.ModelViewSet\n):\n \"\"\"Default viewset for models.\"\"\"\n pass" }, { "identifier": "Blockchain", "path": "backend/api/models.py", "snippet": "class Blockchain(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # testnet, mainnet etc\n name = models.CharField(max_length=255, unique=True)\n # slug of the name, we use it in url\n slug = models.SlugField(max_length=255, unique=True)\n # node from which the data is fetched\n node = models.ForeignKey(\n Node, related_name='blockchains', on_delete=models.PROTECT)\n # the default blockchain will be picked on the gui by default\n default = models.BooleanField(default=False)\n # if fetch_price is False then the shown price will always be 0.\n # Testnets and localnets should have this set to false.\n fetch_price = models.BooleanField(default=True)\n # load_progress shows current % of loaded blocks. If archive is True then\n # load_progress will represent % of missing all blocks, otherwise % of\n # missing blocks from the latest 1440 blocks\n load_progress = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n default=0.0,\n validators=[MinValueValidator(0), MaxValueValidator(100)]\n )\n\n def __str__(self):\n return f'{self.name} - {self.load_progress} [Node<{self.node}>]'\n\n def bootstrap(self, skip_reorg_check=False):\n # import here to avoid cyclic import\n from .bootstrap import load_blocks\n\n start_height, end_height = self.get_bootstrap_heights()\n load_blocks(self, start_height, end_height, skip_reorg_check)\n\n def get_tip_height(self):\n node_api = NodeV2API(self.node)\n try:\n end_block = node_api.get_tip()['height']\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get node tip')\n raise e\n return end_block\n\n def get_progress_decimal_places(self):\n if self.node.archive:\n return 2\n return 0\n\n def get_bootstrap_heights(self):\n node_api = NodeV2API(self.node)\n end_height = self.get_tip_height()\n try:\n start_height = node_api.get_blocks(0, end_height, 1, False)['blocks'][0]['header']['height']\n except IndexError:\n raise Exception('Node has no blocks.')\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get first block height')\n raise e\n return start_height, end_height\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n if self.default:\n # set other blockchain.default to False\n other_blockchains = Blockchain.objects.all()\n if self.pk:\n other_blockchains = other_blockchains.exclude(pk=self.pk)\n other_blockchains.update(default=False)\n # blockchain doesn't change much so this call doesn't hurt\n old_instance = Blockchain.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.load_progress != old_instance.load_progress:\n # load progress changed, send info\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 
'blockchain_progress_changed',\n 'message': {\n 'slug': self.slug,\n # convert to float since Decimal is not serializable\n 'load_progress': float(self.load_progress),\n },\n }\n )\n return res\n\n def full_print(self):\n \"\"\"Used for developing and debugging.\"\"\"\n print('MAIN CHAIN:')\n for block in self.blocks.filter(reorg=None).order_by('height'):\n print(' --> ' + block.hash)\n for reorg in Reorg.objects.filter(blockchain=self):\n print('REORG:')\n for block in Block.objects.filter(reorg=reorg).order_by('height'):\n print(' --> ' + block.hash)\n print('------------------------------------------------------')\n\n def reset(self):\n \"\"\"Used for developing and debugging.\"\"\"\n from .models import Block, BlockHeader, Input, Output, Kernel, DramatiqTask, Reorg\n from django.contrib.contenttypes.models import ContentType\n from decimal import Decimal\n\n Input.objects.filter(block__blockchain=self).delete()\n Output.objects.filter(block__blockchain=self).delete()\n Kernel.objects.filter(block__blockchain=self).delete()\n self.reorgs.all().delete()\n\n content_type = ContentType.objects.get_for_model(self)\n DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=self.id,\n ).delete()\n # removing header will also remove the block\n BlockHeader.objects.filter(block__blockchain=self).delete()\n self.load_progress = Decimal('0')\n self.save()" }, { "identifier": "Block", "path": "backend/api/models.py", "snippet": "class Block(TimeStampedModel):\n blockchain = models.ForeignKey(\n Blockchain, related_name='blocks', on_delete=models.CASCADE)\n hash = models.CharField(\n primary_key=True,\n max_length=64,\n validators=[MinLengthValidator(64)],\n db_index=True,\n )\n height = models.PositiveIntegerField(db_index=True)\n timestamp = models.DateTimeField(db_index=True)\n header = models.ForeignKey(\n 'BlockHeader', related_name='block', on_delete=models.CASCADE)\n prev_hash = models.CharField(\n max_length=64,\n null=True,\n blank=True,\n validators=[MinLengthValidator(64)],\n )\n nr_inputs = models.PositiveIntegerField(default=0)\n nr_outputs = models.PositiveIntegerField(default=0)\n nr_kernels = models.PositiveIntegerField(default=0)\n # when reorg is set it means this block is part of a reorg and not the main\n # chain\n reorg = models.ForeignKey(\n 'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)\n\n def __str__(self):\n suffix = ''\n if self.reorg:\n suffix = ' Reorged: {}'.format(self.reorg.id)\n return '{}: {} (prev: {})'.format(\n self.height, self.hash, self.prev_hash)\n\n def get_next_block(self):\n return Block.objects.filter(prev_hash=self.hash).first()\n\n def get_previous_block(self):\n return Block.objects.filter(hash=self.prev_hash).first()\n\n def full_print(self, prefix=''):\n \"\"\"Used for developing and debugging.\"\"\"\n print('---------------------------------------------------------------')\n print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')\n print(f'{prefix} INPUTS:')\n for input in self.inputs.all():\n print(f'{prefix} {input}, output: {input.output}')\n print(f'{prefix} OUTPUTS:')\n for output in self.outputs.all():\n print(f'{prefix} {output}')\n print(f'{prefix} KERNELS:')\n for kernel in self.kernels.all():\n print(f'{prefix} {kernel}')\n print('---------------------------------------------------------------')" }, { "identifier": "Reorg", "path": "backend/api/models.py", "snippet": "class Reorg(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n blockchain = models.ForeignKey(\n 
Blockchain, related_name='reorgs', on_delete=models.CASCADE)\n # start_reorg_block and end_reorg_block define starting and ending block,\n # which were reorged\n start_reorg_block = models.ForeignKey(\n Block, related_name='start_reorgs', on_delete=models.CASCADE)\n end_reorg_block = models.ForeignKey(\n Block, related_name='end_reorgs', on_delete=models.CASCADE)\n # start_main_block defines starting block which is the new start of the main\n # chain - the block that replaced start_reorg_block. We usually don't know\n # which the ending block is when we spot the reorg, so we don't store it\n # (we don't even have it in DB at that time yet since we usually get them\n # incrementally in the order they're accepted).\n start_main_block = models.ForeignKey(\n Block, related_name='start_mains', on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}: start: {}, end: {}'.format(\n self.blockchain.slug, self.start_reorg_block, self.end_reorg_block)" }, { "identifier": "Node", "path": "backend/api/models.py", "snippet": "class Node(TimeStampedModel):\n \"\"\"Node on the network. Currently it only supports grin-rust.\"\"\"\n id = models.BigAutoField(primary_key=True)\n # name can be whatever\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n group = models.ForeignKey(\n NodeGroup, related_name='nodes', on_delete=models.PROTECT)\n # foreign api url of the grin-rust node\n api_url = models.URLField()\n # username of the grin-rust node\n api_username = models.CharField(max_length=255)\n # foreign api secret of the grin-rust node\n api_password = models.CharField(max_length=255)\n # if archive is true then we fetch every block when we bootstrap, otherwise\n # we fetch only latest 1440 blocks (1 day)\n archive = models.BooleanField(default=False)\n\n def __str__(self):\n repr = f'{self.name}'\n if self.archive:\n repr += ' (archive)'\n return repr\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n return super().save(*args, **kwargs)\n\n def is_reachable(self):\n try:\n NodeV2API(self).get_tip()\n return True\n except (\n RequestsConnectionError,\n RequestsTimeout,\n RequestsHTTPError,\n RequestsReadTimeout\n ):\n logger.exception('Node unreachable', extra={'node': self.slug})\n return False" }, { "identifier": "NodeGroup", "path": "backend/api/models.py", "snippet": "class NodeGroup(models.Model):\n \"\"\"\n NodeGroup represents a group of nodes. 
These nodes should be on the same\n network.:\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n # name is probably mainnet, testnet or smth similar\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n self.full_clean()\n return super().save(*args, **kwargs)" }, { "identifier": "DramatiqTask", "path": "backend/api/models.py", "snippet": "class DramatiqTask(TimeStampedModel):\n \"\"\"We store task's message_id so that we can abort the task.\"\"\"\n\n class Type(models.TextChoices):\n BOOTSTRAP = 'bootstrap', 'Bootstrap'\n BLOCKCHAIN_DELETE = 'blockchain_delete', 'Blockchain delete'\n\n class Status(models.TextChoices):\n # NOTE: IN_PROGRESS doesn't really mean it's already in progress, just\n # that it has been sent\n IN_PROGRESS = 'in_progress', 'In progress'\n SKIPPED = 'skipped', 'Skipped'\n SUCCESS = 'success', 'Success'\n FAILURE = 'failure', 'Failure'\n\n id = models.BigAutoField(primary_key=True)\n message_id = models.CharField(max_length=255, unique=True)\n # type tells us what this task is doing, eg. 'bootstrap'\n type = models.CharField(max_length=255, choices=Type.choices)\n status = models.CharField(max_length=255, choices=Status.choices)\n # failure_reason should be short and concise\n failure_reason = models.TextField(null=True, default=None)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n def save(self, *args, **kwargs):\n from .serializers import DramatiqTaskSerializer\n old_instance = DramatiqTask.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.status != old_instance.status:\n # status changed, send info\n print('sending task status update')\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'task_status_changed',\n 'message': DramatiqTaskSerializer(self).data,\n }\n )\n return res" }, { "identifier": "BlockchainSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockchainSerializer(serializers.ModelSerializer):\n node = serializers.PrimaryKeyRelatedField(queryset=Node.objects.all(), write_only=True)\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'default', 'node', 'load_progress', 'fetch_price')" }, { "identifier": "BlockchainExtendedSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockchainExtendedSerializer(serializers.ModelSerializer):\n tasks = serializers.SerializerMethodField()\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'node', 'default', 'load_progress', 'fetch_price', 'tasks')\n\n def to_representation(self, obj):\n self.fields['node'] = NodeSerializer()\n return super().to_representation(obj)\n\n def get_tasks(self, blockchain):\n content_type = ContentType.objects.get_for_model(blockchain)\n tasks = DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=blockchain.id,\n )\n return DramatiqTaskSimpleSerializer(tasks, many=True).data" }, { "identifier": "BlockSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockSerializer(serializers.ModelSerializer):\n blockchain = BlockchainSerializer()\n header = BlockHeaderSerializer()\n 
starting_reorg_blocks = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'reorg',\n 'nr_kernels',\n 'nr_inputs',\n 'nr_outputs',\n 'blockchain',\n 'starting_reorg_blocks',\n )\n\n def get_starting_reorg_blocks(self, block):\n reorgs = Reorg.objects.filter(start_main_block=block)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return BlockSerializer(\n [reorg.start_reorg_block for reorg in reorgs], many=True).data" }, { "identifier": "BlockDetailSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockDetailSerializer(serializers.ModelSerializer):\n header = BlockHeaderSerializer()\n kernels = KernelSerializer(many=True)\n inputs = InputSerializer(many=True)\n outputs = OutputSerializer(many=True)\n blockchain = BlockchainSerializer()\n confirmations = serializers.SerializerMethodField()\n next_hash = serializers.SerializerMethodField()\n next_block_reorgs = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'kernels',\n 'inputs',\n 'outputs',\n 'blockchain',\n 'confirmations',\n 'next_hash',\n 'reorg',\n 'next_block_reorgs',\n )\n\n def get_confirmations(self, block):\n # in reorged blocks we show confirmations based on the reorged chain!\n tip_height = block.blockchain.blocks\\\n .filter(reorg=block.reorg)\\\n .order_by('-height')\\\n .first().height\n return tip_height - block.height + 1\n\n def get_next_hash(self, block):\n try:\n return Block.objects.get(\n blockchain=block.blockchain,\n reorg=block.reorg,\n prev_hash=block.hash\n ).hash\n except Block.DoesNotExist:\n return None\n\n def get_next_block_reorgs(self, block):\n from .serializers import ReorgSerializer\n reorgs = Reorg.objects.filter(start_main_block__prev_hash=block.hash)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return ReorgSerializer(reorgs, many=True).data" }, { "identifier": "NodeSerializer", "path": "backend/api/serializers.py", "snippet": "class NodeSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Node\n fields = '__all__'" }, { "identifier": "NodeGroupSerializer", "path": "backend/api/serializers.py", "snippet": "class NodeGroupSerializer(serializers.ModelSerializer):\n nodes = NodeSerializer(many=True, read_only=True)\n\n class Meta:\n model = NodeGroup\n fields = '__all__'" }, { "identifier": "DramatiqTaskSerializer", "path": "backend/api/serializers.py", "snippet": "class DramatiqTaskSerializer(serializers.ModelSerializer):\n content_object = serializers.SerializerMethodField()\n\n class Meta:\n model = DramatiqTask\n fields = (\n 'id',\n 'message_id',\n 'type',\n 'status',\n 'failure_reason',\n 'content_object',\n )\n\n def get_content_object(self, task):\n from .serializers import BlockchainSerializer\n serializer_mapper = {\n 'Blockchain': BlockchainSerializer,\n }\n klass = task.content_object.__class__\n return {\n 'model': klass._meta.model_name,\n 'data': serializer_mapper[klass.__name__](task.content_object).data,\n }" }, { "identifier": "bootstrap_blockchain", "path": "backend/api/tasks.py", "snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef bootstrap_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import 
Blockchain\n Blockchain.objects.get(slug=blockchain_slug).bootstrap()" }, { "identifier": "delete_blockchain", "path": "backend/api/tasks.py", "snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef delete_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import Blockchain\n Blockchain.objects.get(slug=blockchain_slug).delete()\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'blockchain_deleted',\n 'message': {\n 'slug': blockchain_slug,\n },\n }\n )" } ]
from asgiref.sync import async_to_sync
from django.contrib.contenttypes.models import ContentType
from django.db.models.deletion import ProtectedError
from django.views.generic import TemplateView
from django.views.decorators.cache import never_cache
from dramatiq_abort import abort
from rest_framework import status
from rest_framework.exceptions import APIException
from rest_framework.exceptions import NotFound
from rest_framework.exceptions import ValidationError as DRFValidationError
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from slugify import slugify
from .bootstrap import fetch_and_store_block, update_blockchain_progress
from .exceptions import UpdateBlockchainProgressError
from .helpers import get_filter_backends, load_data_from_redis
from .filters import (
    BlockFilter,
    CustomBlockSearchFilter,
    NodeFilter,
    NodeGroupFilter,
)
from .mixins import CustomModelViewSet
from .models import Blockchain, Block, Reorg, Node, NodeGroup, DramatiqTask
from .serializers import (
    BlockchainSerializer,
    BlockchainExtendedSerializer,
    BlockSerializer,
    BlockDetailSerializer,
    NodeSerializer,
    NodeGroupSerializer,
    DramatiqTaskSerializer,
)
from .tasks import bootstrap_blockchain, delete_blockchain
import channels
import logging
import pytz
10998
# nothing to do, ignore the new block return Response(status=status.HTTP_404_NOT_FOUND) # get request data height = request.data['data']['header']['height'] hash = request.data['hash'] # prev_hash comes as list of int bytes, so we convert it to hex # NOTE: the same is true for some other data which we currently don't # need so we don't transform it, eg. data.header.kernel_root prev_hash = None if request.data['data']['header']['prev_hash']: prev_hash = bytes(request.data['data']['header']['prev_hash']).hex() logger.info( 'Block accepted', extra={ 'height': height, 'hash': hash, 'prev_hash': prev_hash, 'blockchain': blockchain.slug, }, ) web_socket_msg_type = 'send_block' # handle reorg case # we expect blocks to come ordered by height, there are some edge cases # here which are not handled, but they're unlikely to happen (eg. reorg # happens but websocket calls for first blocks fails while for later it # doesn't and then the code bellow wouldn't spot a reorg) block_at_this_height = blockchain.blocks\ .filter(height=height, reorg__isnull=True)\ .first() # we fetch here because anyone can call this view - we don't want to # work with fake data new_block = fetch_and_store_block(blockchain, height, prefetch=False) if block_at_this_height: if block_at_this_height.hash == new_block.hash: # probably have fetched this block while bootstraping, accepted # view got called a bit later so we already have it, noop return Response(status=status.HTTP_200_OK) logger.info( 'Block accepted - reorg spotted', extra={ 'block_at_this_height': block_at_this_height, 'block_at_this_height.hash': block_at_this_height.hash, 'block_at_this_height.reorg': block_at_this_height.reorg, 'hash': new_block.hash }, ) # reorg spotted reorged_blocks = list(blockchain.blocks\ .filter(height__gte=height, reorg__isnull=True) .exclude(pk=new_block.pk) .order_by('height')) logger.info('reorged_blocks at start: {}'.format(reorged_blocks)) # these reorged blocks are guaranteed to be reorged, now find any # previous blocks which were also reorged - aka get common # ancestor of the reorged block at 'height' and the new (main) block # find the common ancestor of this block and the reorged block at # the same height. 
We start with the current height to avoid more # logic for Reorg instance params if new_block.hash == block_at_this_height.hash: # at height X we got H1, then we got H2 (this call), but now it # reorged back to H1, so we don't do anything, no reorg is # stored since we didn't fetch the block in time from the node logger.info('Reorg cancelled out, noop') return Response(status=status.HTTP_200_OK) logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash}) prev_block_new_chain = new_block prev_block_old_chain = reorged_blocks[0] logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) # remove the first one since it will get added again reorged_blocks = reorged_blocks[1:] logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks)) main_blocks = [] while True: # theoretically we might be missing the block in db but we don't # cover such cases currently if not prev_block_new_chain: logger.info('reached break in IF NOT prev_block_new_chain') # this means that prev_block_old_chain is also None, since # they're both "previous" of their genesis block break if prev_block_new_chain == prev_block_old_chain: logger.info('reached break in IF NOT prev_block_new_chain == prev_block_old_chain') # found the common ancestor break # add to the left because we want to keep it sorted by height reorged_blocks.insert(0, prev_block_old_chain) main_blocks.insert(0, prev_block_new_chain) logger.info('new reorged_blocks: {}'.format(reorged_blocks)) logger.info('new main_blocks: {}'.format(main_blocks)) prev_block_new_chain = prev_block_new_chain.get_previous_block() prev_block_old_chain = prev_block_old_chain.get_previous_block() logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(reorged_blocks, main_blocks)) reorg = Reorg.objects.create( blockchain=blockchain, start_reorg_block=reorged_blocks[0], end_reorg_block=reorged_blocks[-1], start_main_block=main_blocks[0], ) # Reorg post_save signal fixes .reorg on new/old blocks and fixes # inputs/outputs web_socket_msg_type = 'reorged' web_socket_msg = BlockSerializer(new_block).data if web_socket_msg_type == 'reorged': web_socket_msg = blockchain.slug # TODO: check if channels-redis 4.x is fixed: https://github.com/django/channels_redis/issues/332 channel_layer = channels.layers.get_channel_layer() async_to_sync(channel_layer.group_send)( 'default_group', { 'type': web_socket_msg_type, 'message': web_socket_msg, } ) # update the loading progress since it could be skewed due to the # periodic task updating it before this view has been called try:
logger = logging.getLogger(__name__) # Serve Vue Application index_view = never_cache(TemplateView.as_view(template_name='index.html')) class NodeGroupViewSet(CustomModelViewSet): """API endpoint for NodeGroup.""" queryset = NodeGroup.objects.all() filterset_class = NodeGroupFilter serializer_class = NodeGroupSerializer lookup_field = 'slug' permission_classes = [IsAuthenticated] def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) return super().create(request, *args, **kwargs) def destroy(self, request, *args, **kwargs): try: return super().destroy(request, *args, **kwargs) except ProtectedError as e: raise DRFValidationError( detail='Node group is related to nodes, delete them first') class NodeViewSet(CustomModelViewSet): """API endpoint for Node.""" queryset = Node.objects.all() filterset_class = NodeFilter serializer_class = NodeSerializer # currently all node views require authentication permission_classes = [IsAuthenticated] lookup_field = 'slug' def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk return super().create(request, *args, **kwargs) def update(self, request, *args, **kwargs): # NOTE: super().partial_update calls update(..., partial=True) if not kwargs.get('partial'): # we don't allow full updates - aka PUT raise DRFPermissionDenied() return super().update(request, *args, **kwargs) def partial_update(self, request, slug=None): request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk return super().partial_update(request, slug=slug) @action(detail=True, methods=['get']) def reachable(self, request, slug=None): node = self.get_object() try: res = node.is_reachable() except Exception as e: logger.exception('Unreachable node') res = False return Response(res, status=status.HTTP_200_OK) def destroy(self, request, *args, **kwargs): try: return super().destroy(request, *args, **kwargs) except ProtectedError as e: raise DRFValidationError( detail='Node is related to blockchains, delete them first') class BlockchainViewSet(CustomModelViewSet): """API endpoint for Blockchain.""" queryset = Blockchain.objects.all() serializer_class = BlockchainSerializer lookup_field = 'slug' def get_serializer_class(self): # when authenticated we return also NodeSerializer data if self.request.user.is_authenticated: return BlockchainExtendedSerializer return BlockchainSerializer def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) request.data['node'] = request.data['node'] return super().create(request, *args, **kwargs) def destroy(self, request, slug=None): instance = self.get_object() message = delete_blockchain.send(instance.slug) task = DramatiqTask.objects.create( type=DramatiqTask.Type.BLOCKCHAIN_DELETE, status=DramatiqTask.Status.IN_PROGRESS, message_id=message.message_id, content_object=instance, ) return Response( DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK) def _abort_previous_tasks(self, blockchain): conflicting_message_ids = DramatiqTask.objects.filter( status=DramatiqTask.Status.IN_PROGRESS, object_id=blockchain.id, content_type=ContentType.objects.get_for_model(blockchain) ).values_list('message_id', flat=True) # abort previous conflicting tasks if they exist for 
conflicting_message_id in conflicting_message_ids: abort(conflicting_message_id) @action(detail=True, methods=['post']) def bootstrap(self, request, slug=None): blockchain = self.get_object() if not blockchain.node.is_reachable: raise APIException(detail='Node is unreachable') self._abort_previous_tasks(blockchain) # create a new task message = bootstrap_blockchain.send(blockchain.slug) task = DramatiqTask.objects.create( type=DramatiqTask.Type.BOOTSTRAP, status=DramatiqTask.Status.IN_PROGRESS, message_id=message.message_id, content_object=blockchain, ) return Response( DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK) @action( detail=True, methods=['post'], url_path='bootstrap/abort', url_name='bootstrap-abort', ) def abort_bootstrap(self, request, slug=None): blockchain = self.get_object() self._abort_previous_tasks(blockchain) return Response(status=status.HTTP_200_OK) @action(detail=True, methods=['get']) def graphs(self, request, slug=None): """Returns data for all graphs.""" data = { 'transaction_graph': load_data_from_redis(f'tx_graph__{slug}'), } return Response(data=data, status=status.HTTP_200_OK) @action(detail=True, methods=['post']) def accepted(self, request, slug=None): # NOTE: if node is offline and then you start it again then it will # call this view for each block it will get. In this case there will be # many fast sequential calls to this view, there might be too many # postgres connections opened so view executions might actually fail. # The suggested solution is to comment out 'block_accepted_url' in # node's config file, run the node, wait for it to sync, uncomment # 'block_accepted_url' and then manually bootstrap it. blockchain = self.get_object() # check if new block has been receiver when this blockchain is in the # process of being deleted. deleting = DramatiqTask.objects.filter( type=DramatiqTask.Type.BLOCKCHAIN_DELETE, object_id=blockchain.id, content_type=ContentType.objects.get_for_model(blockchain) ).exists() if deleting: # nothing to do, ignore the new block return Response(status=status.HTTP_404_NOT_FOUND) # get request data height = request.data['data']['header']['height'] hash = request.data['hash'] # prev_hash comes as list of int bytes, so we convert it to hex # NOTE: the same is true for some other data which we currently don't # need so we don't transform it, eg. data.header.kernel_root prev_hash = None if request.data['data']['header']['prev_hash']: prev_hash = bytes(request.data['data']['header']['prev_hash']).hex() logger.info( 'Block accepted', extra={ 'height': height, 'hash': hash, 'prev_hash': prev_hash, 'blockchain': blockchain.slug, }, ) web_socket_msg_type = 'send_block' # handle reorg case # we expect blocks to come ordered by height, there are some edge cases # here which are not handled, but they're unlikely to happen (eg. 
reorg # happens but websocket calls for first blocks fails while for later it # doesn't and then the code bellow wouldn't spot a reorg) block_at_this_height = blockchain.blocks\ .filter(height=height, reorg__isnull=True)\ .first() # we fetch here because anyone can call this view - we don't want to # work with fake data new_block = fetch_and_store_block(blockchain, height, prefetch=False) if block_at_this_height: if block_at_this_height.hash == new_block.hash: # probably have fetched this block while bootstraping, accepted # view got called a bit later so we already have it, noop return Response(status=status.HTTP_200_OK) logger.info( 'Block accepted - reorg spotted', extra={ 'block_at_this_height': block_at_this_height, 'block_at_this_height.hash': block_at_this_height.hash, 'block_at_this_height.reorg': block_at_this_height.reorg, 'hash': new_block.hash }, ) # reorg spotted reorged_blocks = list(blockchain.blocks\ .filter(height__gte=height, reorg__isnull=True) .exclude(pk=new_block.pk) .order_by('height')) logger.info('reorged_blocks at start: {}'.format(reorged_blocks)) # these reorged blocks are guaranteed to be reorged, now find any # previous blocks which were also reorged - aka get common # ancestor of the reorged block at 'height' and the new (main) block # find the common ancestor of this block and the reorged block at # the same height. We start with the current height to avoid more # logic for Reorg instance params if new_block.hash == block_at_this_height.hash: # at height X we got H1, then we got H2 (this call), but now it # reorged back to H1, so we don't do anything, no reorg is # stored since we didn't fetch the block in time from the node logger.info('Reorg cancelled out, noop') return Response(status=status.HTTP_200_OK) logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash}) prev_block_new_chain = new_block prev_block_old_chain = reorged_blocks[0] logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) # remove the first one since it will get added again reorged_blocks = reorged_blocks[1:] logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks)) main_blocks = [] while True: # theoretically we might be missing the block in db but we don't # cover such cases currently if not prev_block_new_chain: logger.info('reached break in IF NOT prev_block_new_chain') # this means that prev_block_old_chain is also None, since # they're both "previous" of their genesis block break if prev_block_new_chain == prev_block_old_chain: logger.info('reached break in IF NOT prev_block_new_chain == prev_block_old_chain') # found the common ancestor break # add to the left because we want to keep it sorted by height reorged_blocks.insert(0, prev_block_old_chain) main_blocks.insert(0, prev_block_new_chain) logger.info('new reorged_blocks: {}'.format(reorged_blocks)) logger.info('new main_blocks: {}'.format(main_blocks)) prev_block_new_chain = prev_block_new_chain.get_previous_block() prev_block_old_chain = prev_block_old_chain.get_previous_block() logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(reorged_blocks, main_blocks)) reorg = Reorg.objects.create( blockchain=blockchain, start_reorg_block=reorged_blocks[0], end_reorg_block=reorged_blocks[-1], start_main_block=main_blocks[0], ) # Reorg post_save signal fixes .reorg on new/old blocks and 
fixes # inputs/outputs web_socket_msg_type = 'reorged' web_socket_msg = BlockSerializer(new_block).data if web_socket_msg_type == 'reorged': web_socket_msg = blockchain.slug # TODO: check if channels-redis 4.x is fixed: https://github.com/django/channels_redis/issues/332 channel_layer = channels.layers.get_channel_layer() async_to_sync(channel_layer.group_send)( 'default_group', { 'type': web_socket_msg_type, 'message': web_socket_msg, } ) # update the loading progress since it could be skewed due to the # periodic task updating it before this view has been called try:
update_blockchain_progress(blockchain)
1
2023-12-24 22:15:11+00:00
16k
wuhy68/Parameter-Efficient-MoE
train_moe.py
[ { "identifier": "CamelidaeConfig", "path": "camelidae/configuration_camelidae.py", "snippet": "class CamelidaeConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the LLaMA-7B.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`LlamaModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 11008):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details checkout [this\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\n `num_attention_heads`.\n pretraining_tp (`int`, *optional*, defaults to `1`):\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\n issue](https://github.com/pytorch/pytorch/issues/76232).\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. 
The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an\n experimental feature, subject to breaking API changes in future versions.\n num_experts (`int`, *optional*, defaults to 8):\n The number of MoE expert\n Example:\n\n ```python\n >>> from transformers import CamelidaeModel, CamelidaeConfig\n\n >>> # Initializing a Camelidae camelidae-7b style configuration\n >>> configuration = CamelidaeConfig()\n\n >>> # Initializing a model from the camelidae-7b style configuration\n >>> model = CamelidaeModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"llama\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_scaling=None,\n moe_dtype=\"bfloat16\",\n moe_scaling=0.25,\n num_experts=8,\n topk=1,\n output_router_logits=True,\n adapter_dim=64,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n\n # for backward compatibility\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.pretraining_tp = pretraining_tp\n self.use_cache = use_cache\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n self.moe_dtype = moe_dtype\n self.moe_scaling = moe_scaling\n self.num_experts = num_experts\n self.topk = topk\n self.output_router_logits = output_router_logits\n\n self.adapter_dim = adapter_dim\n\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )\n\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if (\n rope_scaling_factor is None\n or not isinstance(rope_scaling_factor, float)\n or rope_scaling_factor <= 1.0\n ):\n raise ValueError(\n f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\"\n )" }, { "identifier": 
"LlamaForCausalLM", "path": "camelidae/modeling_camelidae.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC\n )\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_router_logits: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MoECausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? 
Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n output_router_logits = (\n output_router_logits if output_router_logits is not None else self.config.output_router_logits\n )\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n output_router_logits=output_router_logits,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(\n self.vocab_size // self.config.pretraining_tp, dim=0\n )\n logits = [\n F.linear(hidden_states, lm_head_slices[i])\n for i in range(self.config.pretraining_tp)\n ]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n\n loss = None\n\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n aux_loss = None\n if output_router_logits:\n aux_loss = load_balancing_loss_func(\n outputs.router_logits if return_dict else outputs[-1], self.config.num_experts, self.config.topk\n )\n if labels is not None:\n loss += 0.01 * aux_loss\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n if output_router_logits:\n output = (aux_loss,) + output\n return (loss,) + output if loss is not None else output\n\n return MoECausalLMOutputWithPast(\n loss=loss,\n aux_loss=aux_loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n router_logits=outputs.router_logits,\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs,\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def 
_reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(\n past_state.index_select(0, beam_idx.to(past_state.device))\n for past_state in layer_past\n ),\n )\n return reordered_past" }, { "identifier": "get_keys_to_not_convert", "path": "transformers_utils.py", "snippet": "def get_keys_to_not_convert(model):\n r\"\"\"\n An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules\n we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want\n to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in\n int8.\n\n Parameters:\n model (`torch.nn.Module`):\n Input model\n \"\"\"\n # Create a copy of the model and tie the weights, then\n # check if it contains tied weights\n tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager`\n tied_model.tie_weights()\n\n tied_params = find_tied_parameters(tied_model)\n # For compatibility with Accelerate < 0.18\n if isinstance(tied_params, dict):\n tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())\n else:\n tied_keys = sum(tied_params, [])\n has_tied_params = len(tied_keys) > 0\n\n # Check if it is a base model\n is_base_model = not hasattr(model, model.base_model_prefix)\n\n # Ignore this for base models (BertModel, GPT2Model, etc.)\n if (not has_tied_params) and is_base_model:\n return []\n\n adapter_module = []\n for n, p in model.named_parameters():\n if 'adapter' in n:\n adapter_module.append(n)\n\n # otherwise they have an attached head\n list_modules = list(model.named_parameters())\n list_last_module = [list_modules[-1][0]]\n\n # add last module together with tied weights\n intersection = set(list_last_module) - set(tied_keys)\n list_untouched = list(set(tied_keys)) + list(intersection) + adapter_module\n\n # remove \".weight\" from the keys\n names_to_remove = [\".weight\", \".bias\"]\n filtered_module_names = []\n for name in list_untouched:\n for name_to_remove in names_to_remove:\n if name_to_remove in name:\n name = name.replace(name_to_remove, \"\")\n filtered_module_names.append(name)\n \n # print(filtered_module_names)\n return filtered_module_names" }, { "identifier": "_load_pretrained_model", "path": "transformers_utils.py", "snippet": "@classmethod\ndef _load_pretrained_model(\n cls,\n model,\n state_dict,\n loaded_keys,\n resolved_archive_file,\n pretrained_model_name_or_path,\n ignore_mismatched_sizes=False,\n sharded_metadata=None,\n _fast_init=True,\n low_cpu_mem_usage=False,\n device_map=None,\n offload_folder=None,\n offload_state_dict=None,\n dtype=None,\n is_quantized=False,\n keep_in_fp32_modules=None,\n ):\n is_safetensors = False\n if is_quantized:\n from transformers.utils.bitsandbytes import set_module_quantized_tensor_to_device\n\n if device_map is not None and \"disk\" in device_map.values():\n archive_file = (\n resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file\n )\n is_safetensors = archive_file.endswith(\".safetensors\")\n if offload_folder is None and not is_safetensors:\n raise ValueError(\n \"The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder`\"\n \" for them. 
Alternatively, make sure you have `safetensors` installed if the model you are using\"\n \" offers the weights in this format.\"\n )\n if offload_folder is not None:\n os.makedirs(offload_folder, exist_ok=True)\n if offload_state_dict is None:\n offload_state_dict = True\n\n is_sharded_safetensors = is_safetensors and sharded_metadata is not None\n # Retrieve missing & unexpected_keys\n model_state_dict = model.state_dict()\n expected_keys = list(model_state_dict.keys())\n prefix = model.base_model_prefix\n\n def _fix_key(key):\n if \"beta\" in key:\n return key.replace(\"beta\", \"bias\")\n if \"gamma\" in key:\n return key.replace(\"gamma\", \"weight\")\n return key\n\n original_loaded_keys = loaded_keys\n loaded_keys = [_fix_key(key) for key in loaded_keys]\n\n if len(prefix) > 0:\n has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)\n expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)\n else:\n has_prefix_module = False\n expects_prefix_module = False\n\n # key re-naming operations are never done on the keys\n # that are loaded, but always on the keys of the newly initialized model\n remove_prefix_from_model = not has_prefix_module and expects_prefix_module\n add_prefix_to_model = has_prefix_module and not expects_prefix_module\n\n if remove_prefix_from_model:\n _prefix = f\"{prefix}.\"\n expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)]\n expected_keys = [s[len(_prefix) :] if s.startswith(_prefix) else s for s in expected_keys]\n elif add_prefix_to_model:\n expected_keys = [\".\".join([prefix, s]) for s in expected_keys]\n\n missing_keys = list(set(expected_keys) - set(loaded_keys))\n unexpected_keys = set(loaded_keys) - set(expected_keys)\n # Remove nonpersistent buffers from unexpected keys: they are not in the state dict but will be in the model\n # buffers\n model_buffers = {n for n, _ in model.named_buffers()}\n if remove_prefix_from_model:\n model_buffers = {key[len(_prefix) :] if key.startswith(_prefix) else key for key in model_buffers}\n elif add_prefix_to_model:\n model_buffers = {\".\".join([prefix, key]) for key in model_buffers}\n unexpected_keys = list(unexpected_keys - model_buffers)\n\n model.tie_weights()\n ptrs = collections.defaultdict(list)\n for name, tensor in model.state_dict().items():\n id_tensor = id_tensor_storage(tensor) if tensor.device != torch.device(\"meta\") else id(tensor)\n ptrs[id_tensor].append(name)\n\n # These are all the pointers of shared tensors.\n tied_params = [names for _, names in ptrs.items() if len(names) > 1]\n\n for group in tied_params:\n if remove_prefix_from_model:\n group = [key[len(_prefix) :] if key.startswith(_prefix) else key for key in group]\n elif add_prefix_to_model:\n group = [\".\".join([prefix, key]) for key in group]\n missing_in_group = [k for k in missing_keys if k in group]\n if len(missing_in_group) > 0 and len(missing_in_group) < len(group):\n missing_keys = [k for k in missing_keys if k not in missing_in_group]\n\n # Some models may have keys that are not in the state by design, removing them before needlessly warning\n # the user.\n if cls._keys_to_ignore_on_load_missing is not None:\n for pat in cls._keys_to_ignore_on_load_missing:\n missing_keys = [k for k in missing_keys if re.search(pat, k) is None]\n\n if cls._keys_to_ignore_on_load_unexpected is not None:\n for pat in cls._keys_to_ignore_on_load_unexpected:\n unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]\n\n # retrieve weights on meta device and put them 
back on CPU.\n # This is not ideal in terms of memory, but if we don't do that not, we can't initialize them in the next step\n if low_cpu_mem_usage:\n for key in missing_keys:\n if key in list(model_state_dict.keys()):\n key = key\n elif f\"{prefix}.{key}\" in list(model_state_dict.keys()):\n key = f\"{prefix}.{key}\"\n elif key.startswith(prefix) and \".\".join(key.split(\".\")[1:]) in list(model_state_dict.keys()):\n key = \".\".join(key.split(\".\")[1:])\n param = model_state_dict[key]\n\n # upcast in fp32 if any\n target_dtype = dtype\n if (\n keep_in_fp32_modules is not None\n and dtype == torch.float16\n and any(module_to_keep_in_fp32 in key for module_to_keep_in_fp32 in keep_in_fp32_modules)\n ):\n target_dtype = torch.float32\n\n if param.device == torch.device(\"meta\"):\n if not (is_quantized):\n set_module_tensor_to_device(model, key, \"cpu\", torch.empty(*param.size(), dtype=target_dtype))\n else:\n set_module_quantized_tensor_to_device(\n model, key, \"cpu\", torch.empty(*param.size(), dtype=target_dtype)\n )\n\n # retrieve unintialized modules and initialize before maybe overriding that with the pretrained weights.\n if _fast_init:\n if remove_prefix_from_model:\n _loaded_keys = [f\"{prefix}.{k}\" for k in loaded_keys]\n elif add_prefix_to_model:\n _loaded_keys = [k[len(prefix) + 1 :] for k in loaded_keys]\n else:\n _loaded_keys = loaded_keys\n set_initialized_submodules(model, _loaded_keys)\n # This will only initialize submodules that are not marked as initialized by the line above.\n model.apply(model._initialize_weights)\n\n # Set some modules to fp32 if any\n if keep_in_fp32_modules is not None:\n for name, param in model.named_parameters():\n if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):\n param = param.to(torch.float32)\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module:\n start_prefix = cls.base_model_prefix + \".\"\n if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module:\n model_to_load = getattr(model, cls.base_model_prefix)\n base_model_expected_keys = list(model_to_load.state_dict().keys())\n if any(key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys):\n raise ValueError(\n \"The state dictionary of the model you are trying to load is corrupted. 
Are you sure it was \"\n \"properly saved?\"\n )\n if device_map is not None:\n device_map = {k.replace(f\"{cls.base_model_prefix}.\", \"\"): v for k, v in device_map.items()}\n\n def _find_mismatched_keys(\n state_dict,\n model_state_dict,\n loaded_keys,\n add_prefix_to_model,\n remove_prefix_from_model,\n ignore_mismatched_sizes,\n ):\n mismatched_keys = []\n if ignore_mismatched_sizes:\n for checkpoint_key in loaded_keys:\n # If the checkpoint is sharded, we may not have the key here.\n if checkpoint_key not in state_dict:\n continue\n model_key = checkpoint_key\n if remove_prefix_from_model:\n # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it.\n model_key = f\"{prefix}.{checkpoint_key}\"\n elif add_prefix_to_model:\n # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it.\n model_key = \".\".join(checkpoint_key.split(\".\")[1:])\n\n if (\n model_key in model_state_dict\n and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape\n ):\n mismatched_keys.append(\n (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)\n )\n del state_dict[checkpoint_key]\n\n return mismatched_keys\n\n if resolved_archive_file is not None:\n folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1])\n else:\n folder = None\n if device_map is not None and is_safetensors:\n param_device_map = expand_device_map(device_map, original_loaded_keys)\n\n str_dtype = str(dtype).replace(\"torch.\", \"\") if dtype is not None else \"float32\"\n if sharded_metadata is None:\n archive_file = (\n resolved_archive_file[0]\n if isinstance(resolved_archive_file, (list, tuple))\n else resolved_archive_file\n )\n weight_map = {p: archive_file for p in original_loaded_keys}\n else:\n weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata[\"weight_map\"].items()}\n offload_index = {\n p: {\"safetensors_file\": f, \"weight_name\": p, \"dtype\": str_dtype}\n for p, f in weight_map.items()\n if param_device_map[p] == \"disk\"\n }\n\n if state_dict is not None:\n # Whole checkpoint\n mismatched_keys = _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n add_prefix_to_model,\n remove_prefix_from_model,\n ignore_mismatched_sizes,\n )\n error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix)\n offload_index = None\n else:\n # Sharded checkpoint or whole but low_cpu_mem_usage==True\n\n # This should always be a list but, just to be sure.\n if not isinstance(resolved_archive_file, list):\n resolved_archive_file = [resolved_archive_file]\n\n error_msgs = []\n mismatched_keys = []\n if not is_safetensors:\n offload_index = {} if device_map is not None and \"disk\" in device_map.values() else None\n if offload_state_dict:\n state_dict_folder = tempfile.mkdtemp()\n state_dict_index = {}\n else:\n state_dict_folder = None\n state_dict_index = None\n\n if is_sharded_safetensors:\n disk_only_shard_files = get_disk_only_shard_files(device_map, sharded_metadata=sharded_metadata)\n disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files]\n else:\n disk_only_shard_files = []\n\n if len(resolved_archive_file) > 1:\n resolved_archive_file = logging.tqdm(resolved_archive_file, desc=\"Loading checkpoint shards\")\n for shard_file in resolved_archive_file:\n # Skip the load for shards that only contain disk-offloaded weights when using safetensors for the offload.\n if shard_file in disk_only_shard_files:\n continue\n state_dict = 
load_state_dict(shard_file)\n\n # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not\n # matching the weights in the model.\n mismatched_keys += _find_mismatched_keys(\n state_dict,\n model_state_dict,\n original_loaded_keys,\n add_prefix_to_model,\n remove_prefix_from_model,\n ignore_mismatched_sizes,\n )\n\n if low_cpu_mem_usage:\n new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(\n model_to_load,\n state_dict,\n loaded_keys,\n start_prefix,\n expected_keys,\n device_map=device_map,\n offload_folder=offload_folder,\n offload_index=offload_index,\n state_dict_folder=state_dict_folder,\n state_dict_index=state_dict_index,\n dtype=dtype,\n is_quantized=is_quantized,\n is_safetensors=is_safetensors,\n keep_in_fp32_modules=keep_in_fp32_modules,\n )\n error_msgs += new_error_msgs\n else:\n error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix)\n\n # force memory release\n del state_dict\n gc.collect()\n\n if offload_index is not None and len(offload_index) > 0:\n if model != model_to_load:\n # We need to add the prefix of the base model\n prefix = cls.base_model_prefix\n if not is_safetensors:\n for weight_name in offload_index:\n shutil.move(\n os.path.join(offload_folder, f\"{weight_name}.dat\"),\n os.path.join(offload_folder, f\"{prefix}.{weight_name}.dat\"),\n )\n offload_index = {f\"{prefix}.{key}\": value for key, value in offload_index.items()}\n if not is_safetensors:\n save_offload_index(offload_index, offload_folder)\n offload_index = None\n\n if offload_state_dict:\n # Load back temporarily offloaded state dict\n load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder)\n shutil.rmtree(state_dict_folder)\n\n if len(error_msgs) > 0:\n error_msg = \"\\n\\t\".join(error_msgs)\n if \"size mismatch\" in error_msg:\n error_msg += (\n \"\\n\\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method.\"\n )\n raise RuntimeError(f\"Error(s) in loading state_dict for {model.__class__.__name__}:\\n\\t{error_msg}\")\n\n if is_quantized:\n unexpected_keys = [elem for elem in unexpected_keys if \"SCB\" not in elem]\n missing_keys = [elem for elem in missing_keys if \"SCB\" not in elem]\n\n missing_keys = list(filter(lambda x: 'adapter' not in x, missing_keys))\n\n if len(unexpected_keys) > 0:\n archs = [] if model.config.architectures is None else model.config.architectures\n warner = logger.warn if model.__class__.__name__ in archs else logger.info\n warner(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or\"\n \" with another architecture (e.g. 
initializing a BertForSequenceClassification model from a\"\n \" BertForPreTraining model).\\n- This IS NOT expected if you are initializing\"\n f\" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical\"\n \" (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n elif len(mismatched_keys) == 0:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the checkpoint\"\n f\" was trained on, you can already use {model.__class__.__name__} for predictions without further\"\n \" training.\"\n )\n if len(mismatched_keys) > 0:\n mismatched_warning = \"\\n\".join(\n [\n f\"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated\"\n for key, shape1, shape2 in mismatched_keys\n ]\n )\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized because the shapes did not\"\n f\" match:\\n{mismatched_warning}\\nYou should probably TRAIN this model on a down-stream task to be able\"\n \" to use it for predictions and inference.\"\n )\n\n return model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs" } ]
import os import gc import json import math import random import copy import logging import torch import utils import bitsandbytes as bnb import transformers import warnings import transformers.integrations import transformers.modeling_utils from os.path import exists, join, isdir from copy import deepcopy from dataclasses import dataclass, field from typing import Dict, Optional, Sequence, Callable, List, Tuple, Union, Any from torch import nn from torch.utils.data import Dataset from transformers import Trainer, BitsAndBytesConfig, set_seed from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training from peft.tuners.lora import LoraLayer from camelidae.configuration_camelidae import CamelidaeConfig from camelidae.modeling_camelidae import LlamaForCausalLM from transformers_utils import ( get_keys_to_not_convert, _load_pretrained_model, )
11078
labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ) class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): # print('Saving PEFT checkpoint...') if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join( state.best_model_checkpoint, "adapter_model" ) else: checkpoint_folder = os.path.join( args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}" ) peft_model_path = os.path.join(checkpoint_folder, "adapter_model") model = kwargs["model"] model.save_pretrained(peft_model_path) moe_state = {} for param_tensor in model.state_dict(): if "adapter" in param_tensor: moe_state.update({param_tensor: model.state_dict()[param_tensor]}) # if "adapter" in param_tensor or "norm" in param_tensor: # moe_state.update({param_tensor: model.state_dict()[param_tensor]}) moe_model_path = os.path.join(checkpoint_folder, "moe_model.bin") # print(moe_state.keys()) torch.save(moe_state, moe_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): def touch(fname, times=None): with open(fname, "a"): os.utime(fname, times) touch(join(args.output_dir, "completed")) self.save_model(args, state, kwargs) def make_supervised_data_module( tokenizer: transformers.PreTrainedTokenizer, data_args ) -> Dict: """Make dataset and collator for supervised fine-tuning.""" train_dataset = SupervisedDataset( tokenizer=tokenizer, data_path=data_args.data_path ) data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer) return dict( train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator ) def find_all_linear_names(model, bits=4): cls = ( bnb.nn.Linear4bit if bits == 4 else (bnb.nn.Linear8bitLt if bits == 8 else torch.nn.Linear) ) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split(".") lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if "lm_head" in lora_module_names: # needed for 16-bit lora_module_names.remove("lm_head") return list(lora_module_names) def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def train(): parser = transformers.HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments) ) model_args, data_args, training_args = parser.parse_args_into_dataclasses() training_args.ddp_find_unused_parameters = False set_seed(42) model_config = CamelidaeConfig.from_pretrained(model_args.model_name_or_path) model_config.pretraining_tp = 1 ## without tensor parallelism rank # Camelidae Config model_config.moe_dtype = "bfloat16" model_config.lora_r = 64 model_config.lora_alpha = 16 model_config.adapter_dim = 64 model_config.topk = 2 model_config.moe_scaling = 1 model_config.num_experts = 8 model_config.output_router_logits = False # # Seq Length Extension # model_config.rope_scaling = { # "type": "dynamic", # "factor": 2, # }
# Copyright 2023 Rohan Taori, Ishaan Gulrajani, Tianyi Zhang, Yann Dubois, Xuechen Li # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. warnings.filterwarnings("ignore") transformers.integrations.get_keys_to_not_convert = get_keys_to_not_convert transformers.modeling_utils.PreTrainedModel._load_pretrained_model = ( _load_pretrained_model ) IGNORE_INDEX = -100 DEFAULT_PAD_TOKEN = "[PAD]" @dataclass class ModelArguments: model_name_or_path: Optional[str] = field(default="facebook/opt-125m") @dataclass class DataArguments: data_path: str = field( default=None, metadata={"help": "Path to the training data."} ) @dataclass class TrainingArguments(transformers.TrainingArguments): report_to: str = field(default="none") cache_dir: Optional[str] = field(default=None) optim: str = field( default="paged_adamw_32bit" ) # "paged_lion_8bit", "paged_adamw_8bit", "paged_lion_32bit", "paged_adamw_32bit" lr_scheduler_type: str = field( default="constant_with_warmup" ) # "constant", "constant_with_warmup", "cosine", "cosine_with_restarts", "linear" model_max_length: int = field( default=2048, metadata={ "help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)." }, ) def _tokenize_fn( strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer ) -> Dict: """Tokenize a list of strings.""" tokenized_list = [ tokenizer( text, return_tensors="pt", padding="longest", max_length=tokenizer.model_max_length, truncation=True, ) for text in strings ] input_ids = labels = [tokenized.input_ids[0] for tokenized in tokenized_list] input_ids_lens = labels_lens = [ tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list ] return dict( input_ids=input_ids, labels=labels, input_ids_lens=input_ids_lens, labels_lens=labels_lens, ) def preprocess( sources: Sequence[str], targets: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, ) -> Dict: """Preprocess the data by tokenizing.""" examples = [s + t for s, t in zip(sources, targets)] examples_tokenized, sources_tokenized = [ _tokenize_fn(strings, tokenizer) for strings in (examples, sources) ] input_ids = examples_tokenized["input_ids"] labels = copy.deepcopy(input_ids) for label, source_len in zip(labels, sources_tokenized["input_ids_lens"]): label[:source_len] = IGNORE_INDEX return dict(input_ids=input_ids, labels=labels) class SupervisedDataset(Dataset): """Dataset for supervised fine-tuning.""" def __init__(self, data_path: str, tokenizer: transformers.PreTrainedTokenizer): super(SupervisedDataset, self).__init__() logging.warning("Loading data: {}".format(data_path)) data_list = utils.jload(data_path) # Preprocess Data logging.warning("Processing data") self.tokenizer = tokenizer self.sources = [] self.targets = [] for idx in range(len(data_list)): data = data_list[idx] corpus = data["corpus"] if corpus != "": # pretrain mode source = f"{tokenizer.bos_token}" self.sources.append(source) target = f"{corpus}{tokenizer.eos_token}" self.targets.append(target) else: # instruction mode instruction = 
data["instruction"] conversation = data["conversation"] if len(conversation) == 1: if instruction == "": source = f"{tokenizer.bos_token}" else: source = f"{tokenizer.bos_token}### System:\n{instruction}\n" source += ( f"### Human:\n{conversation[0]['input']}\n### Assistant:\n" ) self.sources.append(source) target = f"{conversation[0]['output']}{tokenizer.eos_token}" self.targets.append(target) # else: # dialog mode del data_list gc.collect() # ## Debug Mode # self.sources = self.sources[:10000] # self.targets = self.targets[:10000] # logging.warning("Tokenizing inputs... This may take some time...") # data_dict = preprocess(sources, targets, tokenizer) # del sources, targets # gc.collect() # self.input_ids = data_dict["input_ids"] # self.labels = data_dict["labels"] # del data_dict # gc.collect() logging.warning("there are {} data in dataset".format(len(self.sources))) def __len__(self): return len(self.sources) def __getitem__(self, i): # return dict(input_ids=self.input_ids[i], labels=self.labels[i]) source = [self.sources[i]] target = [self.targets[i]] data_dict = preprocess(source, target, self.tokenizer) input_ids = data_dict["input_ids"][0] labels = data_dict["labels"][0] return dict(input_ids=input_ids, labels=labels) @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances: Sequence[Dict]) -> Dict[str, torch.Tensor]: input_ids, labels = tuple( [instance[key] for instance in instances] for key in ("input_ids", "labels") ) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id ) labels = torch.nn.utils.rnn.pad_sequence( labels, batch_first=True, padding_value=IGNORE_INDEX ) return dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ) class SavePeftModelCallback(transformers.TrainerCallback): def save_model(self, args, state, kwargs): # print('Saving PEFT checkpoint...') if state.best_model_checkpoint is not None: checkpoint_folder = os.path.join( state.best_model_checkpoint, "adapter_model" ) else: checkpoint_folder = os.path.join( args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}" ) peft_model_path = os.path.join(checkpoint_folder, "adapter_model") model = kwargs["model"] model.save_pretrained(peft_model_path) moe_state = {} for param_tensor in model.state_dict(): if "adapter" in param_tensor: moe_state.update({param_tensor: model.state_dict()[param_tensor]}) # if "adapter" in param_tensor or "norm" in param_tensor: # moe_state.update({param_tensor: model.state_dict()[param_tensor]}) moe_model_path = os.path.join(checkpoint_folder, "moe_model.bin") # print(moe_state.keys()) torch.save(moe_state, moe_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) def on_save(self, args, state, control, **kwargs): self.save_model(args, state, kwargs) return control def on_train_end(self, args, state, control, **kwargs): def touch(fname, times=None): with open(fname, "a"): os.utime(fname, times) touch(join(args.output_dir, "completed")) self.save_model(args, state, kwargs) def make_supervised_data_module( tokenizer: transformers.PreTrainedTokenizer, data_args ) -> Dict: """Make dataset and collator for supervised fine-tuning.""" train_dataset = SupervisedDataset( tokenizer=tokenizer, data_path=data_args.data_path ) data_collator = 
DataCollatorForSupervisedDataset(tokenizer=tokenizer) return dict( train_dataset=train_dataset, eval_dataset=None, data_collator=data_collator ) def find_all_linear_names(model, bits=4): cls = ( bnb.nn.Linear4bit if bits == 4 else (bnb.nn.Linear8bitLt if bits == 8 else torch.nn.Linear) ) lora_module_names = set() for name, module in model.named_modules(): if isinstance(module, cls): names = name.split(".") lora_module_names.add(names[0] if len(names) == 1 else names[-1]) if "lm_head" in lora_module_names: # needed for 16-bit lora_module_names.remove("lm_head") return list(lora_module_names) def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def train(): parser = transformers.HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments) ) model_args, data_args, training_args = parser.parse_args_into_dataclasses() training_args.ddp_find_unused_parameters = False set_seed(42) model_config = CamelidaeConfig.from_pretrained(model_args.model_name_or_path) model_config.pretraining_tp = 1 ## without tensor parallelism rank # Camelidae Config model_config.moe_dtype = "bfloat16" model_config.lora_r = 64 model_config.lora_alpha = 16 model_config.adapter_dim = 64 model_config.topk = 2 model_config.moe_scaling = 1 model_config.num_experts = 8 model_config.output_router_logits = False # # Seq Length Extension # model_config.rope_scaling = { # "type": "dynamic", # "factor": 2, # }
model = LlamaForCausalLM.from_pretrained(
1
2023-12-22 02:54:29+00:00
16k
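The `snippet` strings inside the context entries of the records above are stored with escaped newlines and quotes (`\n`, `\"`), so they read as one long line in this dump. As a minimal sketch of recovering readable source from such a fragment — assuming the dump preserves standard JSON string escaping, which may not hold for rows that were truncated mid-escape; the variable names here are hypothetical and only for illustration:

```python
import json

# An escaped fragment copied from one of the "snippet" strings above;
# "escaped_snippet" is a hypothetical name used only for this example.
escaped_snippet = "def __len__(self):\\n        return len(self.sources)"

# Wrapping the escaped text in double quotes lets json.loads undo the
# \n / \" escaping used throughout the dump and restore line breaks.
readable = json.loads(f'"{escaped_snippet}"')
print(readable)
```

Running this prints the two-line method body as ordinary Python source; the same unescaping applies to the longer class and function snippets quoted in the context lists.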
lchen1019/Image_Cropper
ISAT/widgets/mainwindow.py
[ { "identifier": "Ui_MainWindow", "path": "ISAT/ui/MainWindow.py", "snippet": "class Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1280, 764)\n MainWindow.setMinimumSize(QtCore.QSize(800, 600))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n MainWindow.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/icons/icons/isat_bg_50x25.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n MainWindow.setWindowIcon(icon)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setSpacing(0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setEnabled(True)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1280, 24))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menubar.setFont(font)\n self.menubar.setAutoFillBackground(False)\n self.menubar.setDefaultUp(False)\n self.menubar.setNativeMenuBar(True)\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuFile.setFont(font)\n self.menuFile.setObjectName(\"menuFile\")\n self.menuView = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuView.setFont(font)\n self.menuView.setObjectName(\"menuView\")\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/icon/icons/翻译_translate.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n MainWindow.setMenuBar(self.menubar)\n\n self.menuTools = QtWidgets.QMenu(self.menubar)\n self.menuTools.setEnabled(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuTools.setFont(font)\n self.menuTools.setObjectName(\"menuTools\")\n\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.toolBar.setFont(font)\n self.toolBar.setIconSize(QtCore.QSize(24, 24))\n self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)\n self.toolBar.setFloatable(False)\n self.toolBar.setObjectName(\"toolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.dockWidgetContents_2 = QtWidgets.QWidget()\n self.dockWidgetContents_2.setObjectName(\"dockWidgetContents_2\")\n self.dockWidgetContents_3 = QtWidgets.QWidget()\n self.dockWidgetContents_3.setObjectName(\"dockWidgetContents_3\")\n self.files_dock = QtWidgets.QDockWidget(MainWindow)\n self.files_dock.setObjectName(\"files_dock\")\n self.dockWidgetContents = 
QtWidgets.QWidget()\n self.dockWidgetContents.setObjectName(\"dockWidgetContents\")\n self.files_dock.setWidget(self.dockWidgetContents)\n MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.files_dock)\n self.dockWidgetContents_4 = QtWidgets.QWidget()\n self.dockWidgetContents_4.setObjectName(\"dockWidgetContents_4\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.dockWidgetContents_4)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.actionOpen_dir = QtWidgets.QAction(MainWindow)\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/icon/icons/照片_pic.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionOpen_dir.setIcon(icon2)\n self.actionOpen_dir.setObjectName(\"actionOpen_dir\")\n self.actionZoom_in = QtWidgets.QAction(MainWindow)\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icon/icons/放大_zoom-in.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionZoom_in.setIcon(icon3)\n self.actionZoom_in.setObjectName(\"actionZoom_in\")\n self.actionZoom_out = QtWidgets.QAction(MainWindow)\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\":/icon/icons/缩小_zoom-out.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionZoom_out.setIcon(icon4)\n self.actionZoom_out.setObjectName(\"actionZoom_out\")\n self.actionFit_wiondow = QtWidgets.QAction(MainWindow)\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\":/icon/icons/全宽_fullwidth.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionFit_wiondow.setIcon(icon5)\n self.actionFit_wiondow.setObjectName(\"actionFit_wiondow\")\n self.actionSetting = QtWidgets.QAction(MainWindow)\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(\":/icon/icons/设置_setting-two.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSetting.setIcon(icon6)\n self.actionSetting.setObjectName(\"actionSetting\")\n self.actionExit = QtWidgets.QAction(MainWindow)\n icon7 = QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(\":/icon/icons/开关_power.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionExit.setIcon(icon7)\n self.actionExit.setObjectName(\"actionExit\")\n self.actionSave_dir = QtWidgets.QAction(MainWindow)\n icon8 = QtGui.QIcon()\n icon8.addPixmap(QtGui.QPixmap(\":/icon/icons/文件夹-开_folder-open.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSave_dir.setIcon(icon8)\n self.actionSave_dir.setObjectName(\"actionSave_dir\")\n self.actionSave = QtWidgets.QAction(MainWindow)\n icon9 = QtGui.QIcon()\n icon9.addPixmap(QtGui.QPixmap(\":/icon/icons/保存_save.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSave.setIcon(icon9)\n self.actionSave.setObjectName(\"actionSave\")\n self.actionPrev = QtWidgets.QAction(MainWindow)\n self.actionPrev.setCheckable(False)\n icon10 = QtGui.QIcon()\n icon10.addPixmap(QtGui.QPixmap(\":/icon/icons/上一步_back.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionPrev.setIcon(icon10)\n self.actionPrev.setMenuRole(QtWidgets.QAction.TextHeuristicRole)\n self.actionPrev.setPriority(QtWidgets.QAction.NormalPriority)\n self.actionPrev.setObjectName(\"actionPrev\")\n self.actionNext = QtWidgets.QAction(MainWindow)\n icon11 = QtGui.QIcon()\n icon11.addPixmap(QtGui.QPixmap(\":/icon/icons/下一步_next.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionNext.setIcon(icon11)\n self.actionNext.setObjectName(\"actionNext\")\n self.actionShortcut = QtWidgets.QAction(MainWindow)\n icon12 = QtGui.QIcon()\n icon12.addPixmap(QtGui.QPixmap(\":/icon/icons/键盘_keyboard-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionShortcut.setIcon(icon12)\n 
self.actionShortcut.setObjectName(\"actionShortcut\")\n self.actionAbout = QtWidgets.QAction(MainWindow)\n icon13 = QtGui.QIcon()\n icon13.addPixmap(QtGui.QPixmap(\":/icon/icons/我的_me.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionAbout.setIcon(icon13)\n self.actionAbout.setObjectName(\"actionAbout\")\n self.actionDelete = QtWidgets.QAction(MainWindow)\n self.actionDelete.setEnabled(False)\n icon15 = QtGui.QIcon()\n icon15.addPixmap(QtGui.QPixmap(\":/icon/icons/删除_delete.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionDelete.setIcon(icon15)\n self.actionDelete.setObjectName(\"actionDelete\")\n self.actionBit_map = QtWidgets.QAction(MainWindow)\n self.actionBit_map.setCheckable(False)\n self.actionBit_map.setIcon(icon2)\n self.actionBit_map.setObjectName(\"actionBit_map\")\n self.actionEdit = QtWidgets.QAction(MainWindow)\n self.actionEdit.setEnabled(False)\n icon16 = QtGui.QIcon()\n icon16.addPixmap(QtGui.QPixmap(\":/icon/icons/编辑_edit.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionEdit.setIcon(icon16)\n self.actionEdit.setObjectName(\"actionEdit\")\n self.actionTo_top = QtWidgets.QAction(MainWindow)\n self.actionTo_top.setEnabled(False)\n icon17 = QtGui.QIcon()\n icon17.addPixmap(QtGui.QPixmap(\":/icon/icons/去顶部_to-top.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionTo_top.setIcon(icon17)\n self.actionTo_top.setObjectName(\"actionTo_top\")\n self.actionTo_bottom = QtWidgets.QAction(MainWindow)\n self.actionTo_bottom.setEnabled(False)\n icon18 = QtGui.QIcon()\n icon18.addPixmap(QtGui.QPixmap(\":/icon/icons/去底部_to-bottom.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionTo_bottom.setIcon(icon18)\n self.actionTo_bottom.setObjectName(\"actionTo_bottom\")\n self.actionChinese = QtWidgets.QAction(MainWindow)\n self.actionChinese.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionChinese.setFont(font)\n self.actionChinese.setObjectName(\"actionChinese\")\n self.actionEnglish = QtWidgets.QAction(MainWindow)\n self.actionEnglish.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionEnglish.setFont(font)\n self.actionEnglish.setObjectName(\"actionEnglish\")\n self.actionBackspace = QtWidgets.QAction(MainWindow)\n icon19 = QtGui.QIcon()\n icon19.addPixmap(QtGui.QPixmap(\":/icon/icons/删除_delete-two.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionBackspace.setIcon(icon19)\n self.actionBackspace.setObjectName(\"actionBackspace\")\n self.actionCancel = QtWidgets.QAction(MainWindow)\n icon20 = QtGui.QIcon()\n icon20.addPixmap(QtGui.QPixmap(\":/icon/icons/关闭_close-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionCancel.setIcon(icon20)\n self.actionCancel.setObjectName(\"actionCancel\")\n self.actionFinish = QtWidgets.QAction(MainWindow)\n icon21 = QtGui.QIcon()\n icon21.addPixmap(QtGui.QPixmap(\":/icon/icons/校验_check-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionFinish.setIcon(icon21)\n self.actionFinish.setObjectName(\"actionFinish\")\n self.actionPolygon = QtWidgets.QAction(MainWindow)\n icon22 = QtGui.QIcon()\n icon22.addPixmap(QtGui.QPixmap(\":/icon/icons/锚点_anchor.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionPolygon.setIcon(icon22)\n self.actionPolygon.setObjectName(\"actionPolygon\")\n self.actionVisible = QtWidgets.QAction(MainWindow)\n icon23 = QtGui.QIcon()\n icon23.addPixmap(QtGui.QPixmap(\":/icon/icons/眼睛_eyes.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n 
self.actionVisible.setIcon(icon23)\n self.actionVisible.setObjectName(\"actionVisible\")\n self.actionContour_Max_only = QtWidgets.QAction(MainWindow)\n self.actionContour_Max_only.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_Max_only.setFont(font)\n self.actionContour_Max_only.setObjectName(\"actionContour_Max_only\")\n self.actionContour_External = QtWidgets.QAction(MainWindow)\n self.actionContour_External.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_External.setFont(font)\n self.actionContour_External.setObjectName(\"actionContour_External\")\n self.actionContour_All = QtWidgets.QAction(MainWindow)\n self.actionContour_All.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_All.setFont(font)\n self.actionContour_All.setObjectName(\"actionContour_All\")\n self.actionModel_manage = QtWidgets.QAction(MainWindow)\n icon24 = QtGui.QIcon()\n icon24.addPixmap(QtGui.QPixmap(\":/icon/icons/列表_list-middle.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionModel_manage.setIcon(icon24)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionModel_manage.setFont(font)\n self.actionModel_manage.setObjectName(\"actionModel_manage\")\n self.actionConverter = QtWidgets.QAction(MainWindow)\n icon25 = QtGui.QIcon()\n icon25.addPixmap(QtGui.QPixmap(\":/icon/icons/转换文件夹1_folder-conversion-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionConverter.setIcon(icon25)\n self.actionConverter.setObjectName(\"actionConverter\")\n self.menuFile.addAction(self.actionOpen_dir)\n self.menuFile.addAction(self.actionSave_dir)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionPrev)\n self.menuFile.addAction(self.actionNext)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionSetting)\n self.menuFile.addAction(self.actionExit)\n self.menuView.addSeparator()\n self.menuView.addAction(self.actionZoom_in)\n self.menuView.addAction(self.actionZoom_out)\n self.menuView.addAction(self.actionFit_wiondow)\n self.menuView.addSeparator()\n self.menuView.addAction(self.actionBit_map)\n self.menuView.addSeparator()\n self.menuTools.addSeparator()\n self.menuTools.addAction(self.actionConverter)\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuView.menuAction())\n self.menubar.addAction(self.menuTools.menuAction())\n\n self.toolBar.addAction(self.actionPrev)\n self.toolBar.addAction(self.actionNext)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.actionPolygon)\n self.toolBar.addAction(self.actionFinish)\n self.toolBar.addAction(self.actionCancel)\n self.toolBar.addAction(self.actionSave)\n self.toolBar.addAction(self.actionDelete)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.actionZoom_in)\n self.toolBar.addAction(self.actionZoom_out)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ISAT\"))\n self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\n self.menuView.setTitle(_translate(\"MainWindow\", \"View\"))\n self.menuTools.setTitle(_translate(\"MainWindow\", \"Tools\"))\n self.toolBar.setWindowTitle(_translate(\"MainWindow\", \"toolBar\"))\n 
self.files_dock.setWindowTitle(_translate(\"MainWindow\", \"Files\"))\n self.actionOpen_dir.setText(_translate(\"MainWindow\", \"Images dir\"))\n self.actionOpen_dir.setStatusTip(_translate(\"MainWindow\", \"Open images dir.\"))\n self.actionZoom_in.setText(_translate(\"MainWindow\", \"Zoom in\"))\n self.actionZoom_in.setStatusTip(_translate(\"MainWindow\", \"Zoom in.\"))\n self.actionZoom_out.setText(_translate(\"MainWindow\", \"Zoom out\"))\n self.actionZoom_out.setStatusTip(_translate(\"MainWindow\", \"Zoom out.\"))\n self.actionFit_wiondow.setText(_translate(\"MainWindow\", \"Fit window\"))\n self.actionFit_wiondow.setToolTip(_translate(\"MainWindow\", \"Fit window\"))\n self.actionFit_wiondow.setStatusTip(_translate(\"MainWindow\", \"Fit window.\"))\n self.actionFit_wiondow.setShortcut(_translate(\"MainWindow\", \"F\"))\n self.actionSetting.setText(_translate(\"MainWindow\", \"Setting\"))\n self.actionSetting.setStatusTip(_translate(\"MainWindow\", \"Setting.\"))\n self.actionExit.setText(_translate(\"MainWindow\", \"Exit\"))\n self.actionExit.setToolTip(_translate(\"MainWindow\", \"Exit\"))\n self.actionExit.setStatusTip(_translate(\"MainWindow\", \"Exit.\"))\n self.actionSave_dir.setText(_translate(\"MainWindow\", \"Label dir\"))\n self.actionSave_dir.setStatusTip(_translate(\"MainWindow\", \"Open label dir.\"))\n self.actionSave.setText(_translate(\"MainWindow\", \"Save\"))\n self.actionSave.setStatusTip(_translate(\"MainWindow\", \"Save annotation.\"))\n self.actionSave.setShortcut(_translate(\"MainWindow\", \"S\"))\n self.actionPrev.setText(_translate(\"MainWindow\", \"Prev image\"))\n self.actionPrev.setToolTip(_translate(\"MainWindow\", \"Prev image\"))\n self.actionPrev.setStatusTip(_translate(\"MainWindow\", \"Prev image.\"))\n self.actionPrev.setShortcut(_translate(\"MainWindow\", \"A\"))\n self.actionNext.setText(_translate(\"MainWindow\", \"Next image\"))\n self.actionNext.setToolTip(_translate(\"MainWindow\", \"Next image\"))\n self.actionNext.setStatusTip(_translate(\"MainWindow\", \"Next image.\"))\n self.actionNext.setShortcut(_translate(\"MainWindow\", \"D\"))\n self.actionShortcut.setText(_translate(\"MainWindow\", \"Shortcut\"))\n self.actionAbout.setText(_translate(\"MainWindow\", \"About\"))\n self.actionDelete.setText(_translate(\"MainWindow\", \"Delete\"))\n self.actionDelete.setToolTip(_translate(\"MainWindow\", \"Delete polygon\"))\n self.actionDelete.setStatusTip(_translate(\"MainWindow\", \"Delete polygon.\"))\n self.actionDelete.setShortcut(_translate(\"MainWindow\", \"Del\"))\n self.actionBit_map.setText(_translate(\"MainWindow\", \"Bit map\"))\n self.actionBit_map.setStatusTip(_translate(\"MainWindow\", \"Show instance or segmeent state.\"))\n self.actionBit_map.setShortcut(_translate(\"MainWindow\", \"Space\"))\n self.actionEdit.setText(_translate(\"MainWindow\", \"Edit\"))\n self.actionEdit.setToolTip(_translate(\"MainWindow\", \"Edit polygon\"))\n self.actionEdit.setStatusTip(_translate(\"MainWindow\", \"Edit polygon attribute.\"))\n self.actionTo_top.setText(_translate(\"MainWindow\", \"To top\"))\n self.actionTo_top.setToolTip(_translate(\"MainWindow\", \"Move polygon to top layer\"))\n self.actionTo_top.setStatusTip(_translate(\"MainWindow\", \"Move polygon to top layer.\"))\n self.actionTo_top.setShortcut(_translate(\"MainWindow\", \"T\"))\n self.actionTo_bottom.setText(_translate(\"MainWindow\", \"To bottom\"))\n self.actionTo_bottom.setToolTip(_translate(\"MainWindow\", \"Move polygon to bottom layer\"))\n 
self.actionTo_bottom.setStatusTip(_translate(\"MainWindow\", \"Move polygon to bottom layer.\"))\n self.actionTo_bottom.setShortcut(_translate(\"MainWindow\", \"B\"))\n self.actionChinese.setText(_translate(\"MainWindow\", \"中文\"))\n self.actionEnglish.setText(_translate(\"MainWindow\", \"English\"))\n self.actionBackspace.setText(_translate(\"MainWindow\", \"Backspace\"))\n self.actionBackspace.setToolTip(_translate(\"MainWindow\", \"Backspace\"))\n self.actionBackspace.setStatusTip(_translate(\"MainWindow\", \"Backspace.\"))\n self.actionBackspace.setShortcut(_translate(\"MainWindow\", \"Z\"))\n self.actionCancel.setText(_translate(\"MainWindow\", \"Cancel\"))\n self.actionCancel.setToolTip(_translate(\"MainWindow\", \"Annotate canceled\"))\n self.actionCancel.setStatusTip(_translate(\"MainWindow\", \"Annotate canceled.\"))\n self.actionCancel.setShortcut(_translate(\"MainWindow\", \"Esc\"))\n self.actionFinish.setText(_translate(\"MainWindow\", \"Finish\"))\n self.actionFinish.setToolTip(_translate(\"MainWindow\", \"Annotate finished\"))\n self.actionFinish.setStatusTip(_translate(\"MainWindow\", \"Annotate finished.\"))\n self.actionFinish.setShortcut(_translate(\"MainWindow\", \"E\"))\n self.actionPolygon.setText(_translate(\"MainWindow\", \"Polygon\"))\n self.actionPolygon.setToolTip(_translate(\"MainWindow\", \"Draw polygon\"))\n self.actionPolygon.setStatusTip(_translate(\"MainWindow\", \"Accurately annotate by drawing polygon. \"))\n self.actionPolygon.setShortcut(_translate(\"MainWindow\", \"Q\"))\n self.actionVisible.setText(_translate(\"MainWindow\", \"Visible\"))\n self.actionVisible.setToolTip(_translate(\"MainWindow\", \"Visible\"))\n self.actionVisible.setStatusTip(_translate(\"MainWindow\", \"Visible.\"))\n self.actionVisible.setShortcut(_translate(\"MainWindow\", \"V\"))\n self.actionContour_Max_only.setText(_translate(\"MainWindow\", \"Max only\"))\n self.actionContour_Max_only.setStatusTip(_translate(\"MainWindow\", \"Max contour save only.\"))\n self.actionContour_Max_only.setWhatsThis(_translate(\"MainWindow\", \"Max contour save only.\"))\n self.actionContour_External.setText(_translate(\"MainWindow\", \"External\"))\n self.actionContour_External.setStatusTip(_translate(\"MainWindow\", \"External contour save only.\"))\n self.actionContour_External.setWhatsThis(_translate(\"MainWindow\", \"External contour save only.\"))\n self.actionContour_All.setText(_translate(\"MainWindow\", \"All\"))\n self.actionContour_All.setStatusTip(_translate(\"MainWindow\", \"All contour save.\"))\n self.actionContour_All.setWhatsThis(_translate(\"MainWindow\", \"All contour save.\"))\n self.actionModel_manage.setText(_translate(\"MainWindow\", \"Model manage\"))\n self.actionModel_manage.setStatusTip(_translate(\"MainWindow\", \"Model manage.\"))\n self.actionModel_manage.setWhatsThis(_translate(\"MainWindow\", \"Model manage.\"))\n self.actionConverter.setText(_translate(\"MainWindow\", \"Converter\"))" }, { "identifier": "FilesDockWidget", "path": "ISAT/widgets/files_dock_widget.py", "snippet": "class FilesDockWidget(QtWidgets.QWidget, Ui_Form):\n def __init__(self, mainwindow):\n super(FilesDockWidget, self).__init__()\n self.setupUi(self)\n self.mainwindow = mainwindow\n self.listWidget.clicked.connect(self.listwidget_doubleclick)\n self.lineEdit_jump.returnPressed.connect(self.mainwindow.jump_to)\n\n def generate_item_and_itemwidget(self, file_name):\n item = QtWidgets.QListWidgetItem()\n item.setSizeHint(QtCore.QSize(200, 30))\n item_widget = QtWidgets.QWidget()\n layout = 
QtWidgets.QHBoxLayout()\n layout.setContentsMargins(9, 1, 9, 1)\n\n state_color = QtWidgets.QLabel()\n state_color.setFixedWidth(5)\n state_color.setStyleSheet(\"background-color: {};\".format('#999999'))\n state_color.setObjectName('state_color')\n layout.addWidget(state_color)\n\n category = QtWidgets.QLabel(file_name)\n category.setObjectName('category')\n layout.addWidget(category)\n\n item_widget.setLayout(layout)\n return item, item_widget\n\n def update_widget(self):\n self.listWidget.clear()\n if self.mainwindow.files_list is None:\n return\n\n for file_path in self.mainwindow.files_list:\n _, file_name = os.path.split(file_path)\n item = QtWidgets.QListWidgetItem()\n item.setSizeHint(QtCore.QSize(200, 30))\n # item, item_widget = self.generate_item_and_itemwidget(file_name)\n\n item.setText(file_name)\n self.listWidget.addItem(item)\n # self.listWidget.setItemWidget(item, item_widget)\n\n self.label_all.setText('{}'.format(len(self.mainwindow.files_list)))\n\n def set_select(self, row):\n self.listWidget.setCurrentRow(row)\n\n def listwidget_doubleclick(self):\n row = self.listWidget.currentRow()\n self.mainwindow.current_index = row\n self.mainwindow.show_image(row)" }, { "identifier": "AnnotationScene", "path": "ISAT/widgets/canvas.py", "snippet": "class AnnotationScene(QtWidgets.QGraphicsScene):\n def __init__(self, mainwindow):\n super(AnnotationScene, self).__init__()\n self.mainwindow = mainwindow\n self.image_item:QtWidgets.QGraphicsPixmapItem = None\n self.image_data = None\n self.current_graph:QGraphicsRectItem = None\n self.mode = STATUSMode.VIEW\n self.click = CLICKMode.POSITIVE\n self.click_points = []\n\n self.mask_alpha = 0.5\n self.top_layer = 1\n\n self.guide_line_x:QtWidgets.QGraphicsLineItem = None\n self.guide_line_y:QtWidgets.QGraphicsLineItem = None\n\n # 拖动鼠标描点 \n self.last_draw_time = time.time()\n self.draw_interval = 0.15\n self.pressd = False\n\n def load_image(self, image_path:str):\n self.clear()\n\n self.image_data = np.array(Image.open(image_path))\n \n self.image_item = QtWidgets.QGraphicsPixmapItem()\n self.image_item.setZValue(0)\n self.addItem(self.image_item)\n self.image_item.setPixmap(QtGui.QPixmap(image_path))\n self.setSceneRect(self.image_item.boundingRect())\n \n def start_draw_polygon(self):\n if self.mode != STATUSMode.VIEW:\n return\n self.change_mode_to_create()\n if self.mode == STATUSMode.CREATE:\n self.start_draw()\n \n def start_draw(self):\n print('start_draw')\n self.current_graph = QGraphicsRectItem()\n self.addItem(self.current_graph)\n \n def change_mode_to_view(self):\n self.mode = STATUSMode.VIEW\n self.image_item.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))\n self.mainwindow.actionPrev.setEnabled(True)\n self.mainwindow.actionNext.setEnabled(True)\n\n self.mainwindow.actionPolygon.setEnabled(self.mainwindow.can_be_annotated)\n self.mainwindow.actionBackspace.setEnabled(False)\n self.mainwindow.actionFinish.setEnabled(False)\n self.mainwindow.actionCancel.setEnabled(False)\n\n self.mainwindow.actionEdit.setEnabled(False)\n self.mainwindow.actionDelete.setEnabled(False)\n self.mainwindow.actionSave.setEnabled(self.mainwindow.can_be_annotated)\n\n def change_mode_to_create(self):\n if self.image_item is None:\n return\n self.mode = STATUSMode.CREATE\n self.image_item.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))\n self.mainwindow.actionPrev.setEnabled(False)\n self.mainwindow.actionNext.setEnabled(False)\n\n self.mainwindow.actionPolygon.setEnabled(False)\n 
self.mainwindow.actionBackspace.setEnabled(True)\n self.mainwindow.actionFinish.setEnabled(True)\n self.mainwindow.actionCancel.setEnabled(True)\n\n self.mainwindow.actionEdit.setEnabled(False)\n self.mainwindow.actionDelete.setEnabled(False)\n self.mainwindow.actionSave.setEnabled(False)\n\n def finish_draw(self):\n print('finish_draw')\n print(self.click_points)\n\n if self.current_graph is None:\n self.click_points.clear()\n return\n \n # 保存当前矩形\n print(self.click_points)\n print(self.mainwindow.rects)\n rect = {\n \"point1-x\": self.click_points[0][0],\n \"point1-y\": self.click_points[0][1],\n \"point2-x\": self.click_points[1][0],\n \"point2-y\": self.click_points[1][1],\n }\n print(rect)\n self.mainwindow.rects.append(rect)\n\n # 删除当前绘制对象\n self.click_points.clear()\n self.removeItem(self.current_graph)\n self.current_graph = None\n\n self.change_mode_to_view()\n\n\n def cancel_draw(self):\n if self.current_graph is None:\n return\n self.removeItem(self.current_graph)\n self.current_graph = None\n self.change_mode_to_view()\n self.click_points.clear()\n \n\n def mousePressEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'):\n if self.mode == STATUSMode.VIEW:\n return\n sceneX, sceneY = event.scenePos().x(), event.scenePos().y()\n sceneX = 0 if sceneX < 0 else sceneX\n sceneX = self.width()-1 if sceneX > self.width()-1 else sceneX\n sceneY = 0 if sceneY < 0 else sceneY\n sceneY = self.height()-1 if sceneY > self.height()-1 else sceneY\n print(sceneX, sceneY)\n\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n print('left click')\n self.pressd = True\n\n if len(self.click_points) <= 2:\n self.click_points.append([sceneX, sceneY])\n\n if len(self.click_points) == 2:\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n p1 = self.click_points[0]\n p2 = self.click_points[1]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n super(AnnotationScene, self).mousePressEvent(event)\n\n # 拖动鼠标描点 \n def mouseReleaseEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'): \n self.pressd = False\n super(AnnotationScene, self).mouseReleaseEvent(event)\n \n def eventFilter(self, obj, event):\n if event.type() == QEvent.GraphicsSceneMouseMove and event.buttons() == Qt.LeftButton:\n self.mouseMoveEvent(event)\n return True\n return super(RectangleScene, self).eventFilter(obj, event)\n\n def mouseMoveEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'):\n # 拖动鼠标描点\n pos = event.scenePos()\n if pos.x() < 0: pos.setX(0)\n if pos.x() > self.width()-1: pos.setX(self.width()-1)\n if pos.y() < 0: pos.setY(0)\n if pos.y() > self.height()-1: pos.setY(self.height()-1)\n\n if len(self.click_points) == 1:\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n p1 = self.click_points[0]\n p2 = [pos.x(), pos.y()]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n else:\n return\n\n # 状态栏,显示当前坐标\n if self.image_data is not None:\n x, y = round(pos.x()), round(pos.y())\n self.mainwindow.labelCoord.setText('xy: ({:>4d},{:>4d})'.format(x, y))\n\n data = self.image_data[y][x]\n if self.image_data.ndim == 2:\n self.mainwindow.labelData.setText('pix: [{:^3d}]'.format(data))\n elif self.image_data.ndim == 3:\n if len(data) == 3:\n self.mainwindow.labelData.setText('rgb: [{:>3d},{:>3d},{:>3d}]'.format(data[0], data[1], data[2]))\n else:\n 
self.mainwindow.labelData.setText('pix: [{}]'.format(data))\n\n super(AnnotationScene, self).mouseMoveEvent(event)\n \n def show_all(self):\n print('show_all')\n\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n for rect in self.mainwindow.rects:\n self.current_graph = QGraphicsRectItem()\n self.addItem(self.current_graph)\n p1 = [rect[\"point1-x\"], rect[\"point1-y\"]]\n p2 = [rect[\"point2-x\"], rect[\"point2-y\"]]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n\n def hide_all(self):\n print('hide_all')\n items_to_remove = [item for item in self.items() if isinstance(item, QGraphicsRectItem)]\n for item in items_to_remove:\n self.removeItem(item)" }, { "identifier": "AnnotationView", "path": "ISAT/widgets/canvas.py", "snippet": "class AnnotationView(QtWidgets.QGraphicsView):\n def __init__(self, parent=None):\n super(AnnotationView, self).__init__(parent)\n self.setMouseTracking(True)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)\n self.setDragMode(QtWidgets.QGraphicsView.DragMode.ScrollHandDrag)\n self.factor = 1.2\n\n def wheelEvent(self, event: QtGui.QWheelEvent):\n angel = event.angleDelta()\n angelX, angelY = angel.x(), angel.y()\n point = event.pos() # 当前鼠标位置\n if angelY > 0:\n self.zoom(self.factor, point)\n else:\n self.zoom(1 / self.factor, point)\n\n def zoom_in(self):\n self.zoom(self.factor)\n\n def zoom_out(self):\n self.zoom(1/self.factor)\n\n def zoomfit(self):\n self.fitInView(0, 0, self.scene().width(), self.scene().height(), QtCore.Qt.AspectRatioMode.KeepAspectRatio)\n\n def zoom(self, factor, point=None):\n mouse_old = self.mapToScene(point) if point is not None else None\n # 缩放比例\n\n pix_widget = self.transform().scale(factor, factor).mapRect(QtCore.QRectF(0, 0, 1, 1)).width()\n if pix_widget > 30 and factor > 1: return\n if pix_widget < 0.01 and factor < 1: return\n\n self.scale(factor, factor)\n if point is not None:\n mouse_now = self.mapToScene(point)\n center_now = self.mapToScene(self.viewport().width() // 2, self.viewport().height() // 2)\n center_new = mouse_old - mouse_now + center_now\n self.centerOn(center_new)" }, { "identifier": "STATUSMode", "path": "ISAT/configs.py", "snippet": "class STATUSMode(Enum):\n VIEW = 0\n CREATE = 1\n EDIT = 2" }, { "identifier": "MAPMode", "path": "ISAT/configs.py", "snippet": "class MAPMode(Enum):\n LABEL = 0\n SEMANTIC = 1\n INSTANCE = 2" }, { "identifier": "load_config", "path": "ISAT/configs.py", "snippet": "def load_config(file):\n with open(file, 'rb')as f:\n cfg = yaml.load(f.read(), Loader=yaml.FullLoader)\n return cfg" }, { "identifier": "save_config", "path": "ISAT/configs.py", "snippet": "def save_config(cfg, file):\n s = yaml.dump(cfg)\n with open(file, 'w') as f:\n f.write(s)\n return True" }, { "identifier": "CONFIG_FILE", "path": "ISAT/configs.py", "snippet": "CONFIG_FILE = os.path.join(ISAT_ROOT, 'isat.yaml')" }, { "identifier": "DEFAULT_CONFIG_FILE", "path": "ISAT/configs.py", "snippet": "DEFAULT_CONFIG_FILE = os.path.join(ISAT_ROOT, 'default.yaml')" }, { "identifier": "CHECKPOINT_PATH", "path": "ISAT/configs.py", "snippet": "CHECKPOINT_PATH = os.path.join(ISAT_ROOT, 'checkpoints')" }, { "identifier": "ISAT_ROOT", "path": "ISAT/configs.py", "snippet": "ISAT_ROOT = os.path.split(os.path.abspath(__file__))[0]" }, { "identifier": "Object", "path": "ISAT/annotation.py", 
"snippet": "class Object:\n def __init__(self, category:str, group:int, segmentation, area, layer, bbox, iscrowd=0, note=''):\n self.category = category\n self.group = group\n self.segmentation = segmentation\n self.area = area\n self.layer = layer\n self.bbox = bbox\n self.iscrowd = iscrowd\n self.note = note" }, { "identifier": "Annotation", "path": "ISAT/annotation.py", "snippet": "class Annotation:\n def __init__(self, image_path, label_path):\n img_folder, img_name = os.path.split(image_path)\n self.description = 'ISAT'\n self.img_folder = img_folder\n self.img_name = img_name\n self.label_path = label_path\n self.note = ''\n\n image = np.array(Image.open(image_path))\n if image.ndim == 3:\n self.height, self.width, self.depth = image.shape\n elif image.ndim == 2:\n self.height, self.width = image.shape\n self.depth = 0\n else:\n self.height, self.width, self.depth = image.shape[:, :3]\n print('Warning: Except image has 2 or 3 ndim, but get {}.'.format(image.ndim))\n del image\n\n self.objects:List[Object,...] = []\n\n def load_annotation(self):\n if os.path.exists(self.label_path):\n with open(self.label_path, 'r') as f:\n dataset = load(f)\n info = dataset.get('info', {})\n description = info.get('description', '')\n if description == 'ISAT':\n # ISAT格式json\n objects = dataset.get('objects', [])\n self.img_name = info.get('name', '')\n width = info.get('width', None)\n if width is not None:\n self.width = width\n height = info.get('height', None)\n if height is not None:\n self.height = height\n depth = info.get('depth', None)\n if depth is not None:\n self.depth = depth\n self.note = info.get('note', '')\n for obj in objects:\n category = obj.get('category', 'unknow')\n group = obj.get('group', 0)\n if group is None: group = 0\n segmentation = obj.get('segmentation', [])\n iscrowd = obj.get('iscrowd', 0)\n note = obj.get('note', '')\n area = obj.get('area', 0)\n layer = obj.get('layer', 2)\n bbox = obj.get('bbox', [])\n obj = Object(category, group, segmentation, area, layer, bbox, iscrowd, note)\n self.objects.append(obj)\n else:\n # 不再支持直接打开labelme标注文件(在菜单栏-tool-convert中提供了isat<->labelme相互转换工具)\n print('Warning: The file {} is not a ISAT json.'.format(self.label_path))\n return self\n\n def save_annotation(self):\n dataset = {}\n dataset['info'] = {}\n dataset['info']['description'] = self.description\n dataset['info']['folder'] = self.img_folder\n dataset['info']['name'] = self.img_name\n dataset['info']['width'] = self.width\n dataset['info']['height'] = self.height\n dataset['info']['depth'] = self.depth\n dataset['info']['note'] = self.note\n dataset['objects'] = []\n for obj in self.objects:\n object = {}\n object['category'] = obj.category\n object['group'] = obj.group\n object['segmentation'] = obj.segmentation\n object['area'] = obj.area\n object['layer'] = obj.layer\n object['bbox'] = obj.bbox\n object['iscrowd'] = obj.iscrowd\n object['note'] = obj.note\n dataset['objects'].append(object)\n with open(self.label_path, 'w') as f:\n dump(dataset, f, indent=4)\n return True" }, { "identifier": "Polygon", "path": "ISAT/widgets/polygon.py", "snippet": "class Polygon(QtWidgets.QGraphicsPolygonItem):\n def __init__(self):\n super(Polygon, self).__init__(parent=None)\n self.line_width = 0\n self.hover_alpha = 150\n self.nohover_alpha = 80\n self.points = []\n self.vertexs = []\n self.category = ''\n self.group = 0\n self.iscrowd = 0\n self.note = ''\n\n self.rxmin, self.rxmax, self.rymin, self.rymax = 0, 0, 0, 0 # 用于绘画完成后,记录多边形的各边界,此处与points对应\n self.color = 
QtGui.QColor('#ff0000')\n self.is_drawing = True\n\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(QtGui.QBrush(self.color, QtCore.Qt.BrushStyle.FDiagPattern))\n\n self.setAcceptHoverEvents(True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)\n self.setZValue(1e5)\n\n def addPoint(self, point):\n print('addPoint')\n self.points.append(point)\n print(self.points)\n vertex = Vertex(self, self.color, 2)\n # 添加路径点\n self.scene().addItem(vertex)\n self.vertexs.append(vertex)\n vertex.setPos(point)\n\n def movePoint(self, index, point):\n if not 0 <= index < len(self.points):\n return\n self.points[index] = self.mapFromScene(point)\n\n self.redraw()\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n def removePoint(self, index):\n if not self.points:\n return\n self.points.pop(index)\n vertex = self.vertexs.pop(index)\n self.scene().removeItem(vertex)\n del vertex\n self.redraw()\n\n def delete(self):\n self.points.clear()\n while self.vertexs:\n vertex = self.vertexs.pop()\n self.scene().removeItem(vertex)\n del vertex\n\n def moveVertex(self, index, point):\n if not 0 <= index < len(self.vertexs):\n return\n vertex = self.vertexs[index]\n vertex.setEnabled(False)\n vertex.setPos(point)\n vertex.setEnabled(True)\n\n def itemChange(self, change: 'QGraphicsItem.GraphicsItemChange', value: typing.Any):\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged and not self.is_drawing: # 选中改变\n if self.isSelected():\n color = QtGui.QColor('#00A0FF')\n color.setAlpha(self.hover_alpha)\n self.setBrush(color)\n else:\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n self.scene().mainwindow.annos_dock_widget.set_selected(self) # 更新label面板\n\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange: # ItemPositionHasChanged\n bias = value\n l, t, b, r = self.boundingRect().left(), self.boundingRect().top(), self.boundingRect().bottom(), self.boundingRect().right()\n if l + bias.x() < 0: bias.setX(-l)\n if r + bias.x() > self.scene().width(): bias.setX(self.scene().width()-r)\n if t + bias.y() < 0: bias.setY(-t)\n if b + bias.y() > self.scene().height(): bias.setY(self.scene().height()-b)\n\n for index, point in enumerate(self.points):\n self.moveVertex(index, point+bias)\n\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n return super(Polygon, self).itemChange(change, value)\n\n def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not self.isSelected():\n self.color.setAlpha(self.hover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not self.isSelected():\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def mouseDoubleClickEvent(self, event: 'QGraphicsSceneMouseEvent'):\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n self.scene().mainwindow.category_edit_widget.polygon = self\n self.scene().mainwindow.category_edit_widget.load_cfg()\n self.scene().mainwindow.category_edit_widget.show()\n\n def redraw(self):\n if len(self.points) < 1:\n 
return\n xs = [p.x() for p in self.points]\n ys = [p.y() for p in self.points]\n self.rxmin, self.rymin, self.rxmax, self.rymax = min(xs), min(ys), max(xs), max(ys)\n self.setPolygon(QtGui.QPolygonF(self.points))\n\n def change_color(self, color):\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n for vertex in self.vertexs:\n vertex_color = self.color\n vertex_color.setAlpha(255)\n vertex.setPen(QtGui.QPen(vertex_color, self.line_width))\n vertex.setBrush(vertex_color)\n\n def set_drawed(self, category, group, iscrowd, note, color:QtGui.QColor, layer=None):\n self.is_drawing = False\n self.category = category\n if isinstance(group, str):\n group = 0 if group == '' else int(group)\n self.group = group\n self.iscrowd = iscrowd\n self.note = note\n\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n if layer is not None:\n self.setZValue(layer)\n for vertex in self.vertexs:\n vertex.setColor(color)\n\n def calculate_area(self):\n area = 0\n num_points = len(self.points)\n for i in range(num_points):\n p1 = self.points[i]\n p2 = self.points[(i + 1) % num_points]\n d = p1.x() * p2.y() - p2.x() * p1.y()\n area += d\n return abs(area) / 2\n\n def load_object(self, object):\n segmentation = object.segmentation\n for x, y in segmentation:\n point = QtCore.QPointF(x, y)\n self.addPoint(point)\n color = self.scene().mainwindow.category_color_dict.get(object.category, '#000000')\n self.set_drawed(object.category, object.group, object.iscrowd, object.note, QtGui.QColor(color), object.layer) # ...\n\n def to_object(self):\n if self.is_drawing:\n return None\n segmentation = []\n for point in self.points:\n point = point + self.pos()\n segmentation.append((round(point.x(), 2), round(point.y(), 2)))\n xmin = self.boundingRect().x() + self.pos().x()\n ymin = self.boundingRect().y() + self.pos().y()\n xmax = xmin + self.boundingRect().width()\n ymax = ymin + self.boundingRect().height()\n\n object = Object(self.category, group=self.group, segmentation=segmentation,\n area=self.calculate_area(), layer=self.zValue(), bbox=(xmin, ymin, xmax, ymax), iscrowd=self.iscrowd, note=self.note)\n return object" }, { "identifier": "PromptPoint", "path": "ISAT/widgets/polygon.py", "snippet": "class PromptPoint(QtWidgets.QGraphicsPathItem):\n def __init__(self, pos, type=0):\n super(PromptPoint, self).__init__()\n self.color = QtGui.QColor('#0000FF') if type==0 else QtGui.QColor('#00FF00')\n self.color.setAlpha(255)\n self.painterpath = QtGui.QPainterPath()\n self.painterpath.addEllipse(\n QtCore.QRectF(-1, -1, 2, 2))\n self.setPath(self.painterpath)\n self.setBrush(self.color)\n self.setPen(QtGui.QPen(self.color, 3))\n self.setZValue(1e5)\n\n self.setPos(pos)" }, { "identifier": "ConverterDialog", "path": "ISAT/widgets/converter_dialog.py", "snippet": "class ConverterDialog(QtWidgets.QDialog, Ui_Dialog):\n def __init__(self, parent, mainwindow):\n super(ConverterDialog, self).__init__(parent=parent)\n self.setWindowTitle('转换')\n self.layout = QVBoxLayout()\n self.mainwindow = mainwindow\n self.setWindowModality(QtCore.Qt.WindowModality.WindowModal)\n\n self.path_layout = QHBoxLayout()\n self.button = QPushButton('保存至')\n self.button.clicked.connect(self.select_folder)\n self.path_layout.addWidget(self.button)\n self.path_text = QLineEdit()\n self.path_text.setReadOnly(True)\n self.path_layout.addWidget(self.path_text)\n 
self.layout.addLayout(self.path_layout)\n\n\n # 最底部居中按钮\n self.bottom_layout = QHBoxLayout()\n self.bottom_layout.addStretch()\n self.bottom_button = QPushButton('转换')\n self.bottom_layout.addWidget(self.bottom_button)\n self.bottom_layout.addStretch()\n self.layout.addLayout(self.bottom_layout)\n self.bottom_button.clicked.connect(self.confirm_action)\n self.setLayout(self.layout)\n\n def select_folder(self):\n folder = QFileDialog.getExistingDirectory(self, '保存至')\n if folder:\n self.path_text.setText(folder)\n\n def confirm_action(self):\n path = self.path_text.text()\n if path == '':\n self.mainwindow.statusBar().showMessage('请先选择保存路径')\n QMessageBox.warning(self, '警告', '请先选择保存路径')\n return\n if not os.path.exists(path):\n os.makedirs(path)\n self.mainwindow.statusBar().showMessage('正在转换')\n labels_dir = self.mainwindow.label_root\n image_dir = self.mainwindow.image_root\n for inx, label in enumerate(os.listdir(labels_dir)):\n print(inx, label)\n label_path = os.path.join(labels_dir, label)\n image_path = os.path.join(image_dir, label[:-5] + '.jpg')\n if not os.path.exists(image_path):\n image_path = os.path.join(image_dir, label[:-5] + '.png')\n if not os.path.exists(image_path):\n image_path = os.path.join(image_dir, label[:-5] + '.jpeg')\n if not os.path.exists(image_path):\n continue\n image = Image.open(image_path)\n with open(label_path, 'r') as f:\n rects = json.load(f)\n \n for inx, rect in enumerate(rects):\n x1, y1, x2, y2 = rect['point1-x'], rect['point1-y'], rect['point2-x'], rect['point2-y']\n left = min(x1, x2)\n right = max(x1, x2)\n top = min(y1, y2)\n bottom = max(y1, y2)\n cropped_image = image.crop((left, top, right, bottom))\n save_path = os.path.join(path, label[:-5] + '_' + str(inx) + image_path[-4:])\n print(save_path)\n cropped_image.save(save_path)\n\n self.mainwindow.statusBar().showMessage('转换完成')\n QMessageBox.warning(self, '提示', '转换完成')" } ]
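The ConverterDialog snippet above walks a label directory and crops every saved rectangle out of its source image. As a minimal, self-contained sketch of that crop step (the paths `images/example.jpg`, `labels/example.json`, and `crops/` are hypothetical placeholders for this illustration, not values taken from the record):

```python
import json
import os

from PIL import Image

# Hypothetical stand-in paths; in the tool itself they come from the
# main window's image_root / label_root directories.
image_path = "images/example.jpg"
label_path = "labels/example.json"
output_dir = "crops"

os.makedirs(output_dir, exist_ok=True)
image = Image.open(image_path)

# A label file holds a list of two-corner rectangles in the same
# {"point1-x", "point1-y", "point2-x", "point2-y"} form that
# AnnotationScene.finish_draw() appends to mainwindow.rects.
with open(label_path, "r") as f:
    rects = json.load(f)

for idx, rect in enumerate(rects):
    x1, y1 = rect["point1-x"], rect["point1-y"]
    x2, y2 = rect["point2-x"], rect["point2-y"]
    # Normalize the two clicked corners into a (left, top, right, bottom)
    # box, mirroring ConverterDialog.confirm_action().
    box = (min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2))
    image.crop(box).save(os.path.join(output_dir, f"example_{idx}.jpg"))
```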
from PyQt5 import QtWidgets, QtCore, QtGui
from ISAT.ui.MainWindow import Ui_MainWindow
from ISAT.widgets.files_dock_widget import FilesDockWidget
from ISAT.widgets.canvas import AnnotationScene, AnnotationView
from ISAT.configs import STATUSMode, MAPMode, load_config, save_config, CONFIG_FILE, DEFAULT_CONFIG_FILE, CHECKPOINT_PATH, ISAT_ROOT
from ISAT.annotation import Object, Annotation
from ISAT.widgets.polygon import Polygon, PromptPoint
from ISAT.widgets.converter_dialog import ConverterDialog
from PIL import Image
from PyQt5.QtCore import QThread, pyqtSignal
import os
import json
import functools
import imgviz
import ISAT.icons_rc
import numpy as np
import cv2
# 调整图像饱和度
13,407
self.png_palette = None # 图像拥有调色盘,说明是单通道的标注png文件 self.instance_cmap = imgviz.label_colormap() # 标注目标 self.current_label:Annotation = None # 新增 手动/自动 group选择 self.group_select_mode = 'auto' # 所有labels self.rects = [] self.is_show_bitmap = False self.init_ui() self.init_connect() self.reset_action() def init_ui(self): #q self.files_dock_widget = FilesDockWidget(mainwindow=self) self.files_dock.setWidget(self.files_dock_widget) # 新增 group 选择 快捷键 self.next_group_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("Tab"), self) self.prev_group_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("`"), self) self.next_group_shortcut.setContext(QtCore.Qt.ApplicationShortcut) self.prev_group_shortcut.setContext(QtCore.Qt.ApplicationShortcut) self.scene = AnnotationScene(mainwindow=self) self.view = AnnotationView(parent=self) self.view.setScene(self.scene) self.setCentralWidget(self.view) self.labelCoord = QtWidgets.QLabel('') self.labelCoord.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight) self.labelCoord.setFixedWidth(150) self.statusbar.addPermanentWidget(self.labelCoord) self.labelData = QtWidgets.QLabel('') self.labelData.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight) self.labelData.setFixedWidth(150) self.statusbar.addPermanentWidget(self.labelData) self.trans = QtCore.QTranslator() self.Converter_dialog = ConverterDialog(self, mainwindow=self) def set_saved_state(self, is_saved:bool): self.saved = is_saved if self.files_list is not None and self.current_index is not None: if is_saved: self.setWindowTitle(self.current_label.label_path) else: self.setWindowTitle('*{}'.format(self.current_label.label_path)) def open_dir(self): dir = QtWidgets.QFileDialog.getExistingDirectory(self) if not dir: return self.files_list.clear() self.files_dock_widget.listWidget.clear() files = [] suffixs = tuple(['{}'.format(fmt.data().decode('ascii').lower()) for fmt in QtGui.QImageReader.supportedImageFormats()]) for f in os.listdir(dir): if f.lower().endswith(suffixs): # f = os.path.join(dir, f) files.append(f) files = sorted(files) self.files_list = files self.files_dock_widget.update_widget() self.current_index = 0 self.image_root = dir self.actionOpen_dir.setStatusTip("Image root: {}".format(self.image_root)) if self.label_root is None: self.label_root = dir self.actionSave_dir.setStatusTip("Label root: {}".format(self.label_root)) # load setting yaml if os.path.exists(os.path.join(dir, 'isat.yaml')): self.config_file = os.path.join(dir, 'isat.yaml') self.show_image(self.current_index) def save_dir(self): dir = QtWidgets.QFileDialog.getExistingDirectory(self) if not dir: return self.label_root = dir self.actionSave_dir.setStatusTip("Label root: {}".format(self.label_root)) # load setting yaml if os.path.exists(os.path.join(dir, 'isat.yaml')): self.config_file = os.path.join(dir, 'isat.yaml') self.reload_cfg() # 刷新图片 if self.current_index is not None: self.show_image(self.current_index) def save(self): print('save') print(self.rects) save_name = self.files_list[self.current_index].split('.')[0] + '.json' save_path = os.path.join(self.label_root, save_name) # 保存json文件 self.rects print(save_path) with open(save_path, 'w') as file: json.dump(self.rects, file) # 保存所有的矩形
# -*- coding: utf-8 -*- # @Author : LG class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self): super(MainWindow, self).__init__() self.setupUi(self) self.image_root: str = None self.label_root:str = None self.files_list: list = [] self.current_index = None self.current_file_index: int = None self.current_group = 1 self.config_file = CONFIG_FILE if os.path.exists(CONFIG_FILE) else DEFAULT_CONFIG_FILE self.saved = True self.can_be_annotated = True self.load_finished = False self.png_palette = None # 图像拥有调色盘,说明是单通道的标注png文件 self.instance_cmap = imgviz.label_colormap() # 标注目标 self.current_label:Annotation = None # 新增 手动/自动 group选择 self.group_select_mode = 'auto' # 所有labels self.rects = [] self.is_show_bitmap = False self.init_ui() self.init_connect() self.reset_action() def init_ui(self): #q self.files_dock_widget = FilesDockWidget(mainwindow=self) self.files_dock.setWidget(self.files_dock_widget) # 新增 group 选择 快捷键 self.next_group_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("Tab"), self) self.prev_group_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence("`"), self) self.next_group_shortcut.setContext(QtCore.Qt.ApplicationShortcut) self.prev_group_shortcut.setContext(QtCore.Qt.ApplicationShortcut) self.scene = AnnotationScene(mainwindow=self) self.view = AnnotationView(parent=self) self.view.setScene(self.scene) self.setCentralWidget(self.view) self.labelCoord = QtWidgets.QLabel('') self.labelCoord.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight) self.labelCoord.setFixedWidth(150) self.statusbar.addPermanentWidget(self.labelCoord) self.labelData = QtWidgets.QLabel('') self.labelData.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight) self.labelData.setFixedWidth(150) self.statusbar.addPermanentWidget(self.labelData) self.trans = QtCore.QTranslator() self.Converter_dialog = ConverterDialog(self, mainwindow=self) def set_saved_state(self, is_saved:bool): self.saved = is_saved if self.files_list is not None and self.current_index is not None: if is_saved: self.setWindowTitle(self.current_label.label_path) else: self.setWindowTitle('*{}'.format(self.current_label.label_path)) def open_dir(self): dir = QtWidgets.QFileDialog.getExistingDirectory(self) if not dir: return self.files_list.clear() self.files_dock_widget.listWidget.clear() files = [] suffixs = tuple(['{}'.format(fmt.data().decode('ascii').lower()) for fmt in QtGui.QImageReader.supportedImageFormats()]) for f in os.listdir(dir): if f.lower().endswith(suffixs): # f = os.path.join(dir, f) files.append(f) files = sorted(files) self.files_list = files self.files_dock_widget.update_widget() self.current_index = 0 self.image_root = dir self.actionOpen_dir.setStatusTip("Image root: {}".format(self.image_root)) if self.label_root is None: self.label_root = dir self.actionSave_dir.setStatusTip("Label root: {}".format(self.label_root)) # load setting yaml if os.path.exists(os.path.join(dir, 'isat.yaml')): self.config_file = os.path.join(dir, 'isat.yaml') self.show_image(self.current_index) def save_dir(self): dir = QtWidgets.QFileDialog.getExistingDirectory(self) if not dir: return self.label_root = dir self.actionSave_dir.setStatusTip("Label root: {}".format(self.label_root)) # load setting yaml if os.path.exists(os.path.join(dir, 'isat.yaml')): self.config_file = os.path.join(dir, 'isat.yaml') self.reload_cfg() # 刷新图片 if self.current_index is not None: self.show_image(self.current_index) def save(self): print('save') print(self.rects) save_name = self.files_list[self.current_index].split('.')[0] + '.json' save_path = 
os.path.join(self.label_root, save_name) # 保存json文件 self.rects print(save_path) with open(save_path, 'w') as file: json.dump(self.rects, file) # 保存所有的矩形
if self.scene.mode != STATUSMode.VIEW:
4
2023-12-24 16:19:16+00:00
16k
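The fields above make up one complete record: a list of context snippets keyed by identifier and path, the file's import block, the code leading up to the target line, the gold next line, and the index of the supporting context snippet (counting from zero, index 4 plausibly points at the STATUSMode snippet, the identifier the gold line `if self.scene.mode != STATUSMode.VIEW:` actually uses). Below is a rough sketch of how such a row could be assembled into a next-line completion prompt and scored; the row-level key names (`context`, `import_statement`, `cropped_code`, `next_line`) and the exact-match metric are assumptions for illustration, not a documented protocol for this dataset.

```python
from typing import Dict, List

# Assumed row keys: "context" (list of dicts with "path" and "snippet",
# as in the record above), "import_statement", "cropped_code", "next_line".


def build_prompt(row: Dict) -> str:
    """Concatenate retrieved context, the import block, and the unfinished code body."""
    blocks: List[str] = []
    for item in row["context"]:
        blocks.append(f"# Path: {item['path']}\n{item['snippet']}")
    blocks.append(row["import_statement"])
    blocks.append(row["cropped_code"])
    return "\n\n".join(blocks)


def next_line_match(prediction: str, row: Dict) -> bool:
    """Exact match on stripped text, so pure whitespace differences still pass."""
    return prediction.strip() == row["next_line"].strip()


# For the record above, a correct completion would be
#   "if self.scene.mode != STATUSMode.VIEW:"
# and the gold snippet index marks the context entry that supplies the
# identifier this line depends on.
```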
khabbazan/Mattermost-Subscriptions
helpers/channels_graphql_ws/subscription.py
[ { "identifier": "GraphqlWsConsumer", "path": "helpers/channels_graphql_ws/graphql_ws_consumer.py", "snippet": "class GraphqlWsConsumer(ch_websocket.AsyncJsonWebsocketConsumer):\n \"\"\"Channels consumer for the WebSocket GraphQL backend.\n\n NOTE: Each instance of this class maintains one WebSocket\n connection to a single client.\n\n This class implements the WebSocket-based GraphQL protocol used by\n `subscriptions-transport-ws` library (used by Apollo):\n https://github.com/apollographql/subscriptions-transport-ws/blob/master/PROTOCOL.md\n \"\"\"\n\n # ----------------------------------------------------------------- PUBLIC INTERFACE\n\n # Overwrite this in the subclass to specify the GraphQL schema which\n # processes GraphQL queries.\n schema: graphene.Schema\n\n # The interval to send keepalive messages to the clients (seconds).\n send_keepalive_every: Optional[float] = None\n\n # Set to `True` to process requests (i.e. GraphQL documents) from\n # a client in order of arrival, which is the same as sending order,\n # as guaranteed by the WebSocket protocol. This means that request\n # processing for this particular client becomes serial - in other\n # words, the server will not start processing another request\n # before it finishes the current one. Note that requests from\n # different clients (within different WebSocket connections)\n # are still processed asynchronously. Useful for tests.\n strict_ordering: bool = False\n\n # When set to `True` the server will send an empty data message in\n # response to the subscription. This is needed to let client know\n # when the subscription activates, so he can be sure he doesn't miss\n # any notifications. Disabled by default, cause this is an extension\n # to the original protocol and the client must be tuned accordingly.\n confirm_subscriptions: bool = False\n\n # The message sent to the client when subscription activation\n # confirmation is enabled.\n subscription_confirmation_message: Dict[str, Any] = {\"data\": None, \"errors\": None}\n\n # Issue a warning to the log when operation takes longer than\n # specified number in seconds. None disables the warning.\n warn_operation_timeout: Optional[float] = 1\n\n # The size of the subscription notification queue. If there are more\n # notifications (for a single subscription) than the given number,\n # then an oldest notification is dropped and a warning is logged.\n subscription_notification_queue_limit: int = 1024\n\n # GraphQL middleware.\n # Instance of `graphql.MiddlewareManager` or the list of functions\n # (callables) like the following:\n # ```python\n # async def my_middleware(next_middleware, root, info, *args, **kwds):\n # result = next_middleware(root, info, *args, **kwds)\n # if graphql.pyutils.is_awaitable(result):\n # result = await result\n # return result\n # ```\n # The first middleware in the middlewares list will be the closest\n # to the resolver in the middlewares call stack.\n # For more information read docs:\n # - https://docs.graphene-python.org/en/latest/execution/middleware/#middleware\n # - https://graphql-core-3.readthedocs.io/en/latest/diffs.html#custom-middleware\n # Docs about async middlewares are still missing - read the\n # GraphQL-core sources to know more.\n middleware: Optional[graphql.Middleware] = None\n\n async def on_connect(self, payload):\n \"\"\"Client connection handler.\n\n Called after CONNECTION_INIT message from client. 
Overwrite and\n raise an Exception to tell the server to reject the connection\n when it's necessary.\n\n Args:\n payload: Payload from CONNECTION_INIT message.\n \"\"\"\n del payload\n\n async def on_operation(self, op_id, payload):\n \"\"\"Process business logic before operation processing starts.\n\n Useful e.g. to check that user session is not yet expired.\n\n Throw `graphql.error.GraphQLError` to cancel the operation.\n\n Args:\n op_id: Operation id.\n payload: Payload of the operation.\n \"\"\"\n del op_id, payload\n\n # ------------------------------------------------------------------- IMPLEMENTATION\n\n # A prefix of Channel groups with subscription notifications.\n group_name_prefix: str = \"GQLWS\"\n\n # Structure that holds subscription information.\n @dataclasses.dataclass\n class _SubInf:\n \"\"\"Subscription information structure.\"\"\"\n\n # Subscription identifier - protocol operation identifier.\n sid: int\n # Subscription groups the subscription belongs to.\n groups: List[str]\n # A function which triggets subscription.\n enqueue_notification: Callable[[Any], None]\n # The callback to invoke when client unsubscribes.\n unsubscribed_callback: Callable[..., Awaitable[None]]\n\n def __init__(self, *args, **kwargs):\n \"\"\"Consumer constructor.\"\"\"\n\n assert self.schema is not None, \"An attribute 'schema' is not set! Subclasses must specify \" \"the schema which processes GraphQL subscription queries.\"\n\n # Registry of active (subscribed) subscriptions.\n self._subscriptions: Dict[int, GraphqlWsConsumer._SubInf] = {} # {'<sid>': '<SubInf>', ...}\n self._sids_by_group = {} # {'<grp>': ['<sid0>', '<sid1>', ...], ...}\n\n # Tasks which send notifications to clients indexed by an\n # operation/subscription id.\n self._notifier_tasks: Dict[int, asyncio.Task] = {}\n\n # Task that sends keepalive messages periodically.\n self._keepalive_task = None\n\n # Background tasks to clean it up when a client disconnects.\n # We use weak collection so finished task will be autoremoved.\n self._background_tasks: weakref.WeakSet = weakref.WeakSet()\n\n # Crafty weak collection with per-operation locks. It holds a\n # mapping from the operaion id (protocol message id) to the\n # `asyncio.Lock` used to serialize processing of start & stop\n # requests. Since the collection is weak, it automatically\n # throws away items when locks are garbage collected.\n self._operation_locks: weakref.WeakValueDictionary = weakref.WeakValueDictionary()\n\n # MiddlewareManager maintains internal cache for resolvers\n # wrapped with middlewares. Using the same manager for all\n # operations improves performance.\n self._middleware = None\n if self.middleware:\n self._middleware = self.middleware\n if not isinstance(self._middleware, graphql.MiddlewareManager):\n self._middleware = graphql.MiddlewareManager(*self._middleware)\n\n super().__init__(*args, **kwargs)\n\n # ---------------------------------------------------------- CONSUMER EVENT HANDLERS\n\n async def connect(self):\n \"\"\"Handle new WebSocket connection.\"\"\"\n\n # Check the subprotocol told by the client.\n #\n # NOTE: In Python 3.6 `scope[\"subprotocols\"]` was a string, but\n # starting with Python 3.7 it is a bytes. This can be a proper\n # change or just a bug in the Channels to be fixed. 
So let's\n # accept both variants until it becomes clear.\n assert GRAPHQL_WS_SUBPROTOCOL in ((sp.decode() if isinstance(sp, bytes) else sp) for sp in self.scope[\"subprotocols\"]), (\n f\"WebSocket client does not request for the subprotocol \" f\"{GRAPHQL_WS_SUBPROTOCOL}!\"\n )\n\n # Accept connection with the GraphQL-specific subprotocol.\n await self.accept(subprotocol=GRAPHQL_WS_SUBPROTOCOL)\n\n async def disconnect(self, code):\n \"\"\"Handle WebSocket disconnect.\n\n Remove itself from the Channels groups, clear triggers and stop\n sending keepalive messages.\n \"\"\"\n\n # Print debug or warning message depending on the value of the\n # connection close code. We consider all reserved codes (<999),\n # 1000 \"Normal Closure\", and 1001 \"Going Away\" as OK.\n # See: https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent\n if not code:\n LOG.warning(\"WebSocket connection closed without a code!\")\n elif code <= 1001:\n LOG.debug(\"WebSocket connection closed with code: %s.\", code)\n else:\n LOG.warning(\"WebSocket connection closed with code: %s!\", code)\n\n # The list of awaitables to simultaneously wait at the end.\n waitlist: List[asyncio.Task] = []\n\n # Unsubscribe from the Channels groups.\n waitlist += [asyncio.create_task(self._channel_layer.group_discard(group, self.channel_name)) for group in self._sids_by_group]\n\n # Cancel all currently running background tasks.\n for bg_task in self._background_tasks:\n bg_task.cancel()\n waitlist += list(self._background_tasks)\n\n # Stop sending keepalive messages (if enabled).\n if self._keepalive_task is not None:\n self._keepalive_task.cancel()\n waitlist += [self._keepalive_task]\n\n # Stop tasks which listen to GraphQL lib and send notifications.\n for notifier_task in self._notifier_tasks.values():\n notifier_task.cancel()\n waitlist += [notifier_task]\n\n # Wait for tasks to stop.\n if waitlist:\n await asyncio.wait(waitlist)\n\n self._background_tasks.clear()\n self._keepalive_task = None\n self._notifier_tasks.clear()\n self._operation_locks.clear()\n self._sids_by_group.clear()\n self._subscriptions.clear()\n\n async def receive_json(self, content): # pylint: disable=arguments-differ\n \"\"\"Process WebSocket message received from the client.\n\n NOTE: We force 'STOP' message processing to wait until 'START'\n with the same operation id finishes (if it is running). This\n protects us from race conditions which may happen when a client\n stops operation immediately after starting it. An illustrative\n example is a subscribe-unsubscribe pair. If we spawn processing\n of both messages concurrently we can deliver subscription\n confirmation after unsubscription confirmation.\n \"\"\"\n\n # Extract message type based on which we select how to proceed.\n msg_type = content[\"type\"].upper()\n\n if msg_type == \"CONNECTION_INIT\":\n task = self._on_gql_connection_init(payload=content[\"payload\"])\n\n elif msg_type == \"CONNECTION_TERMINATE\":\n task = self._on_gql_connection_terminate()\n\n elif msg_type == \"START\":\n op_id = content[\"id\"]\n\n # Create and lock a mutex for this particular operation id,\n # so STOP processing for the same operation id will wait\n # until START processing finishes. 
Locks are stored in a\n # weak collection so we do not have to manually clean it up.\n if op_id in self._operation_locks:\n raise graphql.error.GraphQLError(f\"Operation with msg_id={op_id} is already running!\")\n op_lock = asyncio.Lock()\n self._operation_locks[op_id] = op_lock\n await op_lock.acquire()\n\n async def on_start():\n try:\n # User hook which raises to cancel processing.\n await self.on_operation(op_id, payload=content[\"payload\"])\n # START message processing.\n await self._on_gql_start(op_id, payload=content[\"payload\"])\n except Exception as ex: # pylint: disable=broad-except\n await self._send_gql_error(op_id, ex)\n finally:\n op_lock.release()\n\n task = on_start()\n\n elif msg_type == \"STOP\":\n op_id = content[\"id\"]\n\n async def on_stop():\n # Wait until START message processing finishes, if any.\n async with self._operation_locks.setdefault(op_id, asyncio.Lock()):\n await self._on_gql_stop(op_id)\n\n task = on_stop()\n\n else:\n task = self._send_gql_error(\n content[\"id\"] if \"id\" in content else None,\n Exception(f\"Wrong message type '{msg_type}'!\"),\n )\n\n # If strict ordering is required then simply wait until the\n # message processing finishes. Otherwise spawn a task so\n # Channels may continue calling `receive_json` while requests\n # (i.e. GraphQL documents) are being processed.\n if self.strict_ordering:\n await task\n else:\n self._spawn_background_task(task)\n\n async def broadcast(self, message):\n \"\"\"The broadcast message handler.\n\n Method is called when new `broadcast` message (sent by\n `Subscription.broadcast`) received from the Channels group.\n\n \"\"\"\n # If strict ordering is required then simply wait until all the\n # broadcast messages are sent. Otherwise spawn a task so this\n # consumer will continue receiving messages.\n if self.strict_ordering:\n await self._process_broadcast(message)\n else:\n self._spawn_background_task(self._process_broadcast(message))\n\n async def _process_broadcast(self, message):\n \"\"\"Process the broadcast message.\n\n This triggers subscription notification to all the subscriptions\n belonging to the group received in the `message`.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task by the `broadcast` method (message handler).\n \"\"\"\n group = message[\"group\"]\n\n # Do nothing if group does not exist. It is quite possible for\n # a client and a backend to concurrently unsubscribe and send\n # notification. And these events do not need to be synchronized.\n if group not in self._sids_by_group:\n return\n\n payload = message[\"payload\"]\n\n # Put the payload to the notification queues of subscriptions\n # belonging to the subscription group. Drop the oldest payloads\n # if the `notification_queue` is full.\n for sid in self._sids_by_group[group]:\n subinf = self._subscriptions[sid]\n subinf.enqueue_notification(payload)\n\n async def unsubscribe(self, message):\n \"\"\"The unsubscribe message handler.\n\n Method is called when new `unsubscribe` message received from\n the Channels group. The message is typically sent by the method\n `Subscription.unsubscribe`. Here we figure out the group message\n received from and stop all the subscriptions in this group.\n \"\"\"\n group = message[\"group\"]\n\n # Do nothing if group does not exist. It is quite possible for\n # a client and a backend to unsubscribe from a subscription\n # concurrently. 
And these events do not need to be synchronized.\n if group not in self._sids_by_group:\n return\n\n # Send messages which look like user unsubscribes from all\n # subscriptions in the subscription group. This saves us from\n # thinking about raise condition between subscription and\n # unsubscription.\n if self._sids_by_group[group]:\n await asyncio.wait([asyncio.create_task(self.receive_json({\"type\": \"stop\", \"id\": sid})) for sid in self._sids_by_group[group]])\n\n # ---------------------------------------------------------- GRAPHQL PROTOCOL EVENTS\n\n async def _on_gql_connection_init(self, payload):\n \"\"\"Process the CONNECTION_INIT message.\n\n Start sending keepalive messages if `send_keepalive_every` set.\n Respond with either CONNECTION_ACK or CONNECTION_ERROR message.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. See the `receive_json` handler.\n \"\"\"\n try:\n # Notify subclass a new client is connected.\n await self.on_connect(payload)\n except Exception as ex: # pylint: disable=broad-except\n await self._send_gql_connection_error(ex)\n # Close the connection. NOTE: We use the 4000 code because\n # there are two reasons: A) We can not use codes greater\n # than 1000 and less than 3000 because Daphne and Autobahn\n # do not allow this (see `sendClose` from\n # `autobahn/websocket/protocol.py` and\n # `daphne/ws_protocol.py`). B)\n # https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent\n # Mozilla offers codes 4000–4999 available for all apps.\n await self.close(code=4000)\n else:\n # Send CONNECTION_ACK message.\n await self._send_gql_connection_ack()\n # If keepalive enabled then send one message immediately and\n # schedule periodic messages.\n if self.send_keepalive_every is not None:\n send_keepalive_every = self.send_keepalive_every\n\n async def keepalive_sender():\n \"\"\"Send keepalive messages periodically.\"\"\"\n while True:\n await asyncio.sleep(send_keepalive_every)\n await self._send_gql_connection_keep_alive()\n\n self._keepalive_task = asyncio.create_task(keepalive_sender())\n # Immediately send keepalive message cause it is\n # required by the protocol description.\n await self._send_gql_connection_keep_alive()\n\n async def _on_gql_connection_terminate(self):\n \"\"\"Process the CONNECTION_TERMINATE message.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. See the `receive_json` handler.\n \"\"\"\n\n # Close the connection.\n await self.close(code=1000)\n\n async def _on_gql_start(self, op_id, payload):\n \"\"\"Process the START message.\n\n Handle the message with query, mutation or subscription request.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. 
See the `receive_json` handler.\n \"\"\"\n try:\n if op_id in self._subscriptions:\n message = f\"Subscription with msg_id={op_id} already exists!\"\n raise graphql.error.GraphQLError(message)\n\n # Get the message data.\n query = payload[\"query\"]\n op_name = payload.get(\"operationName\")\n variables = payload.get(\"variables\", {})\n\n # Prepare a context object.\n context = DictAsObject({})\n context.channels_scope = self.scope\n context.channel_name = self.channel_name\n context.graphql_operation_name = op_name\n context.graphql_operation_id = op_id\n\n # Process the request with Graphene and GraphQL-core.\n doc_ast, op_ast, errors = await self._on_gql_start__parse_query(op_name, query)\n if errors:\n await self._send_gql_data(op_id, None, errors)\n await self._send_gql_complete(op_id)\n return\n # Assert values are not None to suppress MyPy complains.\n assert doc_ast is not None\n assert op_ast is not None\n\n # If the operation is subscription.\n if op_ast.operation == graphql.language.ast.OperationType.SUBSCRIPTION:\n LOG.debug(\n \"Subscription request. Operation ID: %s, operation name: %s.)\",\n op_id,\n op_name,\n )\n\n # This returns asynchronous generator or ExecutionResult\n # instance in case of error.\n subscr_result = await self._on_gql_start__subscribe(\n doc_ast,\n operation_name=op_name,\n root_value=None,\n variable_values=variables,\n context_value=context,\n subscribe_field_resolver=functools.partial(\n self._on_gql_start__initialize_subscription_stream,\n op_id,\n op_name,\n ),\n middleware=self._middleware,\n )\n\n # When subscr_result is an AsyncGenerator, consume\n # stream of notifications and send them to clients.\n if not isinstance(subscr_result, graphql.ExecutionResult):\n stream = cast(AsyncIterator[graphql.ExecutionResult], subscr_result)\n # Send subscription activation message (if enabled)\n # NOTE: We do it before reading the the stream\n # stream to guarantee that no notifications are sent\n # before the subscription confirmation message.\n if self.confirm_subscriptions:\n await self._send_gql_data(\n op_id,\n data=self.subscription_confirmation_message[\"data\"],\n errors=self.subscription_confirmation_message[\"errors\"],\n )\n\n consumer_init_done = asyncio.Event()\n\n async def consume_stream():\n consumer_init_done.set()\n try:\n async for item in stream:\n # Skipped subscription event may have no\n # data and no errors. Send message only\n # when we have something to send.\n if item.data or item.errors:\n try:\n await self._send_gql_data(op_id, item.data, item.errors)\n except asyncio.CancelledError:\n break\n except Exception as ex: # pylint: disable=broad-except\n LOG.debug(\n \"Exception in the subscription GraphQL resolver!\" \"Operation %s(%s).\",\n op_name,\n op_id,\n exc_info=ex,\n )\n await self._send_gql_data(op_id, None, [ex])\n\n # We need to end this task when client drops\n # connection or unsubscribes, so lets store it.\n self._notifier_tasks[op_id] = asyncio.create_task(consume_stream())\n\n # We must be sure here that the subscription\n # initialization is finished and the stream consumer\n # is active before we exit this function. Because in\n # the outer scope we have locking mechanism of start\n # and stop operations. 
And we want to say\n # \"subscription operation is started\" only when it\n # actually is.\n # This allows us to avoid the race condition between\n # simultaneous subscribe and unsubscribe calls.\n await consumer_init_done.wait()\n return\n\n # Else (when gql_subscribe returns ExecutionResult\n # containing error) fallback to standard handling below.\n operation_result = cast(graphql.ExecutionResult, subscr_result)\n\n # If the operation is query or mutation.\n else:\n LOG.debug(\"New query/mutation. Operation %s(%s).\", op_name, op_id)\n\n if self.warn_operation_timeout is not None:\n start_time = time.perf_counter()\n\n # Standard name for \"IntrospectionQuery\". We might also\n # check that\n # `doc_ast.definitions[0].selection_set.selections[0].name.value`\n # equals to `__schema`. This is a more robust way. But\n # it will eat up more CPU pre each query. For now lets\n # check only a query name.\n middleware_manager = self._middleware\n if op_name == \"IntrospectionQuery\":\n # No need to call middlewares for the\n # IntrospectionQuery. There no real resolvers. Only\n # the type information.\n middleware_manager = None\n exec_result = graphql.execution.execute(\n self.schema.graphql_schema,\n document=doc_ast,\n root_value=None,\n operation_name=op_name,\n variable_values=variables,\n context_value=context,\n middleware=middleware_manager,\n )\n if inspect.isawaitable(exec_result):\n exec_result = await exec_result\n operation_result = cast(graphql.ExecutionResult, exec_result)\n\n if self.warn_operation_timeout is not None:\n duration = time.perf_counter() - start_time\n if duration >= self.warn_operation_timeout:\n LOG.warning(\n \"Operation %s(%s) took %.6f seconds. Debug\" \" log contains full operation details.\",\n op_name,\n op_id,\n duration,\n )\n LOG.debug(\n \"Operation %s(%s) took %.6f seconds. Query:\" \" %r, variables: %r.\",\n op_name,\n op_id,\n duration,\n query,\n variables,\n )\n # Respond to a query or mutation immediately.\n await self._send_gql_data(op_id, operation_result.data, operation_result.errors)\n await self._send_gql_complete(op_id)\n\n except Exception as ex: # pylint: disable=broad-except\n if isinstance(ex, graphql.error.GraphQLError):\n # Respond with details of GraphQL execution error.\n LOG.warning(\"GraphQL error! 
Operation %s(%s).\", op_name, op_id, exc_info=True)\n await self._send_gql_data(op_id, None, [ex])\n await self._send_gql_complete(op_id)\n else:\n # Respond with general error responce.\n await self._send_gql_error(op_id, ex)\n\n async def _on_gql_start__parse_query(\n self, op_name: str, query: str\n ) -> Tuple[Optional[graphql.DocumentNode], Optional[graphql.OperationDefinitionNode], Optional[Iterable[graphql.GraphQLError]],]:\n \"\"\"Parse and validate GraphQL query.\n\n It is highly likely that the same operation will be parsed many\n times, so this function is wrapped with LRU cache.\n\n This async function offloads the GraphQL processing to the\n worker thread cause according to our experiments even GraphQL\n document parsing and validation take a while and depends approx.\n linearly on the size of the selection set.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n\n Returns:\n Tuple with three optional fields:\n 0: AST of parsed GraphQL document.\n 1: GraphQL operation definition.\n 2: Sequence of errors.\n \"\"\"\n\n res = await channels.db.database_sync_to_async(self._on_gql_start__parse_query_sync_cached, thread_sensitive=False)(op_name, query)\n\n doc_ast: Optional[graphql.DocumentNode] = res[0]\n op_ast: Optional[graphql.OperationDefinitionNode] = res[1]\n errors: Optional[Iterable[graphql.GraphQLError]] = res[2]\n\n return (doc_ast, op_ast, errors)\n\n @functools.lru_cache(maxsize=128)\n def _on_gql_start__parse_query_sync_cached(\n self, op_name: str, query: str\n ) -> Tuple[Optional[graphql.DocumentNode], Optional[graphql.OperationDefinitionNode], Optional[Iterable[graphql.GraphQLError]],]:\n \"\"\"Parse and validate GraphQL query. Cached sync implementation.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n \"\"\"\n\n # Parsing.\n try:\n doc_ast = graphql.parse(query)\n except graphql.GraphQLError as ex:\n return None, None, [ex]\n\n # Validation.\n validation_errors: List[graphql.GraphQLError] = graphql.validate(self.schema.graphql_schema, doc_ast)\n if validation_errors:\n return None, None, validation_errors\n\n op_ast = graphql.utilities.get_operation_ast(doc_ast, op_name)\n\n return doc_ast, op_ast, None\n\n async def _on_gql_start__subscribe(\n self,\n document: graphql.DocumentNode,\n root_value: Any = None,\n context_value: Any = None,\n variable_values: Optional[Dict[str, Any]] = None,\n operation_name: Optional[str] = None,\n field_resolver: Optional[graphql.GraphQLFieldResolver] = None,\n subscribe_field_resolver: Optional[graphql.GraphQLFieldResolver] = None,\n middleware: graphql.Middleware = None,\n execution_context_class: Optional[Type[graphql.ExecutionContext]] = None,\n ) -> Union[AsyncIterator[graphql.ExecutionResult], graphql.ExecutionResult]:\n \"\"\"Create a GraphQL subscription.\n\n This is a copy of `graphql.execution.subscribe.subscribe` from\n the GraphQL-core library v3.2.3 improved to support middlewares\n and user defined execution_context_class.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n \"\"\"\n\n result_or_stream = await graphql.create_source_event_stream(\n self.schema.graphql_schema,\n document,\n root_value,\n context_value,\n variable_values,\n operation_name,\n subscribe_field_resolver,\n )\n if isinstance(result_or_stream, graphql.ExecutionResult):\n return result_or_stream\n\n async def 
map_source_to_response(payload: Any) -> graphql.ExecutionResult:\n \"\"\"Map source to response.\n\n For each payload yielded from a subscription, map it over\n the normal GraphQL :func:`~graphql.execute` function, with\n `payload` as the `root_value`. This implements the\n \"MapSourceToResponseEvent\" algorithm described in the\n GraphQL specification. The :func:`~graphql.execute` function\n provides the \"ExecuteSubscriptionEvent\" algorithm, as it is\n nearly identical to the \"ExecuteQuery\" algorithm, for which\n :func:`~graphql.execute` is also used.\n \"\"\"\n result = graphql.execute(\n self.schema.graphql_schema,\n document,\n payload,\n context_value,\n variable_values,\n operation_name,\n field_resolver,\n middleware=middleware,\n execution_context_class=execution_context_class,\n ) # type: ignore\n result = await result if inspect.isawaitable(result) else result\n result = cast(graphql.ExecutionResult, result)\n # Skip notification if subscription returned `None`.\n if not result.errors and result.data:\n for key in list(result.data.keys()):\n if result.data[key] is None:\n result.data.pop(key)\n return result\n\n # Map every source value to a ExecutionResult value.\n return graphql.MapAsyncIterator(result_or_stream, map_source_to_response)\n\n async def _on_gql_start__initialize_subscription_stream(\n self,\n operation_id: int,\n operation_name: str,\n root: Any,\n info: graphql.GraphQLResolveInfo,\n *args,\n **kwds,\n ):\n \"\"\"Create asynchronous generator with subscription events.\n\n Called inside `_on_gql_start__subscribe` function by\n graphql-core as `subscribe_field_resolver` argument.\n\n This is a part of START message processing routine so the name\n prefixed with `_on_gql_start__` to make this explicit.\n \"\"\"\n # Graphene stores original subscription class in `graphene_type`\n # field of `return_type` object. Since subscriptions are build\n # on top of `graphene` we always have graphene specific\n # `return_type` class.\n return_type = info.return_type\n while graphql.is_wrapping_type(return_type):\n return_type = return_type.of_type # type: ignore[union-attr]\n subscription_class = return_type.graphene_type # type: ignore[union-attr]\n\n # It is ok to access private fields of `Subscription`\n # implementation. `Subscription` class used to create\n # subscriptions as graphene object but actually it is a part of\n # consumer implementation.\n # pylint: disable=protected-access\n\n # Attach current subscription to the group corresponding to\n # the concrete class. This allows to trigger all the\n # subscriptions of the current type, by invoking `publish`\n # without setting the `group` argument.\n groups = [subscription_class._group_name()]\n\n # Invoke the subclass-specified `subscribe` method to get\n # the groups subscription must be attached to.\n if subscription_class._meta.subscribe is not None:\n subclass_groups = subscription_class._meta.subscribe(root, info, *args, **kwds)\n # Properly handle `async def subscribe`.\n if asyncio.iscoroutinefunction(subscription_class._meta.subscribe):\n subclass_groups = await subclass_groups\n assert subclass_groups is None or isinstance(subclass_groups, (list, tuple)), (\n f\"Method 'subscribe' returned a value of an incorrect type\" f\" {type(subclass_groups)}! A list, a tuple, or 'None' expected.\"\n )\n subclass_groups = subclass_groups or []\n else:\n subclass_groups = []\n\n groups += [subscription_class._group_name(group) for group in subclass_groups]\n\n # The subscription notification queue. 
Required to preserve the\n # order of notifications within a single subscription.\n queue_size = subscription_class.notification_queue_limit\n if queue_size is None or queue_size <= 0:\n # Take default limit from the Consumer class.\n queue_size = self.subscription_notification_queue_limit\n # The subscription notification queue.\n # NOTE: The asyncio.Queue class is not thread-safe. So use the\n # `notification_queue_lock` as a guard while reading or writing\n # to the queue.\n notification_queue: asyncio.Queue = asyncio.Queue(maxsize=queue_size)\n # Lock to ensure that `notification_queue` operations are\n # thread safe.\n notification_queue_lock = threading.RLock()\n\n unsubscribed = subscription_class._meta.unsubscribed\n\n async def unsubscribed_callback():\n \"\"\"Call `unsubscribed` notification.\n\n The `cls._meta.unsubscribed` might do blocking operations,\n so offload it to the thread.\n \"\"\"\n\n if unsubscribed is None:\n return None\n result = unsubscribed(None, info, *args, **kwds)\n # Properly handle `async def unsubscribed`.\n if inspect.isawaitable(result):\n result = await result\n\n def enqueue_notification(payload):\n \"\"\"Put notification to the queue.\n\n Called by the WebSocket consumer (instance of the\n GraphqlWsConsumer subclass) when it receives the broadcast\n message (from the Channels group) sent by the\n Subscription.broadcast.\n\n Args:\n sid: Operation id of the subscription.\n \"\"\"\n while True:\n with notification_queue_lock:\n try:\n notification_queue.put_nowait(payload)\n break # The item was enqueued. Exit the loop.\n except asyncio.QueueFull:\n # The queue is full - issue a warning and throw\n # away the oldest item from the queue.\n # NOTE: Queue with the size 1 means that it is\n # safe to drop intermediate notifications.\n if notification_queue.maxsize != 1:\n LOG.warning(\n \"Subscription notification dropped! Operation %s(%s).\",\n operation_name,\n operation_id,\n )\n notification_queue.get_nowait()\n notification_queue.task_done()\n\n # Try to put the incoming item to the queue\n # within the same lock. This is an speed\n # optimization.\n try:\n notification_queue.put_nowait(payload)\n # The item was enqueued. 
Exit the loop.\n break\n except asyncio.QueueFull:\n # Kind'a impossible to get here, but if we\n # do, then we should retry until the queue\n # have capacity to process item.\n pass\n\n waitlist = []\n for group in groups:\n self._sids_by_group.setdefault(group, []).append(operation_id)\n waitlist.append(asyncio.create_task(self._channel_layer.group_add(group, self.channel_name)))\n self._subscriptions[operation_id] = self._SubInf(\n groups=groups,\n sid=operation_id,\n unsubscribed_callback=unsubscribed_callback,\n enqueue_notification=enqueue_notification,\n )\n if waitlist:\n await asyncio.wait(waitlist)\n\n _deserialize = channels.db.database_sync_to_async(Serializer.deserialize, thread_sensitive=False)\n\n # For each notification (event) yielded from this function the\n # `_on_gql_start__subscribe` function will call subscription\n # resolver (`publish`) via `graphql.execute` method.\n while True:\n with notification_queue_lock:\n payload = await notification_queue.get()\n data = await _deserialize(payload)\n yield data\n with notification_queue_lock:\n notification_queue.task_done()\n\n async def _on_gql_stop(self, op_id):\n \"\"\"Process the STOP message.\n\n Handle an unsubscribe request.\n\n NOTE: Depending on the value of the `strict_ordering` setting\n this method is either awaited directly or offloaded to an async\n task. See the `receive_json` handler.\n \"\"\"\n LOG.debug(\"Stop handling or unsubscribe operation %s.\", op_id)\n\n # Currently only subscriptions can be stopped. But we see but\n # some clients (e.g. GraphiQL) send the stop message even for\n # queries and mutations. We also see that the Apollo server\n # ignores such messages, so we ignore them as well.\n if op_id not in self._subscriptions:\n return\n\n waitlist: List[asyncio.Task] = []\n\n # Remove the subscription from the registry.\n subinf = self._subscriptions.pop(op_id)\n\n # Cancel the task which watches the notification queue.\n consumer_task = self._notifier_tasks.pop(op_id, None)\n if consumer_task:\n consumer_task.cancel()\n waitlist.append(consumer_task)\n\n # Stop listening for corresponding groups.\n for group in subinf.groups:\n # Remove the subscription from groups it belongs to. 
Remove\n # the group itself from the `_sids_by_group` if there are no\n # subscriptions left in it.\n assert self._sids_by_group[group].count(op_id) == 1, (\n f\"Registry is inconsistent: group '{group}' has \" f\"{self._sids_by_group[group].count(op_id)} \" \"occurrences of op_id={op_id}!\"\n )\n self._sids_by_group[group].remove(op_id)\n if not self._sids_by_group[group]:\n del self._sids_by_group[group]\n waitlist.append(asyncio.create_task(self._channel_layer.group_discard(group, self.channel_name)))\n\n if waitlist:\n await asyncio.wait(waitlist)\n\n await subinf.unsubscribed_callback()\n\n # Send the unsubscription confirmation message.\n await self._send_gql_complete(op_id)\n\n # -------------------------------------------------------- GRAPHQL PROTOCOL MESSAGES\n\n async def _send_gql_connection_ack(self):\n \"\"\"Sent in reply to the `connection_init` request.\"\"\"\n await self.send_json({\"type\": \"connection_ack\"})\n\n async def _send_gql_connection_error(self, error: Exception):\n \"\"\"Connection error sent in reply to the `connection_init`.\"\"\"\n LOG.warning(\"GraphQL connection error: %s!\", error, exc_info=error)\n await self.send_json({\"type\": \"connection_error\", \"payload\": self._format_error(error)})\n\n async def _send_gql_data(self, op_id, data: Optional[dict], errors: Optional[Iterable[Exception]]):\n \"\"\"Send GraphQL `data` message to the client.\n\n Args:\n data: Dict with GraphQL query response.\n errors: List of exceptions occurred during processing the\n GraphQL query. (Errors happened in resolvers.)\n \"\"\"\n # Log errors with tracebacks so we can understand what happened\n # in a failed resolver.\n for ex in errors or []:\n # Typical exception here is `GraphQLLocatedError` which has\n # reference to the original error raised from a resolver.\n tb = ex.__traceback__\n LOG.warning(\n \"GraphQL resolver failed! Operation id: %s:\\n%s\",\n op_id,\n \"\".join(traceback.format_exception(type(ex), ex, tb)).strip(),\n )\n\n await self.send_json(\n {\n \"type\": \"data\",\n \"id\": op_id,\n \"payload\": {\n \"data\": data,\n **({\"errors\": [self._format_error(e) for e in errors]} if errors else {}), # type: ignore\n },\n }\n )\n\n async def _send_gql_error(self, op_id, error: Exception):\n \"\"\"Tell client there is a query processing error.\n\n Server sends this message upon a failing operation.\n It can be an unexpected or unexplained GraphQL execution error\n or a bug in the code. 
It is unlikely that this is GraphQL\n validation errors (such errors are part of data message and\n must be sent by the `_send_gql_data` method).\n\n Args:\n op_id: Id of the operation that failed on the server.\n error: String with the information about the error.\n\n \"\"\"\n LOG.warning(\"Operation %s processing error: %s!\", op_id, error, exc_info=error)\n formatted_error = self._format_error(error)\n await self.send_json(\n {\n \"type\": \"error\",\n \"id\": op_id,\n \"payload\": {\"errors\": [formatted_error]},\n }\n )\n\n async def _send_gql_complete(self, op_id):\n \"\"\"Send GraphQL `complete` message to the client.\n\n Args:\n op_id: Id of the corresponding operation.\n\n \"\"\"\n await self.send_json({\"type\": \"complete\", \"id\": op_id})\n\n async def _send_gql_connection_keep_alive(self):\n \"\"\"Send the keepalive (ping) message.\"\"\"\n await self.send_json({\"type\": \"ka\"})\n\n # ---------------------------------------------------------------------- AUXILIARIES\n\n @staticmethod\n def _format_error(error: Exception) -> graphql.GraphQLFormattedError:\n \"\"\"Format given exception `error` to send over a network.\n\n This function will add the \"extensions.code\" field containing an\n exception class name. A frontend may use this value to handle\n errors properly.\n\n If your backend throws an Exception, then an error will be formatted\n for a client like this:\n {\n \"id\": \"NNN\",\n \"type\": \"data\",\n \"payload\": {\n \"data\": {...},\n \"errors\": [{\n \"message\": \"Test error\",\n \"locations\": [{\"line\": NNN, \"column\": NNN}],\n \"path\": [\"somepath\"],\n \"extensions\": {\"code\": \"Exception\"}\n }]\n }\n }\n\n If you define custom exception class (`class\n CustomErr(Exception)`), then the error code in the \"extensions\"\n field will equals to the \"CustomErr\":\n \"extensions\": {\"code\": \"Exception\"}\n\n There is a special case of errors on connection. They behave\n using same logic: in the \"code\" field there will be an\n exception class name:\n {\n \"payload\": {\n \"message\": \"message from a exception\",\n \"extensions\": {\"code\": \"UserUnauthenticatedError\"}\n },\n \"type\": \"connection_error\"\n }\n\n NOTE: If you need to add more fields to the error, then override\n this function in a subclass. Another way to enrich errors is to\n use a GraphQLError based classes for your exceptions.\n \"\"\"\n if isinstance(error, graphql.error.GraphQLError):\n if error.extensions and \"code\" not in error.extensions:\n if error.original_error:\n error.extensions[\"code\"] = type(error.original_error).__name__\n return error.formatted\n\n # Usually the GraphQL-core library wraps any exception with\n # GraphQLError. 
So this code should be unreachable, unless there\n # are some bugs in the library.\n return {\n \"message\": f\"{type(error).__name__}: {str(error)}\",\n \"extensions\": {\"code\": type(error).__name__},\n }\n\n def _spawn_background_task(self, awaitable):\n \"\"\"Spawn background task.\n\n Tasks are canceled and awaited when a client disconnects.\n Args:\n awaitable: An awaitable to run in a task.\n Returns:\n A started `asyncio.Task` instance.\n\n \"\"\"\n background_task = asyncio.create_task(awaitable)\n self._background_tasks.add(background_task)\n return background_task\n\n @property\n def _channel_layer(self):\n \"\"\"Channel layer.\"\"\"\n # We cannot simply check existence of channel layer in the\n # consumer constructor, so we added this property.\n assert self.channel_layer is not None, \"Channel layer is not configured!\"\n return self.channel_layer" }, { "identifier": "Serializer", "path": "helpers/channels_graphql_ws/serializer.py", "snippet": "class Serializer:\n \"\"\"Serialize/deserialize Python collection with Django models.\n\n Serialize/deserialize the data with the MessagePack like Redis\n Channels layer backend does.\n\n If `data` contains Django models, then it is serialized by the\n Django serialization utilities. For details see:\n Django serialization:\n https://docs.djangoproject.com/en/dev/topics/serialization/\n MessagePack:\n https://github.com/msgpack/msgpack-python\n \"\"\"\n\n @staticmethod\n def serialize(data):\n \"\"\"Serialize the `data`.\"\"\"\n\n def encode_extra_types(obj):\n \"\"\"MessagePack hook to serialize extra types.\n\n The recipe took from the MessagePack for Python docs:\n https://github.com/msgpack/msgpack-python#packingunpacking-of-custom-data-type\n\n Supported types:\n - Django models (through `django.core.serializers`).\n - Python `datetime` types:\n - `datetime.datetime`\n - `datetime.date`\n - `datetime.time`\n\n \"\"\"\n if isinstance(obj, django.db.models.Model):\n return {\n \"__djangomodel__\": True,\n \"as_str\": django.core.serializers.serialize(\"json\", [obj]),\n }\n if isinstance(obj, datetime.datetime):\n return {\"__datetime__\": True, \"as_str\": obj.isoformat()}\n if isinstance(obj, datetime.date):\n return {\"__date__\": True, \"as_str\": obj.isoformat()}\n if isinstance(obj, datetime.time):\n return {\"__time__\": True, \"as_str\": obj.isoformat()}\n return obj\n\n return msgpack.packb(data, default=encode_extra_types, use_bin_type=True)\n\n @staticmethod\n def deserialize(data):\n \"\"\"Deserialize the `data`.\"\"\"\n\n def decode_extra_types(obj):\n \"\"\"MessagePack hook to deserialize extra types.\"\"\"\n if \"__djangomodel__\" in obj:\n obj = next(django.core.serializers.deserialize(\"json\", obj[\"as_str\"])).object\n elif \"__datetime__\" in obj:\n obj = datetime.datetime.fromisoformat(obj[\"as_str\"])\n elif \"__date__\" in obj:\n obj = datetime.date.fromisoformat(obj[\"as_str\"])\n elif \"__time__\" in obj:\n obj = datetime.time.fromisoformat(obj[\"as_str\"])\n return obj\n\n return msgpack.unpackb(data, object_hook=decode_extra_types, raw=False)" } ]
import asyncio
import collections
import hashlib
import logging
import asgiref.sync
import channels.db
import channels.layers
import graphene
import graphene.types.objecttype
import graphene.types.utils
import graphene.utils.get_unbound_function
import graphene.utils.props
from typing import Optional
from .graphql_ws_consumer import GraphqlWsConsumer
from .serializer import Serializer
13,467
return cls.unsubscribe_sync(group=group) @classmethod async def unsubscribe_async(cls, *, group=None): """Unsubscribe, asynchronous version.""" # Send the 'unsubscribe' message to the Channels group. group = cls._group_name(group) await cls._channel_layer().group_send(group=group, message={"type": "unsubscribe", "group": group}) @classmethod def unsubscribe_sync(cls, *, group=None): """Unsubscribe, synchronous version.""" # Send the message to the Channels group. group = cls._group_name(group) sync_channel_layer_group_send = asgiref.sync.async_to_sync(cls._channel_layer().group_send) sync_channel_layer_group_send( group=group, message={ "type": "unsubscribe", "group": group, }, ) @classmethod def Field(cls, name=None, description=None, deprecation_reason=None, required=False): # noqa """Represent subscription as a field to mount it to the schema. Typical usage: class Subscription(graphene.ObjectType): on_new_chat_message = OnNewChatMessage.Field() """ return graphene.Field( cls._meta.output, args=cls._meta.arguments, resolver=cls._meta.publish, name=name, description=description, deprecation_reason=deprecation_reason, required=required, ) # ------------------------------------------------------------------- IMPLEMENTATION @classmethod def __init_subclass_with_meta__( cls, subscribe=None, publish=None, unsubscribed=None, output=None, arguments=None, _meta=None, **options, ): # pylint: disable=arguments-renamed """Prepare subscription on subclass creation. This method is invoked by the superclass `__init__subclass__`. It is needed to process class fields, `Meta` and inheritance parameters. This is genuine Graphene approach inherited/cloned from the original Mutation class implementation. """ if not _meta: _meta = SubscriptionOptions(cls) output = output or getattr(cls, "Output", None) # Collect fields if output class is not explicitly defined. fields: dict = {} if not output: fields = collections.OrderedDict() for base in reversed(cls.__mro__): fields.update(graphene.types.utils.yank_fields_from_attrs(base.__dict__, _as=graphene.Field)) output = cls if not arguments: input_class = getattr(cls, "Arguments", None) if input_class: arguments = graphene.utils.props.props(input_class) else: arguments = {} # Get `publish`, `subscribe`, and `unsubscribe` handlers. subscribe = subscribe or getattr(cls, "subscribe", None) publish = publish or getattr(cls, "publish", None) unsubscribed = unsubscribed or getattr(cls, "unsubscribed", None) assert publish is not None, ( f"Subscription '{cls.__qualname__}' does not define a" " method 'publish'! All subscriptions must define" " 'publish' which processes GraphQL queries!" ) if _meta.fields: _meta.fields.update(fields) else: _meta.fields = fields # Auxiliary alias. graphene_get_function = graphene.utils.get_unbound_function.get_unbound_function # pylint: disable=attribute-defined-outside-init _meta.arguments = arguments _meta.output = output _meta.publish = graphene_get_function(publish) _meta.subscribe = graphene_get_function(subscribe) _meta.unsubscribed = graphene_get_function(unsubscribed) super().__init_subclass_with_meta__(_meta=_meta, **options) @classmethod def _group_name(cls, group=None): """Group name based on the name of the subscription class.""" suffix = f"{cls.__module__}.{cls.__qualname__}" if group is not None: suffix += "-" + group # Wrap the suffix into SHA256 to guarantee that the length of # the group name is limited. Otherwise Channels will complain # about that the group name is wrong (actually is too long). 
        suffix_sha256 = hashlib.sha256()
        suffix_sha256.update(suffix.encode("utf-8"))
# Copyright (C) DATADVANCE, 2010-2023 # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """Graphene-like subscription class. The `Subscription` class itself is a "creative" copy of `Mutation` class from the Graphene (`graphene/types/mutation.py`). """ # Module logger. LOG = logging.getLogger(__name__) class Subscription(graphene.ObjectType): """Subscription type definition. Subclass this the Subscription class to define a GraphQL subscription. The class works with the `GraphqlWsConsumer` which maintains a WebSocket connection with the client. The subclass specifies the following methods. You can define each of them as a `@classmethod`, as a `@staticmethod`, or even as a regular method (like Graphene typically does). It shall work fine either way. NOTE, if you define the method as a regular method (not a classmethod or a staticmethod) you will receive the first argument (`payload`/`root`) into the `self` argument. [async] publish(payload, info, *args, **kwds): This method invoked each time subscription "triggers". Raising an exception here will lead to sending the notification with the error. Technically the WebSocket message will contain extra field "extensions.code" holding the classname of the exception raised. To suppress the notification return `None`. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Required. Args: payload: The `payload` from the `broadcast` invocation. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The same that any Graphene resolver returns. [async] subscribe(root, info, *args, **kwds): Called when client subscribes. Define this to do some extra work when client subscribes and to group subscriptions into different subscription groups. Method signature is the same as in other GraphQL "resolver" methods but it may return the subscription groups names to put the subscription into. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. 
You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Optional. Args: root: Root resolver object. Typically `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. Returns: The list or tuple of subscription group names this subscription instance belongs to. Later the subscription will trigger on publishes to any of that groups. If method returns None (default behavior) then the subscription is only put to the default group (the one which corresponds to the `Subscription` subclass). [async] unsubscribed(root, info, *args, **kwds): Called when client unsubscribes. Define this to be notified when client unsubscribes. Can be implemented as both asynchronous (`async def`) or synchronous (`def`) function. Asynchronous implementation runs blazingly fast in the main event loop of the main thread. You must be careful with blocking calls though. You can offload blocking operations to a thread in such cases. Synchronous implementation always runs in a worker thread which comes with a price of extra overhead. Args: root: Always `None`. info: The value of `info.context` is a Channels websocket context with all the connection information. args, kwds: Values of the GraphQL subscription inputs. The methods enlisted above receives "standard" set of GraphQL resolver arguments. The `info` field has `context` which can be used to transmit some useful payload between these methods. For example if `subscribe` sets `info.context.zen=42` then `publish` will have access to this value as `info.context.zen`. Static methods of subscription subclass: broadcast(): Call this to notify all subscriptions in the group. unsubscribe(): Call this to stop all subscriptions in the group. NOTE: If you call any of these methods from the asynchronous context then `await` the result of the call. """ # ----------------------------------------------------------------------- PUBLIC API # Subscription notifications queue limit. Set this to control the # amount of notifications server keeps in the queue when # notifications come faster than server processes them. Setting this # to 1 drops all notifications in the queue except the latest one. # Useful to skip intermediate notifications, e.g. progress reports. notification_queue_limit: Optional[int] = None @classmethod def broadcast(cls, *, group=None, payload=None): """Call this method to notify all subscriptions in the group. Can be called from both synchronous and asynchronous contexts. It is necessary to `await` if called from the async context. Args: group: Name of the subscription group which members must be notified. `None` means that all the subscriptions of type will be triggered. payload: The payload delivered to the `publish` handler. NOTE: The `payload` is serialized before sending to the subscription group. """ try: event_loop = asyncio.get_event_loop() except RuntimeError: pass else: if event_loop.is_running(): return event_loop.create_task(cls.broadcast_async(group=group, payload=payload)) return cls.broadcast_sync(group=group, payload=payload) @classmethod async def broadcast_async(cls, *, group=None, payload=None): """Broadcast, asynchronous version.""" # Manually serialize the `payload` to allow transfer of Django # models inside `payload`, auto serialization does not do this. 
serialized_payload = await channels.db.database_sync_to_async(Serializer.serialize, thread_sensitive=False)(payload) # Send the message to the Channels group. group = cls._group_name(group) group_send = cls._channel_layer().group_send # Will result in a call of `GraphqlWsConsumer.broadcast`. await group_send( group=group, message={ "type": "broadcast", "group": group, "payload": serialized_payload, }, ) @classmethod def broadcast_sync(cls, *, group=None, payload=None): """Broadcast, synchronous version.""" # Manually serialize the `payload` to allow transfer of Django # models inside the `payload`. serialized_payload = Serializer.serialize(payload) group = cls._group_name(group) sync_channel_layer_group_send = asgiref.sync.async_to_sync(cls._channel_layer().group_send) # Will result in a call of `GraphqlWsConsumer.broadcast`. sync_channel_layer_group_send( group=group, message={ "type": "broadcast", "group": group, "payload": serialized_payload, }, ) @classmethod def unsubscribe(cls, *, group=None): """Call this method to stop all subscriptions in the group. This method can be called from both synchronous and asynchronous contexts. If you call it from the asynchronous context then you have to `await`. Args: group: Name of the subscription group which members must be unsubscribed. `None` means that all the client of the subscription will be unsubscribed. """ try: event_loop = asyncio.get_event_loop() except RuntimeError: pass else: if event_loop.is_running(): return asyncio.create_task(cls.unsubscribe_async(group=group)) return cls.unsubscribe_sync(group=group) @classmethod async def unsubscribe_async(cls, *, group=None): """Unsubscribe, asynchronous version.""" # Send the 'unsubscribe' message to the Channels group. group = cls._group_name(group) await cls._channel_layer().group_send(group=group, message={"type": "unsubscribe", "group": group}) @classmethod def unsubscribe_sync(cls, *, group=None): """Unsubscribe, synchronous version.""" # Send the message to the Channels group. group = cls._group_name(group) sync_channel_layer_group_send = asgiref.sync.async_to_sync(cls._channel_layer().group_send) sync_channel_layer_group_send( group=group, message={ "type": "unsubscribe", "group": group, }, ) @classmethod def Field(cls, name=None, description=None, deprecation_reason=None, required=False): # noqa """Represent subscription as a field to mount it to the schema. Typical usage: class Subscription(graphene.ObjectType): on_new_chat_message = OnNewChatMessage.Field() """ return graphene.Field( cls._meta.output, args=cls._meta.arguments, resolver=cls._meta.publish, name=name, description=description, deprecation_reason=deprecation_reason, required=required, ) # ------------------------------------------------------------------- IMPLEMENTATION @classmethod def __init_subclass_with_meta__( cls, subscribe=None, publish=None, unsubscribed=None, output=None, arguments=None, _meta=None, **options, ): # pylint: disable=arguments-renamed """Prepare subscription on subclass creation. This method is invoked by the superclass `__init__subclass__`. It is needed to process class fields, `Meta` and inheritance parameters. This is genuine Graphene approach inherited/cloned from the original Mutation class implementation. """ if not _meta: _meta = SubscriptionOptions(cls) output = output or getattr(cls, "Output", None) # Collect fields if output class is not explicitly defined. 
fields: dict = {} if not output: fields = collections.OrderedDict() for base in reversed(cls.__mro__): fields.update(graphene.types.utils.yank_fields_from_attrs(base.__dict__, _as=graphene.Field)) output = cls if not arguments: input_class = getattr(cls, "Arguments", None) if input_class: arguments = graphene.utils.props.props(input_class) else: arguments = {} # Get `publish`, `subscribe`, and `unsubscribe` handlers. subscribe = subscribe or getattr(cls, "subscribe", None) publish = publish or getattr(cls, "publish", None) unsubscribed = unsubscribed or getattr(cls, "unsubscribed", None) assert publish is not None, ( f"Subscription '{cls.__qualname__}' does not define a" " method 'publish'! All subscriptions must define" " 'publish' which processes GraphQL queries!" ) if _meta.fields: _meta.fields.update(fields) else: _meta.fields = fields # Auxiliary alias. graphene_get_function = graphene.utils.get_unbound_function.get_unbound_function # pylint: disable=attribute-defined-outside-init _meta.arguments = arguments _meta.output = output _meta.publish = graphene_get_function(publish) _meta.subscribe = graphene_get_function(subscribe) _meta.unsubscribed = graphene_get_function(unsubscribed) super().__init_subclass_with_meta__(_meta=_meta, **options) @classmethod def _group_name(cls, group=None): """Group name based on the name of the subscription class.""" suffix = f"{cls.__module__}.{cls.__qualname__}" if group is not None: suffix += "-" + group # Wrap the suffix into SHA256 to guarantee that the length of # the group name is limited. Otherwise Channels will complain # about that the group name is wrong (actually is too long). suffix_sha256 = hashlib.sha256() suffix_sha256.update(suffix.encode("utf-8"))
return f"{GraphqlWsConsumer.group_name_prefix}-{suffix_sha256.hexdigest()}"
0
2023-12-25 11:40:56+00:00
16k
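The record above captures the `Subscription` base class from channels_graphql_ws: subclasses define `publish` (and optionally `subscribe`/`unsubscribed`), are mounted on the schema with `.Field()`, and are triggered via `broadcast`. Below is a minimal usage sketch based on that class docstring; the `OnNewChatMessage` name, its `chat_id`/`text` fields, and the top-level `channels_graphql_ws` import path are illustrative assumptions, not part of the record.

import channels_graphql_ws
import graphene


class OnNewChatMessage(channels_graphql_ws.Subscription):
    """Illustrative subscription: notify clients about new chat messages."""

    # Fields delivered to the client with every notification.
    chat_id = graphene.Int()
    text = graphene.String()

    class Arguments:
        chat_id = graphene.Int(required=True)

    @staticmethod
    def subscribe(root, info, chat_id):
        # Put this client into a per-chat subscription group.
        return [f"chat_{chat_id}"]

    @staticmethod
    def publish(payload, info, chat_id):
        # `payload` is whatever was passed to `broadcast`; returning None
        # would suppress the notification.
        return OnNewChatMessage(chat_id=chat_id, text=payload["text"])


class Subscription(graphene.ObjectType):
    # Mount the subscription on the GraphQL schema (typical usage shown in
    # the class docstring above).
    on_new_chat_message = OnNewChatMessage.Field()


# Trigger all subscribers of chat 42 from anywhere in the backend
# (await the call when running in an async context):
# OnNewChatMessage.broadcast(group="chat_42", payload={"text": "hello"})

The `GraphqlWsConsumer` captured in this record's context is the ASGI consumer that serves such a schema over a WebSocket connection.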
facebookresearch/ca_body
ca_body/models/mesh_vae_drivable.py
[ { "identifier": "ConvBlock", "path": "ca_body/nn/blocks.py", "snippet": "class ConvBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n size,\n lrelu_slope=0.2,\n kernel_size=3,\n padding=1,\n wnorm_dim=0,\n ):\n super().__init__()\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n\n # TODO: do we really need this?\n self.conv_resize = Conv2dWN(in_channels, out_channels, kernel_size=1)\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_skip = self.conv_resize(x)\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n return x + x_skip" }, { "identifier": "ConvDownBlock", "path": "ca_body/nn/blocks.py", "snippet": "class ConvDownBlock(nn.Module):\n def __init__(self, in_channels, out_channels, size, lrelu_slope=0.2, groups=1, wnorm_dim=0):\n \"\"\"Constructor.\n\n Args:\n in_channels: int, # of input channels\n out_channels: int, # of input channels\n size: the *input* size\n \"\"\"\n super().__init__()\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n\n self.conv_resize = Conv2dWN(\n in_channels, out_channels, kernel_size=1, stride=2, groups=groups\n )\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=3,\n height=size,\n width=size,\n groups=groups,\n padding=1,\n )\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=3,\n stride=2,\n height=size // 2,\n width=size // 2,\n groups=groups,\n padding=1,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_skip = self.conv_resize(x)\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n return x + x_skip" }, { "identifier": "UpConvBlockDeep", "path": "ca_body/nn/blocks.py", "snippet": "class UpConvBlockDeep(nn.Module):\n def __init__(self, in_channels, out_channels, size, lrelu_slope=0.2, wnorm_dim=0, groups=1):\n super().__init__()\n self.upsample = nn.UpsamplingBilinear2d(size)\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n # NOTE: the old one normalizes only across one dimension\n\n self.conv_resize = Conv2dWN(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n groups=groups,\n )\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=3,\n height=size,\n width=size,\n padding=1,\n groups=groups,\n )\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=3,\n height=size,\n width=size,\n padding=1,\n groups=groups,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_up = self.upsample(x)\n x_skip = self.conv_resize(x_up)\n\n x = x_up\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n\n return x + x_skip" }, { "identifier": "tile2d", "path": "ca_body/nn/blocks.py", 
"snippet": "def tile2d(x, size: int):\n \"\"\"Tile a given set of features into a convolutional map.\n\n Args:\n x: float tensor of shape [N, F]\n size: int or a tuple\n\n Returns:\n a feature map [N, F, size[0], size[1]]\n \"\"\"\n # size = size if isinstance(size, tuple) else (size, size)\n # NOTE: expecting only int here (!!!)\n return x[:, :, np.newaxis, np.newaxis].expand(-1, -1, size, size)" }, { "identifier": "weights_initializer", "path": "ca_body/nn/blocks.py", "snippet": "def weights_initializer(lrelu_slope=0.2):\n # pyre-ignore\n def init_fn(m):\n if isinstance(\n m,\n (\n nn.Conv2d,\n nn.Conv1d,\n nn.ConvTranspose2d,\n nn.Linear,\n ),\n ):\n gain = nn.init.calculate_gain(\"leaky_relu\", lrelu_slope)\n nn.init.kaiming_uniform_(m.weight.data, a=gain)\n if hasattr(m, \"bias\") and m.bias is not None:\n nn.init.zeros_(m.bias.data)\n else:\n logger.debug(f\"skipping initialization for {m}\")\n\n return init_fn" }, { "identifier": "LearnableBlur", "path": "ca_body/nn/dof_cal.py", "snippet": "class LearnableBlur(nn.Module):\n # TODO: should we make this conditional?\n def __init__(self, cameras: List[str]) -> None:\n super().__init__()\n self.cameras = cameras\n self.register_parameter(\n \"weights_raw\", nn.Parameter(th.ones(len(cameras), 3, dtype=th.float32))\n )\n\n def name_to_idx(self, cameras: List[str]) -> th.Tensor:\n return th.tensor(\n [self.cameras.index(c) for c in cameras],\n device=self.weights_raw.device,\n dtype=th.long,\n )\n\n # pyre-ignore\n def reg(self, cameras: List[str]):\n # pyre-ignore\n idxs = self.name_to_idx(cameras)\n # pyre-ignore\n return self.weights_raw[idxs]\n\n # pyre-ignore\n def forward(self, img: th.Tensor, cameras: List[str]):\n B = img.shape[0]\n # B, C, H, W\n idxs = self.name_to_idx(cameras)\n # TODO: mask?\n # pyre-ignore\n weights = th.softmax(self.weights_raw[idxs], dim=-1)\n weights = weights.reshape(B, 3, 1, 1, 1)\n return (\n weights[:, 0] * img\n + weights[:, 1] * gaussian_blur(img, [3, 3])\n + weights[:, 2] * gaussian_blur(img, [7, 7])\n )" }, { "identifier": "GeometryModule", "path": "ca_body/utils/geom.py", "snippet": "class GeometryModule(nn.Module):\n def __init__(\n self,\n vi,\n vt,\n vti,\n v2uv,\n uv_size,\n flip_uv=False,\n impaint=False,\n impaint_threshold=100.0,\n ):\n super().__init__()\n\n self.register_buffer(\"vi\", th.as_tensor(vi))\n self.register_buffer(\"vt\", th.as_tensor(vt))\n self.register_buffer(\"vti\", th.as_tensor(vti))\n self.register_buffer(\"v2uv\", th.as_tensor(v2uv, dtype=th.int64))\n\n # TODO: should we just pass topology here?\n self.n_verts = v2uv.shape[0]\n\n self.uv_size = uv_size\n\n # TODO: can't we just index face_index?\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n ).cpu()\n face_index, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n if impaint:\n if uv_size >= 1024:\n logger.info(\n \"impainting index image might take a while for sizes >= 1024\"\n )\n\n index_image, bary_image = index_image_impaint(\n index_image, bary_image, impaint_threshold\n )\n # TODO: we can avoid doing this 2x\n face_index = index_image_impaint(\n face_index, distance_threshold=impaint_threshold\n )\n\n self.register_buffer(\"index_image\", index_image.cpu())\n self.register_buffer(\"bary_image\", bary_image.cpu())\n self.register_buffer(\"face_index_image\", face_index.cpu())\n\n def render_index_images(self, uv_size, flip_uv=False, impaint=False):\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, 
uv_shape=uv_size, flip_uv=flip_uv\n )\n face_image, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n\n if impaint:\n index_image, bary_image = index_image_impaint(\n index_image,\n bary_image,\n )\n\n return index_image, face_image, bary_image\n\n def vn(self, verts):\n return vert_normals(verts, self.vi[np.newaxis].to(th.long))\n\n def to_uv(self, values):\n return values_to_uv(values, self.index_image, self.bary_image)\n\n def from_uv(self, values_uv):\n # TODO: we need to sample this\n return sample_uv(values_uv, self.vt, self.v2uv.to(th.long))" }, { "identifier": "compute_view_cos", "path": "ca_body/utils/geom.py", "snippet": "def compute_view_cos(verts, faces, camera_pos):\n vn = F.normalize(vert_normals(verts, faces), dim=-1)\n v2c = F.normalize(verts - camera_pos[:, np.newaxis], dim=-1)\n return th.einsum(\"bnd,bnd->bn\", vn, v2c)" }, { "identifier": "depth_discontuity_mask", "path": "ca_body/utils/geom.py", "snippet": "def depth_discontuity_mask(\n depth: th.Tensor, threshold: float = 40.0, kscale: float = 4.0, pool_ksize: int = 3\n) -> th.Tensor:\n device = depth.device\n\n with th.no_grad():\n # TODO: pass the kernel?\n kernel = th.as_tensor(\n [\n [[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]],\n [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]],\n ],\n dtype=th.float32,\n device=device,\n )\n\n disc_mask = (th.norm(F.conv2d(depth, kernel, bias=None, padding=1), dim=1) > threshold)[\n :, np.newaxis\n ]\n disc_mask = (\n F.avg_pool2d(disc_mask.float(), pool_ksize, stride=1, padding=pool_ksize // 2) > 0.0\n )\n\n return disc_mask" }, { "identifier": "depth2normals", "path": "ca_body/utils/geom.py", "snippet": "def depth2normals(depth, focal, princpt) -> th.Tensor:\n \"\"\"Convert depth image to normal image using camera intrinsics\n\n Args:\n depth: th.Tensor\n [B, 1, H, W] depth image\n\n focal: th.Tensor\n [B, 2, 2] camera focal lengths\n\n princpt: th.Tensor\n [B, 2] camera principal points\n\n Returns:\n th.Tensor: [B, 3, H, W] normal image\n \"\"\"\n\n return xyz2normals(depth2xyz(depth, focal, princpt))" }, { "identifier": "ShadowUNet", "path": "ca_body/nn/shadow.py", "snippet": "class ShadowUNet(nn.Module):\n def __init__(\n self,\n uv_size,\n ao_mean,\n shadow_size,\n lrelu_slope=0.2,\n beta=1.0,\n n_dims=64,\n interp_mode=\"bilinear\",\n biases=True,\n trainable_mean=False,\n ):\n super().__init__()\n\n # this is the size of the output\n self.uv_size = uv_size\n self.shadow_size = shadow_size\n\n ao_mean = F.interpolate(\n th.as_tensor(ao_mean)[np.newaxis],\n size=(self.shadow_size, self.shadow_size),\n )[0]\n if not trainable_mean:\n # TODO:\n self.register_buffer(\"ao_mean\", ao_mean)\n else:\n self.register_parameter(\"ao_mean\", th.nn.Parameter(ao_mean))\n\n self.depth = 3\n self.lrelu_slope = lrelu_slope\n self.interp_mode = interp_mode\n self.align_corners = None\n if interp_mode == \"bilinear\":\n self.align_corners = False\n\n # the base number of dimensions for the shadow maps\n n_dims = n_dims\n\n # TODO: generate this?\n self.n_enc_dims = [\n (1, n_dims),\n (n_dims, n_dims),\n (n_dims, n_dims),\n (n_dims, n_dims),\n ]\n\n self.sizes = [shadow_size // (2**i) for i in range(len(self.n_enc_dims))]\n\n logger.debug(f\"sizes: {self.sizes}\")\n\n self.enc_layers = nn.ModuleList()\n for i, size in enumerate(self.sizes):\n n_in, n_out = self.n_enc_dims[i]\n logger.debug(f\"EncoderLayers({i}): {n_in}, {n_out}, {size}\")\n self.enc_layers.append(\n nn.Sequential(\n la.Conv2dWNUB(\n n_in,\n n_out,\n kernel_size=3,\n height=size,\n width=size,\n 
stride=1,\n padding=1,\n ),\n nn.LeakyReLU(self.lrelu_slope, inplace=True),\n )\n )\n\n self.n_dec_dims = [\n (n_dims, n_dims),\n (n_dims * 2, n_dims),\n (n_dims * 2, n_dims),\n (n_dims * 2, n_dims),\n ]\n self.dec_layers = nn.ModuleList()\n for i in range(len(self.sizes)):\n size = self.sizes[-i - 1]\n n_in, n_out = self.n_dec_dims[i]\n logger.debug(f\"DecoderLayer({i}): {n_in}, {n_out}, {size}\")\n\n self.dec_layers.append(\n nn.Sequential(\n la.Conv2dWNUB(\n n_in,\n n_out,\n kernel_size=3,\n height=size,\n width=size,\n stride=1,\n padding=1,\n ),\n nn.LeakyReLU(self.lrelu_slope, inplace=True),\n )\n )\n\n self.apply(weights_initializer(self.lrelu_slope))\n\n if biases:\n self.shadow_pred = la.Conv2dWNUB(\n self.n_dec_dims[-1][-1],\n 1,\n kernel_size=3,\n height=self.sizes[0],\n width=self.sizes[0],\n stride=1,\n padding=1,\n )\n else:\n self.shadow_pred = la.Conv2dWN(\n self.n_dec_dims[-1][-1],\n 1,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n\n self.shadow_pred.apply(weights_initializer(1.0))\n self.beta = beta\n\n def forward(self, ao_map):\n # resizing the inputs if necessary\n if ao_map.shape[-2:] != (self.shadow_size, self.shadow_size):\n ao_map = F.interpolate(ao_map, size=(self.shadow_size, self.shadow_size))\n\n x = ao_map - self.ao_mean\n\n enc_acts = []\n # unet enc\n for i, layer in enumerate(self.enc_layers):\n # TODO: try applying a 1D sparse op?\n x = layer(x)\n enc_acts.append(x)\n # TODO: add this layer elsewhere?\n if i < len(self.sizes) - 1:\n x = F.interpolate(\n x,\n scale_factor=0.5,\n mode=\"bilinear\",\n recompute_scale_factor=True,\n align_corners=True,\n )\n\n # we do not need the last one?\n for i, layer in enumerate(self.dec_layers):\n if i > 0:\n x_prev = enc_acts[-i - 1]\n x = F.interpolate(x, size=x_prev.shape[2:4], mode=\"bilinear\", align_corners=True)\n x = th.cat([x, x_prev], dim=1)\n x = layer(x)\n\n shadow_map_lowres = th.sigmoid(self.shadow_pred(x) + self.beta)\n shadow_map = F.interpolate(\n shadow_map_lowres,\n (self.uv_size, self.uv_size),\n mode=self.interp_mode,\n align_corners=self.align_corners,\n )\n\n return {\n \"shadow_map\": shadow_map,\n \"ao_map\": ao_map,\n \"shadow_map_lowres\": shadow_map_lowres,\n }" }, { "identifier": "PoseToShadow", "path": "ca_body/nn/shadow.py", "snippet": "class PoseToShadow(nn.Module):\n def __init__(\n self,\n n_pose_dims,\n uv_size,\n beta=1.0,\n ) -> None:\n super().__init__()\n self.n_pose_dims = n_pose_dims\n self.uv_size = uv_size\n\n self.fc_block = nn.Sequential(\n la.LinearWN(self.n_pose_dims, 256 * 4 * 4),\n nn.LeakyReLU(0.2),\n )\n self.conv_block = nn.Sequential(\n la.ConvTranspose2dWNUB(256, 256, 8, 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(256, 128, 16, 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(128, 128, 32, 32, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(128, 64, 64, 64, 4, 2, 1),\n nn.LeakyReLU(0.2),\n # la.ConvTranspose2dWNUB(64, 64, 128, 128, 4, 2, 1),\n # nn.LeakyReLU(0.2),\n # la.ConvTranspose2dWNUB(64, 1, 256, 256, 4, 2, 1),\n la.ConvTranspose2dWNUB(64, 1, 128, 128, 4, 2, 1),\n )\n self.beta = beta\n self.apply(lambda x: la.glorot(x, 0.2))\n la.glorot(self.conv_block[-1], 1.0)\n\n def forward(self, pose: th.Tensor):\n assert pose.shape\n x = self.fc_block(pose)\n x = self.conv_block(x.reshape(-1, 256, 4, 4))\n shadow_map_lowres = th.sigmoid(x + self.beta)\n\n shadow_map = F.interpolate(\n shadow_map_lowres, size=(self.uv_size, self.uv_size), mode=\"bilinear\"\n )\n return {\"shadow_map\": shadow_map}" }, { "identifier": "UNetWB", "path": 
"ca_body/nn/unet.py", "snippet": "class UNetWB(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n size: int,\n n_init_ftrs: int=8,\n out_scale: float =0.1,\n ):\n # super().__init__(*args, **kwargs)\n super().__init__()\n\n self.out_scale = out_scale\n\n F = n_init_ftrs\n\n self.size = size\n\n self.down1 = nn.Sequential(\n Conv2dWNUB(in_channels, F, self.size // 2, self.size // 2, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down2 = nn.Sequential(\n Conv2dWNUB(F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down3 = nn.Sequential(\n Conv2dWNUB(2 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down4 = nn.Sequential(\n Conv2dWNUB(4 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down5 = nn.Sequential(\n Conv2dWNUB(8 * F, 16 * F, self.size // 32, self.size // 32, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up1 = nn.Sequential(\n ConvTranspose2dWNUB(16 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up2 = nn.Sequential(\n ConvTranspose2dWNUB(8 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up3 = nn.Sequential(\n ConvTranspose2dWNUB(4 * F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up4 = nn.Sequential(\n ConvTranspose2dWNUB(2 * F, F, self.size // 2, self.size // 2, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up5 = nn.Sequential(\n ConvTranspose2dWNUB(F, F, self.size, self.size, 4, 2, 1), nn.LeakyReLU(0.2)\n )\n self.out = Conv2dWNUB(F + in_channels, out_channels, self.size, self.size, kernel_size=1)\n self.apply(lambda x: glorot(x, 0.2))\n glorot(self.out, 1.0)\n\n def forward(self, x):\n x1 = x\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x6 = self.down5(x5)\n # TODO: switch to concat?\n x = self.up1(x6) + x5\n x = self.up2(x) + x4\n x = self.up3(x) + x3\n x = self.up4(x) + x2\n x = self.up5(x)\n x = th.cat([x, x1], dim=1)\n return self.out(x) * self.out_scale" }, { "identifier": "CalV5", "path": "ca_body/nn/color_cal.py", "snippet": "class CalV5(CalBase):\n def __init__(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n cameras,\n # pyre-fixme[2]: Parameter must be annotated.\n identity_camera,\n gs_lrscale: float = 1e0,\n col_lrscale: float = 1e-1,\n ) -> None:\n super(CalBase, self).__init__()\n\n if identity_camera not in cameras:\n identity_camera = cameras[0]\n logger.warning(\n f\"Requested color-calibration identity camera not present, defaulting to {identity_camera}.\"\n )\n\n # pyre-fixme[4]: Attribute must be annotated.\n self.identity_camera = identity_camera\n # pyre-fixme[4]: Attribute must be annotated.\n self.cameras = cameras\n self.gs_lrscale = gs_lrscale\n self.col_lrscale = col_lrscale\n self.holder: ParamHolder = ParamHolder(\n # pyre-fixme[6]: For 1st param expected `Tuple[int]` but got `int`.\n 3 + 3,\n cameras,\n init_value=th.FloatTensor([1, 1, 1, 0, 0, 0]),\n )\n\n # pyre-fixme[4]: Attribute must be annotated.\n self.identity_idx = self.holder.to_idx([identity_camera]).item()\n # pyre-fixme[4]: Attribute must be annotated.\n self.grey_idxs = [self.holder.to_idx([c]).item() for c in cameras if c.startswith(\"41\")]\n\n s = th.FloatTensor([0.37, 0.52, 0.52])\n self.holder.params.data[th.LongTensor(self.grey_idxs), :3] = s\n\n def name_to_idx(self, cam_names: Sequence[str]) -> th.Tensor:\n return self.holder.to_idx(cam_names)\n\n # pyre-fixme[2]: Parameter must be 
annotated.\n def initialize_from_texs(self, ds) -> float:\n tex_mean = ds.tex_mean.permute(1, 2, 0)\n texs = {}\n idx = 0\n while ds[idx] is None:\n idx += 1\n\n for cam in self.cameras:\n samp = ds[idx, cam]\n if samp is None:\n continue\n\n tex = samp[\"tex\"]\n texs[cam] = tex.permute(1, 2, 0)\n\n stats = {}\n for cam in texs.keys():\n t = texs[cam]\n mask = (t > 0).all(dim=2)\n t = t * ds.tex_std + tex_mean\n stats[cam] = (t[mask].mean(dim=0), t[mask].std(dim=0))\n\n normstats = {}\n for cam in texs.keys():\n mean, std = stats[cam]\n imean, istd = stats[self.identity_camera]\n scale = istd / std\n bias = imean - scale * mean\n normstats[cam] = (scale.clamp(max=2), bias)\n\n for cam, nstats in normstats.items():\n cidx = self.name_to_idx([cam])[0]\n if cidx in self.grey_idxs:\n nstats = (nstats[0] / 3, nstats[1] / 3)\n self.holder.params.data[cidx, 0:3] = nstats[0]\n self.holder.params.data[cidx, 3:6] = nstats[1]\n return len(stats.keys()) / len(ds.cameras)\n\n # pyre-fixme[3]: Return type must be annotated.\n # pyre-fixme[2]: Parameter must be annotated.\n # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`\n # inconsistently.\n def load_state_dict(self, state_dict, strict: bool = True):\n state_dict = {k[7:]: v for k, v in state_dict.items() if k.startswith(\"holder.\")}\n return self.holder.load_state_dict(state_dict, strict=strict)\n\n # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.\n # pyre-fixme[3]: Return type must be annotated.\n def state_dict(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n destination=None,\n prefix: str = \"\",\n keep_vars: bool = False,\n saving: bool = False,\n ):\n sd = super(CalBase, self).state_dict(\n destination=destination, prefix=prefix, keep_vars=keep_vars\n )\n if saving:\n sd[prefix + \"holder.key_list\"] = self.holder.key_list\n return sd\n\n def forward(self, image: th.Tensor, cam_idxs: th.Tensor) -> th.Tensor:\n params = self.holder(cam_idxs)\n outs = []\n hook_scales = []\n for i in range(cam_idxs.shape[0]):\n idx = cam_idxs[i]\n img = image[i : i + 1]\n if idx == self.identity_idx:\n outs.append(img)\n hook_scales.append(1)\n continue\n\n w, b = params[i, :3], params[i, 3:]\n if idx in self.grey_idxs:\n b = b.sum()\n out = (img * w[None, :, None, None]).sum(dim=1, keepdim=True).expand(\n -1, 3, -1, -1\n ) + b\n else:\n out = img * w[None, :, None, None] + b[None, :, None, None]\n outs.append(out)\n hook_scales.append(self.gs_lrscale if idx in self.grey_idxs else self.col_lrscale)\n\n hook_scales = th.tensor(hook_scales, device=image.device, dtype=th.float32)\n cal_out = th.cat(outs)\n\n if self.training and params.requires_grad:\n params.register_hook(lambda g, hs=hook_scales: scale_hook(g, hs[:, None]))\n return cal_out" }, { "identifier": "linear2displayBatch", "path": "ca_body/utils/image.py", "snippet": "def linear2displayBatch(\n val: th.Tensor,\n gamma: float = 1.5,\n wbscale: np.ndarray = __DEFAULT_WB_SCALE,\n black: float = 5.0 / 255.0,\n mode: str = \"srgb\",\n) -> th.Tensor:\n scaling: th.Tensor = th.from_numpy(wbscale).to(val.device)\n val = val.float() / 255.0 * scaling[None, :, None, None] - black\n if mode == \"srgb\":\n val = linear2srgb(val, gamma=gamma)\n else:\n val = val ** th.tensor(1.0 / gamma)\n return th.clamp(val, 0, 1) * 255.0" }, { "identifier": "LBSModule", "path": "ca_body/utils/lbs.py", "snippet": "class LBSModule(nn.Module):\n def __init__(\n self, lbs_model_json, lbs_config_dict, lbs_template_verts, lbs_scale, global_scaling\n ):\n 
super().__init__()\n self.lbs_fn = LinearBlendSkinning(lbs_model_json, lbs_config_dict)\n\n self.register_buffer(\"lbs_scale\", th.as_tensor(lbs_scale, dtype=th.float32))\n self.register_buffer(\n \"lbs_template_verts\", th.as_tensor(lbs_template_verts, dtype=th.float32)\n )\n self.register_buffer(\"global_scaling\", th.as_tensor(global_scaling))\n\n def pose(self, verts_unposed, motion, template: Optional[th.Tensor] = None):\n scale = self.lbs_scale.expand(motion.shape[0], -1)\n if template is None:\n template = self.lbs_template_verts\n return self.lbs_fn(motion, scale, verts_unposed + template) * self.global_scaling\n\n def unpose(self, verts, motion):\n B = motion.shape[0]\n scale = self.lbs_scale.expand(B, -1)\n return (\n self.lbs_fn.unpose(motion, scale, verts / self.global_scaling) - self.lbs_template_verts\n )\n\n def template_pose(self, motion):\n B = motion.shape[0]\n scale = self.lbs_scale.expand(B, -1)\n verts = self.lbs_template_verts[np.newaxis].expand(B, -1, -1)\n return self.lbs_fn(motion, scale, verts) * self.global_scaling[np.newaxis]" }, { "identifier": "RenderLayer", "path": "ca_body/utils/render.py", "snippet": "class RenderLayer(nn.Module):\n \n def __init__(self, h, w, vi, vt, vti, flip_uvs=False):\n super().__init__()\n self.register_buffer(\"vi\", vi, persistent=False)\n self.register_buffer(\"vt\", vt, persistent=False)\n self.register_buffer(\"vti\", vti, persistent=False)\n raster_settings = RasterizationSettings(image_size=(h, w))\n self.rasterizer = MeshRasterizer(raster_settings=raster_settings)\n self.flip_uvs = flip_uvs \n image_size = th.as_tensor([h, w], dtype=th.int32)\n self.register_buffer(\"image_size\", image_size)\n \n def forward(self, verts: th.Tensor, tex: th.Tensor, K: th.Tensor, Rt: th.Tensor, background: th.Tensor = None, output_filters: List[str] = None):\n\n assert output_filters is None\n assert background is None\n\n B = verts.shape[0]\n\n image_size = self.image_size[None].repeat(B, 1)\n \n cameras = cameras_from_opencv_projection(Rt[:,:,:3], Rt[:,:3,3], K, image_size)\n\n faces = self.vi[None].repeat(B, 1, 1)\n faces_uvs = self.vti[None].repeat(B, 1, 1)\n verts_uvs = self.vt[None].repeat(B, 1, 1) \n \n # NOTE: this is confusing but correct\n if not self.flip_uvs:\n tex = th.flip(tex.permute(0, 2, 3, 1), (1,))\n\n textures = TexturesUV(\n maps=tex,\n faces_uvs=faces_uvs,\n verts_uvs=verts_uvs,\n ) \n meshes = Meshes(verts, faces, textures=textures)\n \n fragments = self.rasterizer(meshes, cameras=cameras)\n rgb = meshes.sample_textures(fragments)[:,:,:,0] \n rgb[fragments.pix_to_face[...,0] == -1] = 0.0 \n\n return {'render': rgb.permute(0, 3, 1, 2)}" }, { "identifier": "SeamSampler", "path": "ca_body/utils/seams.py", "snippet": "class SeamSampler(nn.Module):\n def __init__(self, seamless_data: Dict[str, Any]) -> None:\n super().__init__()\n\n self.register_buffer(\"dst_ij\", seamless_data[\"dst_ij\"])\n self.register_buffer(\"src_ij\", seamless_data[\"src_ij\"])\n self.register_buffer(\"uvs\", seamless_data[\"uvs\"])\n self.register_buffer(\"weights\", seamless_data[\"weights\"])\n\n def impaint(self, value: th.Tensor) -> th.Tensor:\n return impaint_batch(value, self.dst_ij, self.src_ij)\n\n def resample(self, tex: th.Tensor) -> th.Tensor:\n return resample_tex(tex, self.uvs, self.weights)\n\n def resample_border_only(self, tex: th.Tensor) -> th.Tensor:\n tex = resample_tex(tex, self.uvs, self.weights)\n return tex\n\n def forward(self, tex: th.Tensor) -> th.Tensor:\n x = self.impaint(tex)\n x = self.resample(x)\n return x" }, { 
"identifier": "RenderLayer", "path": "ca_body/utils/render.py", "snippet": "class RenderLayer(nn.Module):\n \n def __init__(self, h, w, vi, vt, vti, flip_uvs=False):\n super().__init__()\n self.register_buffer(\"vi\", vi, persistent=False)\n self.register_buffer(\"vt\", vt, persistent=False)\n self.register_buffer(\"vti\", vti, persistent=False)\n raster_settings = RasterizationSettings(image_size=(h, w))\n self.rasterizer = MeshRasterizer(raster_settings=raster_settings)\n self.flip_uvs = flip_uvs \n image_size = th.as_tensor([h, w], dtype=th.int32)\n self.register_buffer(\"image_size\", image_size)\n \n def forward(self, verts: th.Tensor, tex: th.Tensor, K: th.Tensor, Rt: th.Tensor, background: th.Tensor = None, output_filters: List[str] = None):\n\n assert output_filters is None\n assert background is None\n\n B = verts.shape[0]\n\n image_size = self.image_size[None].repeat(B, 1)\n \n cameras = cameras_from_opencv_projection(Rt[:,:,:3], Rt[:,:3,3], K, image_size)\n\n faces = self.vi[None].repeat(B, 1, 1)\n faces_uvs = self.vti[None].repeat(B, 1, 1)\n verts_uvs = self.vt[None].repeat(B, 1, 1) \n \n # NOTE: this is confusing but correct\n if not self.flip_uvs:\n tex = th.flip(tex.permute(0, 2, 3, 1), (1,))\n\n textures = TexturesUV(\n maps=tex,\n faces_uvs=faces_uvs,\n verts_uvs=verts_uvs,\n ) \n meshes = Meshes(verts, faces, textures=textures)\n \n fragments = self.rasterizer(meshes, cameras=cameras)\n rgb = meshes.sample_textures(fragments)[:,:,:,0] \n rgb[fragments.pix_to_face[...,0] == -1] = 0.0 \n\n return {'render': rgb.permute(0, 3, 1, 2)}" }, { "identifier": "FaceDecoderFrontal", "path": "ca_body/nn/face.py", "snippet": "class FaceDecoderFrontal(nn.Module):\n def __init__(\n self,\n assets: AttrDict,\n n_latent: int = 256,\n n_vert_out: int = 3 * 7306,\n tex_out_shp: Tuple[int, int] = (1024, 1024),\n tex_roi: Tuple[Tuple[int, int], Tuple[int, int]] = ((0, 0), (1024, 1024)),\n ) -> None:\n super().__init__()\n self.n_latent = n_latent\n self.n_vert_out = n_vert_out\n self.tex_roi = tex_roi\n self.tex_roi_shp: Tuple[int, int] = tuple(\n [int(i) for i in np.diff(np.array(tex_roi), axis=0).squeeze()]\n )\n self.tex_out_shp = tex_out_shp\n\n self.encmod = nn.Sequential(la.LinearWN(n_latent, 256), nn.LeakyReLU(0.2, inplace=True))\n self.geommod = nn.Sequential(la.LinearWN(256, n_vert_out))\n\n self.viewmod = nn.Sequential(la.LinearWN(3, 8), nn.LeakyReLU(0.2, inplace=True))\n self.texmod2 = nn.Sequential(\n la.LinearWN(256 + 8, 256 * 4 * 4), nn.LeakyReLU(0.2, inplace=True)\n )\n self.texmod = nn.Sequential(\n la.ConvTranspose2dWNUB(256, 256, 8, 8, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(256, 128, 16, 16, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(128, 128, 32, 32, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(128, 64, 64, 64, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(64, 64, 128, 128, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(64, 32, 256, 256, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(32, 8, 512, 512, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(8, 3, 1024, 1024, 4, 2, 1),\n )\n\n self.bias = nn.Parameter(th.zeros(3, self.tex_roi_shp[0], self.tex_roi_shp[1]))\n self.bias.data.zero_()\n\n self.register_buffer(\n \"frontal_view\", th.as_tensor(assets.face_frontal_view, dtype=th.float32)\n )\n\n self.apply(lambda x: la.glorot(x, 0.2))\n la.glorot(self.texmod[-1], 1.0)\n\n def forward(self, face_embs: 
th.Tensor) -> Dict[str, th.Tensor]:\n B = face_embs.shape[0]\n view = self.frontal_view[np.newaxis].expand(B, -1)\n encout = self.encmod(face_embs)\n geomout = self.geommod(encout)\n viewout = self.viewmod(view)\n encview = th.cat([encout, viewout], dim=1)\n texout = self.texmod(self.texmod2(encview).view(-1, 256, 4, 4))\n out = {\"face_geom\": geomout.view(geomout.shape[0], -1, 3)}\n out[\"face_tex_raw\"] = texout\n texout = texout + self.bias[None]\n out[\"face_tex\"] = 255 * (texout + 0.5)\n return out" } ]
import logging import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F import ca_body.nn.layers as la from typing import Dict, Optional, Tuple from torchvision.utils import make_grid from torchvision.transforms.functional import gaussian_blur from ca_body.nn.blocks import ( ConvBlock, ConvDownBlock, UpConvBlockDeep, tile2d, weights_initializer, ) from ca_body.nn.dof_cal import LearnableBlur from ca_body.utils.geom import ( GeometryModule, compute_view_cos, depth_discontuity_mask, depth2normals, ) from ca_body.nn.shadow import ShadowUNet, PoseToShadow from ca_body.nn.unet import UNetWB from ca_body.nn.color_cal import CalV5 from ca_body.utils.image import linear2displayBatch from ca_body.utils.lbs import LBSModule from ca_body.utils.render import RenderLayer from ca_body.utils.seams import SeamSampler from ca_body.utils.render import RenderLayer from ca_body.nn.face import FaceDecoderFrontal
11,646
class AutoEncoder(nn.Module): def __init__( self, encoder, decoder, decoder_view, encoder_face, # hqlp decoder to get the codes decoder_face, shadow_net, upscale_net, assets, pose_to_shadow=None, renderer=None, cal=None, pixel_cal=None, learn_blur: bool = True, ): super().__init__() # TODO: should we have a shared LBS here? self.geo_fn = GeometryModule( assets.topology.vi, assets.topology.vt, assets.topology.vti, assets.topology.v2uv, uv_size=1024, impaint=True, ) self.lbs_fn = LBSModule( assets.lbs_model_json, assets.lbs_config_dict, assets.lbs_template_verts, assets.lbs_scale, assets.global_scaling, ) self.seam_sampler = SeamSampler(assets.seam_data_1024) self.seam_sampler_2k = SeamSampler(assets.seam_data_2048) # joint tex -> body and clothes # TODO: why do we have a joint one in the first place? tex_mean = gaussian_blur(th.as_tensor(assets.tex_mean)[np.newaxis], kernel_size=11) self.register_buffer("tex_mean", F.interpolate(tex_mean, (2048, 2048), mode='bilinear')) # this is shared self.tex_std = assets.tex_var if 'tex_var' in assets else 64.0 face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) meye_mask = self.geo_fn.to_uv( th.as_tensor(assets.mouth_eyes_mask_geom[np.newaxis, :, np.newaxis]) ) meye_mask = F.interpolate(meye_mask, (2048, 2048), mode='bilinear') self.register_buffer("meye_mask", meye_mask) self.decoder = ConvDecoder( geo_fn=self.geo_fn, seam_sampler=self.seam_sampler, **decoder, assets=assets, ) # embs for everything but face non_head_mask = 1.0 - assets.face_mask self.encoder = Encoder( geo_fn=self.geo_fn, mask=non_head_mask, **encoder, ) self.encoder_face = FaceEncoder( assets=assets, **encoder_face, ) # using face decoder to generate better conditioning decoder_face_ckpt_path = None if 'ckpt' in decoder_face: decoder_face_ckpt_path = decoder_face.pop('ckpt') self.decoder_face = FaceDecoderFrontal(assets=assets, **decoder_face) if decoder_face_ckpt_path is not None: self.decoder_face.load_state_dict(th.load(decoder_face_ckpt_path), strict=False) self.decoder_view = UNetViewDecoder( self.geo_fn, seam_sampler=self.seam_sampler, **decoder_view, ) self.shadow_net = ShadowUNet( ao_mean=assets.ao_mean, interp_mode="bilinear", biases=False, **shadow_net, ) self.pose_to_shadow_enabled = False if pose_to_shadow is not None: self.pose_to_shadow_enabled = True self.pose_to_shadow = PoseToShadow(**pose_to_shadow) self.upscale_net = UpscaleNet( in_channels=6, size=1024, upscale_factor=2, out_channels=3, **upscale_net ) self.pixel_cal_enabled = False if pixel_cal is not None: self.pixel_cal_enabled = True self.pixel_cal = CameraPixelBias(**pixel_cal, cameras=assets.camera_ids) self.learn_blur_enabled = False if learn_blur: self.learn_blur_enabled = True
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) class CameraPixelBias(nn.Module): def __init__(self, image_height, image_width, cameras, ds_rate) -> None: super().__init__() self.image_height = image_height self.image_width = image_width self.cameras = cameras self.n_cameras = len(cameras) bias = th.zeros( (self.n_cameras, 1, image_width // ds_rate, image_height // ds_rate), dtype=th.float32 ) self.register_parameter("bias", nn.Parameter(bias)) def forward(self, idxs: th.Tensor): bias_up = F.interpolate( self.bias[idxs], size=(self.image_height, self.image_width), mode='bilinear' ) return bias_up class AutoEncoder(nn.Module): def __init__( self, encoder, decoder, decoder_view, encoder_face, # hqlp decoder to get the codes decoder_face, shadow_net, upscale_net, assets, pose_to_shadow=None, renderer=None, cal=None, pixel_cal=None, learn_blur: bool = True, ): super().__init__() # TODO: should we have a shared LBS here? self.geo_fn = GeometryModule( assets.topology.vi, assets.topology.vt, assets.topology.vti, assets.topology.v2uv, uv_size=1024, impaint=True, ) self.lbs_fn = LBSModule( assets.lbs_model_json, assets.lbs_config_dict, assets.lbs_template_verts, assets.lbs_scale, assets.global_scaling, ) self.seam_sampler = SeamSampler(assets.seam_data_1024) self.seam_sampler_2k = SeamSampler(assets.seam_data_2048) # joint tex -> body and clothes # TODO: why do we have a joint one in the first place? tex_mean = gaussian_blur(th.as_tensor(assets.tex_mean)[np.newaxis], kernel_size=11) self.register_buffer("tex_mean", F.interpolate(tex_mean, (2048, 2048), mode='bilinear')) # this is shared self.tex_std = assets.tex_var if 'tex_var' in assets else 64.0 face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) meye_mask = self.geo_fn.to_uv( th.as_tensor(assets.mouth_eyes_mask_geom[np.newaxis, :, np.newaxis]) ) meye_mask = F.interpolate(meye_mask, (2048, 2048), mode='bilinear') self.register_buffer("meye_mask", meye_mask) self.decoder = ConvDecoder( geo_fn=self.geo_fn, seam_sampler=self.seam_sampler, **decoder, assets=assets, ) # embs for everything but face non_head_mask = 1.0 - assets.face_mask self.encoder = Encoder( geo_fn=self.geo_fn, mask=non_head_mask, **encoder, ) self.encoder_face = FaceEncoder( assets=assets, **encoder_face, ) # using face decoder to generate better conditioning decoder_face_ckpt_path = None if 'ckpt' in decoder_face: decoder_face_ckpt_path = decoder_face.pop('ckpt') self.decoder_face = FaceDecoderFrontal(assets=assets, **decoder_face) if decoder_face_ckpt_path is not None: self.decoder_face.load_state_dict(th.load(decoder_face_ckpt_path), strict=False) self.decoder_view = UNetViewDecoder( self.geo_fn, seam_sampler=self.seam_sampler, **decoder_view, ) self.shadow_net = ShadowUNet( ao_mean=assets.ao_mean, interp_mode="bilinear", biases=False, **shadow_net, ) self.pose_to_shadow_enabled = False if pose_to_shadow is not None: self.pose_to_shadow_enabled = True self.pose_to_shadow = PoseToShadow(**pose_to_shadow) self.upscale_net = UpscaleNet( in_channels=6, size=1024, upscale_factor=2, out_channels=3, **upscale_net ) self.pixel_cal_enabled = False if pixel_cal is not None: self.pixel_cal_enabled = True self.pixel_cal = CameraPixelBias(**pixel_cal, cameras=assets.camera_ids) self.learn_blur_enabled = False 
if learn_blur: self.learn_blur_enabled = True
self.learn_blur = LearnableBlur(assets.camera_ids)
5
2023-12-27 15:31:35+00:00
16k
daswer123/rvc-python
rvc_python/modules/vc/modules.py
[ { "identifier": "load_audio", "path": "rvc_python/lib/audio.py", "snippet": "def load_audio(file, sr):\n file = (\n file.strip(\" \").strip('\"').strip(\"\\n\").strip('\"').strip(\" \")\n ) # 防止小白拷路径头尾带了空格和\"和回车\n if os.path.exists(file) == False:\n raise RuntimeError(\n \"You input a wrong audio path that does not exists, please fix it!\"\n )\n try:\n with open(file, \"rb\") as f:\n with BytesIO() as out:\n audio2(f, out, \"f32le\", sr)\n return np.frombuffer(out.getvalue(), np.float32).flatten()\n\n except AttributeError:\n audio = file[1] / 32768.0\n if len(audio.shape) == 2:\n audio = np.mean(audio, -1)\n return librosa.resample(audio, orig_sr=file[0], target_sr=16000)\n\n except:\n raise RuntimeError(traceback.format_exc())" }, { "identifier": "wav2", "path": "rvc_python/lib/audio.py", "snippet": "def wav2(i, o, format):\n inp = av.open(i, \"rb\")\n if format == \"m4a\":\n format = \"mp4\"\n out = av.open(o, \"wb\", format=format)\n if format == \"ogg\":\n format = \"libvorbis\"\n if format == \"mp4\":\n format = \"aac\"\n\n ostream = out.add_stream(format)\n\n for frame in inp.decode(audio=0):\n for p in ostream.encode(frame):\n out.mux(p)\n\n for p in ostream.encode(None):\n out.mux(p)\n\n out.close()\n inp.close()" }, { "identifier": "SynthesizerTrnMs256NSFsid", "path": "rvc_python/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # 
print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n nsff0 = nsff0[:, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs256NSFsid_nono", "path": "rvc_python/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs256NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder256(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, 
y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid", "path": "rvc_python/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr,\n **kwargs\n ):\n super().__init__()\n if type(sr) == type(\"strr\"):\n sr = sr2sr[sr]\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n )\n self.dec = GeneratorNSF(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n sr=sr,\n is_half=kwargs[\"is_half\"],\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(\n self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds\n ): # 这里ds是id,[bs,1]\n # print(1,pitch.shape)#[bs,t]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length)\n pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size)\n # 
print(-2,pitchf.shape,z_slice.shape)\n o = self.dec(z_slice, pitchf, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n nsff0 = nsff0[:, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, nsff0, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "SynthesizerTrnMs768NSFsid_nono", "path": "rvc_python/lib/infer_pack/models.py", "snippet": "class SynthesizerTrnMs768NSFsid_nono(nn.Module):\n def __init__(\n self,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n spk_embed_dim,\n gin_channels,\n sr=None,\n **kwargs\n ):\n super().__init__()\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.gin_channels = gin_channels\n # self.hop_length = hop_length#\n self.spk_embed_dim = spk_embed_dim\n self.enc_p = TextEncoder768(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n f0=False,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n self.flow = ResidualCouplingBlock(\n inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels\n )\n self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)\n print(\"gin_channels:\", gin_channels, \"self.spk_embed_dim:\", self.spk_embed_dim)\n\n def remove_weight_norm(self):\n self.dec.remove_weight_norm()\n self.flow.remove_weight_norm()\n self.enc_q.remove_weight_norm()\n\n def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1]\n g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q)\n\n def infer(self, phone, phone_lengths, sid, rate=None):\n g = self.emb_g(sid).unsqueeze(-1)\n m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths)\n z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask\n if rate:\n head = int(z_p.shape[2] * 
rate)\n z_p = z_p[:, :, -head:]\n x_mask = x_mask[:, :, -head:]\n z = self.flow(z_p, x_mask, g=g, reverse=True)\n o = self.dec(z * x_mask, g=g)\n return o, x_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "Pipeline", "path": "rvc_python/modules/vc/pipeline.py", "snippet": "class Pipeline(object):\n def __init__(self, tgt_sr, config, lib_dir):\n self.x_pad, self.x_query, self.x_center, self.x_max, self.is_half = (\n config.x_pad,\n config.x_query,\n config.x_center,\n config.x_max,\n config.is_half,\n )\n self.sr = 16000 # hubert输入采样率\n self.window = 160 # 每帧点数\n self.t_pad = self.sr * self.x_pad # 每条前后pad时间\n self.t_pad_tgt = tgt_sr * self.x_pad\n self.t_pad2 = self.t_pad * 2\n self.t_query = self.sr * self.x_query # 查询切点前后查询时间\n self.t_center = self.sr * self.x_center # 查询切点位置\n self.t_max = self.sr * self.x_max # 免查询时长阈值\n self.device = config.device\n self.lib_dir = lib_dir\n\n def get_f0(\n self,\n input_audio_path,\n x,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0=None,\n ):\n global input_audio_path2wav\n time_step = self.window / self.sr * 1000\n f0_min = 50\n f0_max = 1100\n f0_mel_min = 1127 * np.log(1 + f0_min / 700)\n f0_mel_max = 1127 * np.log(1 + f0_max / 700)\n if f0_method == \"pm\":\n f0 = (\n parselmouth.Sound(x, self.sr)\n .to_pitch_ac(\n time_step=time_step / 1000,\n voicing_threshold=0.6,\n pitch_floor=f0_min,\n pitch_ceiling=f0_max,\n )\n .selected_array[\"frequency\"]\n )\n pad_size = (p_len - len(f0) + 1) // 2\n if pad_size > 0 or p_len - len(f0) - pad_size > 0:\n f0 = np.pad(\n f0, [[pad_size, p_len - len(f0) - pad_size]], mode=\"constant\"\n )\n elif f0_method == \"harvest\":\n input_audio_path2wav[input_audio_path] = x.astype(np.double)\n f0 = cache_harvest_f0(input_audio_path, self.sr, f0_max, f0_min, 10)\n if filter_radius > 2:\n f0 = signal.medfilt(f0, 3)\n elif f0_method == \"crepe\":\n model = \"full\"\n # Pick a batch size that doesn't cause memory errors on your gpu\n batch_size = 512\n # Compute pitch using first gpu\n audio = torch.tensor(np.copy(x))[None].float()\n f0, pd = torchcrepe.predict(\n audio,\n self.sr,\n self.window,\n f0_min,\n f0_max,\n model,\n batch_size=batch_size,\n device=self.device,\n return_periodicity=True,\n )\n pd = torchcrepe.filter.median(pd, 3)\n f0 = torchcrepe.filter.mean(f0, 3)\n f0[pd < 0.1] = 0\n f0 = f0[0].cpu().numpy()\n elif f0_method == \"rmvpe\":\n if not hasattr(self, \"model_rmvpe\"):\n from rvc_python.lib.rmvpe import RMVPE\n\n logger.info(\n \"Loading rmvpe model - base_models/rmvpe.pth\"\n )\n rmvpe_path = Path(f\"{self.lib_dir}\\\\base_model\\\\rmvpe.pt\")\n self.model_rmvpe = RMVPE(\n rmvpe_path,\n is_half=self.is_half,\n device=self.device,\n )\n f0 = self.model_rmvpe.infer_from_audio(x, thred=0.03)\n\n if \"privateuseone\" in str(self.device): # clean ortruntime memory\n del self.model_rmvpe.model\n del self.model_rmvpe\n logger.info(\"Cleaning ortruntime memory\")\n\n f0 *= pow(2, f0_up_key / 12)\n # with open(\"test.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n tf0 = self.sr // self.window # 每秒f0点数\n if inp_f0 is not None:\n delta_t = np.round(\n (inp_f0[:, 0].max() - inp_f0[:, 0].min()) * tf0 + 1\n ).astype(\"int16\")\n replace_f0 = np.interp(\n list(range(delta_t)), inp_f0[:, 0] * 100, inp_f0[:, 1]\n )\n shape = f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)].shape[0]\n f0[self.x_pad * tf0 : self.x_pad * tf0 + len(replace_f0)] = replace_f0[\n :shape\n ]\n # with open(\"test_opt.txt\",\"w\")as f:f.write(\"\\n\".join([str(i)for i in f0.tolist()]))\n f0bak 
= f0.copy()\n f0_mel = 1127 * np.log(1 + f0 / 700)\n f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (\n f0_mel_max - f0_mel_min\n ) + 1\n f0_mel[f0_mel <= 1] = 1\n f0_mel[f0_mel > 255] = 255\n f0_coarse = np.rint(f0_mel).astype(np.int32)\n return f0_coarse, f0bak # 1-0\n\n def vc(\n self,\n model,\n net_g,\n sid,\n audio0,\n pitch,\n pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n ): # ,file_index,file_big_npy\n feats = torch.from_numpy(audio0)\n if self.is_half:\n feats = feats.half()\n else:\n feats = feats.float()\n if feats.dim() == 2: # double channels\n feats = feats.mean(-1)\n assert feats.dim() == 1, feats.dim()\n feats = feats.view(1, -1)\n padding_mask = torch.BoolTensor(feats.shape).to(self.device).fill_(False)\n\n inputs = {\n \"source\": feats.to(self.device),\n \"padding_mask\": padding_mask,\n \"output_layer\": 9 if version == \"v1\" else 12,\n }\n t0 = ttime()\n with torch.no_grad():\n logits = model.extract_features(**inputs)\n feats = model.final_proj(logits[0]) if version == \"v1\" else logits[0]\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = feats.clone()\n if (\n not isinstance(index, type(None))\n and not isinstance(big_npy, type(None))\n and index_rate != 0\n ):\n npy = feats[0].cpu().numpy()\n if self.is_half:\n npy = npy.astype(\"float32\")\n\n # _, I = index.search(npy, 1)\n # npy = big_npy[I.squeeze()]\n\n score, ix = index.search(npy, k=8)\n weight = np.square(1 / score)\n weight /= weight.sum(axis=1, keepdims=True)\n npy = np.sum(big_npy[ix] * np.expand_dims(weight, axis=2), axis=1)\n\n if self.is_half:\n npy = npy.astype(\"float16\")\n feats = (\n torch.from_numpy(npy).unsqueeze(0).to(self.device) * index_rate\n + (1 - index_rate) * feats\n )\n\n feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)\n if protect < 0.5 and pitch is not None and pitchf is not None:\n feats0 = F.interpolate(feats0.permute(0, 2, 1), scale_factor=2).permute(\n 0, 2, 1\n )\n t1 = ttime()\n p_len = audio0.shape[0] // self.window\n if feats.shape[1] < p_len:\n p_len = feats.shape[1]\n if pitch is not None and pitchf is not None:\n pitch = pitch[:, :p_len]\n pitchf = pitchf[:, :p_len]\n\n if protect < 0.5 and pitch is not None and pitchf is not None:\n pitchff = pitchf.clone()\n pitchff[pitchf > 0] = 1\n pitchff[pitchf < 1] = protect\n pitchff = pitchff.unsqueeze(-1)\n feats = feats * pitchff + feats0 * (1 - pitchff)\n feats = feats.to(feats0.dtype)\n p_len = torch.tensor([p_len], device=self.device).long()\n with torch.no_grad():\n hasp = pitch is not None and pitchf is not None\n arg = (feats, p_len, pitch, pitchf, sid) if hasp else (feats, p_len, sid)\n audio1 = (net_g.infer(*arg)[0][0, 0]).data.cpu().float().numpy()\n del hasp, arg\n del feats, p_len, padding_mask\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n t2 = ttime()\n times[0] += t1 - t0\n times[2] += t2 - t1\n return audio1\n\n def pipeline(\n self,\n model,\n net_g,\n sid,\n audio,\n input_audio_path,\n times,\n f0_up_key,\n f0_method,\n file_index,\n index_rate,\n if_f0,\n filter_radius,\n tgt_sr,\n resample_sr,\n rms_mix_rate,\n version,\n protect,\n f0_file=None,\n ):\n if (\n file_index != \"\"\n # and file_big_npy != \"\"\n # and os.path.exists(file_big_npy) == True\n and os.path.exists(file_index)\n and index_rate != 0\n ):\n try:\n index = faiss.read_index(file_index)\n # big_npy = np.load(file_big_npy)\n big_npy = index.reconstruct_n(0, index.ntotal)\n except:\n traceback.print_exc()\n index = big_npy = 
None\n else:\n index = big_npy = None\n audio = signal.filtfilt(bh, ah, audio)\n audio_pad = np.pad(audio, (self.window // 2, self.window // 2), mode=\"reflect\")\n opt_ts = []\n if audio_pad.shape[0] > self.t_max:\n audio_sum = np.zeros_like(audio)\n for i in range(self.window):\n audio_sum += np.abs(audio_pad[i : i - self.window])\n for t in range(self.t_center, audio.shape[0], self.t_center):\n opt_ts.append(\n t\n - self.t_query\n + np.where(\n audio_sum[t - self.t_query : t + self.t_query]\n == audio_sum[t - self.t_query : t + self.t_query].min()\n )[0][0]\n )\n s = 0\n audio_opt = []\n t = None\n t1 = ttime()\n audio_pad = np.pad(audio, (self.t_pad, self.t_pad), mode=\"reflect\")\n p_len = audio_pad.shape[0] // self.window\n inp_f0 = None\n if hasattr(f0_file, \"name\"):\n try:\n with open(f0_file.name, \"r\") as f:\n lines = f.read().strip(\"\\n\").split(\"\\n\")\n inp_f0 = []\n for line in lines:\n inp_f0.append([float(i) for i in line.split(\",\")])\n inp_f0 = np.array(inp_f0, dtype=\"float32\")\n except:\n traceback.print_exc()\n # print(sid)\n # sid = os.path.abspath(sid)\n sid = torch.tensor(sid, device=self.device).unsqueeze(0).long()\n pitch, pitchf = None, None\n if if_f0 == 1:\n pitch, pitchf = self.get_f0(\n input_audio_path,\n audio_pad,\n p_len,\n f0_up_key,\n f0_method,\n filter_radius,\n inp_f0,\n )\n pitch = pitch[:p_len]\n pitchf = pitchf[:p_len]\n if \"mps\" not in str(self.device) or \"xpu\" not in str(self.device):\n pitchf = pitchf.astype(np.float32)\n pitch = torch.tensor(pitch, device=self.device).unsqueeze(0).long()\n pitchf = torch.tensor(pitchf, device=self.device).unsqueeze(0).float()\n t2 = ttime()\n times[1] += t2 - t1\n for t in opt_ts:\n t = t // self.window * self.window\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n pitch[:, s // self.window : (t + self.t_pad2) // self.window],\n pitchf[:, s // self.window : (t + self.t_pad2) // self.window],\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[s : t + self.t_pad2 + self.window],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n s = t\n if if_f0 == 1:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n pitch[:, t // self.window :] if t is not None else pitch,\n pitchf[:, t // self.window :] if t is not None else pitchf,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n else:\n audio_opt.append(\n self.vc(\n model,\n net_g,\n sid,\n audio_pad[t:],\n None,\n None,\n times,\n index,\n big_npy,\n index_rate,\n version,\n protect,\n )[self.t_pad_tgt : -self.t_pad_tgt]\n )\n audio_opt = np.concatenate(audio_opt)\n if rms_mix_rate != 1:\n audio_opt = change_rms(audio, 16000, audio_opt, tgt_sr, rms_mix_rate)\n if tgt_sr != resample_sr >= 16000:\n audio_opt = librosa.resample(\n audio_opt, orig_sr=tgt_sr, target_sr=resample_sr\n )\n audio_max = np.abs(audio_opt).max() / 0.99\n max_int16 = 32768\n if audio_max > 1:\n max_int16 /= audio_max\n audio_opt = (audio_opt * max_int16).astype(np.int16)\n del pitch, pitchf, sid\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n return audio_opt" } ]
import traceback import logging import numpy as np import soundfile as sf import torch from io import BytesIO from rvc_python.lib.audio import load_audio, wav2 from rvc_python.lib.infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from rvc_python.modules.vc.pipeline import Pipeline from rvc_python.modules.vc.utils import *
10,867
file_index = ( file_index.strip(" ") .strip('"') .strip("\n") .strip('"') .strip(" ") .replace("trained", "added") ) elif file_index2: file_index = file_index2 else: file_index = "" # 防止小白写错,自动帮他替换掉 audio_opt = self.pipeline.pipeline( self.hubert_model, self.net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, file_index, index_rate, self.if_f0, filter_radius, self.tgt_sr, resample_sr, rms_mix_rate, self.version, protect, f0_file, ) if self.tgt_sr != resample_sr >= 16000: tgt_sr = resample_sr else: tgt_sr = self.tgt_sr index_info = ( "Index:\n%s." % file_index if os.path.exists(file_index) else "Index not used." ) return audio_opt except: info = traceback.format_exc() logger.warning(info) return info, (None, None) def vc_multi( self, sid, dir_path, opt_root, paths, f0_up_key, f0_method, file_index, file_index2, index_rate, filter_radius, resample_sr, rms_mix_rate, protect, format1, ): try: dir_path = ( dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") ) # 防止小白拷路径头尾带了空格和"和回车 opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") os.makedirs(opt_root, exist_ok=True) try: if dir_path != "": paths = [ os.path.join(dir_path, name) for name in os.listdir(dir_path) ] else: paths = [path.name for path in paths] except: traceback.print_exc() paths = [path.name for path in paths] infos = [] print(paths) for path in paths: info, opt = self.vc_single( sid, path, f0_up_key, None, f0_method, file_index, file_index2, # file_big_npy, index_rate, filter_radius, resample_sr, rms_mix_rate, protect, ) print(info) if "Success" in info: try: tgt_sr, audio_opt = opt if format1 in ["wav", "flac"]: sf.write( "%s/%s.%s" % (opt_root, os.path.basename(path), format1), audio_opt, tgt_sr, ) else: path = "%s/%s.%s" % ( opt_root, os.path.basename(path), format1, ) with BytesIO() as wavf: sf.write(wavf, audio_opt, tgt_sr, format="wav") wavf.seek(0, 0) with open(path, "wb") as outf:
logger = logging.getLogger(__name__) class VC: def __init__(self, lib_dir, config): self.lib_dir = lib_dir self.n_spk = None self.tgt_sr = None self.net_g = None self.pipeline = None self.cpt = None self.version = None self.if_f0 = None self.version = None self.hubert_model = None self.config = config def get_vc(self,sid,version = "v2", *to_return_protect): # logger.info("Get sid: " + sid) to_return_protect0 = { "visible": self.if_f0 != 0, "value": to_return_protect[0] if self.if_f0 != 0 and to_return_protect else 0.5, "__type__": "update", } to_return_protect1 = { "visible": self.if_f0 != 0, "value": to_return_protect[1] if self.if_f0 != 0 and to_return_protect else 0.33, "__type__": "update", } if sid == "" or sid == []: if self.hubert_model is not None: # 考虑到轮询, 需要加个判断看是否 sid 是由有模型切换到无模型的 logger.info("Clean model cache") del (self.net_g, self.n_spk, self.hubert_model, self.tgt_sr) # ,cpt self.hubert_model = ( self.net_g ) = self.n_spk = self.hubert_model = self.tgt_sr = None if torch.cuda.is_available(): torch.cuda.empty_cache() ###楼下不这么折腾清理不干净 self.if_f0 = self.cpt.get("f0", 1) self.version = self.cpt.get("version", "v1") if self.version == "v1": if self.if_f0 == 1: self.net_g = SynthesizerTrnMs256NSFsid( *self.cpt["config"], is_half=self.config.is_half ) else: self.net_g = SynthesizerTrnMs256NSFsid_nono(*self.cpt["config"]) elif self.version == "v2": if self.if_f0 == 1: self.net_g = SynthesizerTrnMs768NSFsid( *self.cpt["config"], is_half=self.config.is_half ) else: self.net_g = SynthesizerTrnMs768NSFsid_nono(*self.cpt["config"]) del self.net_g, self.cpt if torch.cuda.is_available(): torch.cuda.empty_cache() return ( {"visible": False, "__type__": "update"}, { "visible": True, "value": to_return_protect0, "__type__": "update", }, { "visible": True, "value": to_return_protect1, "__type__": "update", }, "", "", ) person = f'{sid}' logger.info(f"Loading: {person}") # print(sid,person) self.cpt = torch.load(sid, map_location="cpu") self.tgt_sr = self.cpt["config"][-1] self.cpt["config"][-3] = self.cpt["weight"]["emb_g.weight"].shape[0] # n_spk self.if_f0 = self.cpt.get("f0", 1) self.version = version synthesizer_class = { ("v1", 1): SynthesizerTrnMs256NSFsid, ("v1", 0): SynthesizerTrnMs256NSFsid_nono, ("v2", 1): SynthesizerTrnMs768NSFsid, ("v2", 0): SynthesizerTrnMs768NSFsid_nono, } self.net_g = synthesizer_class.get( (self.version, self.if_f0), SynthesizerTrnMs256NSFsid )(*self.cpt["config"], is_half=self.config.is_half) del self.net_g.enc_q self.net_g.load_state_dict(self.cpt["weight"], strict=False) self.net_g.eval().to(self.config.device) if self.config.is_half: self.net_g = self.net_g.half() else: self.net_g = self.net_g.float() self.pipeline = Pipeline(self.tgt_sr, self.config,lib_dir=self.lib_dir) n_spk = self.cpt["config"][-3] return ( ( {"visible": True, "maximum": n_spk, "__type__": "update"}, to_return_protect0, to_return_protect1, ) if to_return_protect else {"visible": True, "maximum": n_spk, "__type__": "update"} ) def vc_single( self, sid, input_audio_path, f0_up_key, f0_file, f0_method, file_index, file_index2, index_rate, filter_radius, resample_sr, rms_mix_rate, protect, ): if input_audio_path is None: return "You need to upload an audio", None f0_up_key = int(f0_up_key) try: audio = load_audio(input_audio_path, 16000) audio_max = np.abs(audio).max() / 0.95 if audio_max > 1: audio /= audio_max times = [0, 0, 0] if self.hubert_model is None: self.hubert_model = load_hubert(self.config,self.lib_dir) if file_index: file_index = ( file_index.strip(" ") .strip('"') 
.strip("\n") .strip('"') .strip(" ") .replace("trained", "added") ) elif file_index2: file_index = file_index2 else: file_index = "" # 防止小白写错,自动帮他替换掉 audio_opt = self.pipeline.pipeline( self.hubert_model, self.net_g, sid, audio, input_audio_path, times, f0_up_key, f0_method, file_index, index_rate, self.if_f0, filter_radius, self.tgt_sr, resample_sr, rms_mix_rate, self.version, protect, f0_file, ) if self.tgt_sr != resample_sr >= 16000: tgt_sr = resample_sr else: tgt_sr = self.tgt_sr index_info = ( "Index:\n%s." % file_index if os.path.exists(file_index) else "Index not used." ) return audio_opt except: info = traceback.format_exc() logger.warning(info) return info, (None, None) def vc_multi( self, sid, dir_path, opt_root, paths, f0_up_key, f0_method, file_index, file_index2, index_rate, filter_radius, resample_sr, rms_mix_rate, protect, format1, ): try: dir_path = ( dir_path.strip(" ").strip('"').strip("\n").strip('"').strip(" ") ) # 防止小白拷路径头尾带了空格和"和回车 opt_root = opt_root.strip(" ").strip('"').strip("\n").strip('"').strip(" ") os.makedirs(opt_root, exist_ok=True) try: if dir_path != "": paths = [ os.path.join(dir_path, name) for name in os.listdir(dir_path) ] else: paths = [path.name for path in paths] except: traceback.print_exc() paths = [path.name for path in paths] infos = [] print(paths) for path in paths: info, opt = self.vc_single( sid, path, f0_up_key, None, f0_method, file_index, file_index2, # file_big_npy, index_rate, filter_radius, resample_sr, rms_mix_rate, protect, ) print(info) if "Success" in info: try: tgt_sr, audio_opt = opt if format1 in ["wav", "flac"]: sf.write( "%s/%s.%s" % (opt_root, os.path.basename(path), format1), audio_opt, tgt_sr, ) else: path = "%s/%s.%s" % ( opt_root, os.path.basename(path), format1, ) with BytesIO() as wavf: sf.write(wavf, audio_opt, tgt_sr, format="wav") wavf.seek(0, 0) with open(path, "wb") as outf:
wav2(wavf, outf, format1)
1
2023-12-26 19:05:42+00:00
16k
open-mmlab/Amphion
modules/wenet_extractor/transformer/encoder.py
[ { "identifier": "MultiHeadedAttention", "path": "modules/wenet_extractor/transformer/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head: int, n_feat: int, dropout_rate: float):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super().__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(\n self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor, size\n (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor, size\n (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor, size\n (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(\n self,\n value: torch.Tensor,\n scores: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> torch.Tensor:\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value, size\n (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score, size\n (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask, size (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be True?\n # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the\n # 1st chunk to ease the onnx export.]\n # 2. pytorch training\n if mask.size(2) > 0: # time2 > 0\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n # For last chunk, time2 might be larger than scores.size(-1)\n mask = mask[:, :, :, : scores.size(-1)] # (batch, 1, *, time2)\n scores = scores.masked_fill(mask, -float(\"inf\"))\n attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be False?\n # 1. onnx(16/-1, -1/-1, 16/0)\n # 2. 
jit (16/-1, -1/-1, 16/0, 16/4)\n else:\n attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n 1.When applying cross attention between decoder and encoder,\n the batch padding mask for input is in (#batch, 1, T) shape.\n 2.When applying self attention of encoder,\n the mask is in (#batch, T, T) shape.\n 3.When applying self attention of decoder,\n the mask is in (#batch, L, L) shape.\n 4.If the different position in decoder see different block\n of the encoder, such as Mocha, the passed in mask could be\n in (#batch, L, T) shape. But there is no such case in current\n Wenet.\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). 
Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "modules/wenet_extractor/transformer/attention.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding.\n Paper: https://arxiv.org/abs/1901.02860\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x, zero_triu: bool = False):\n \"\"\"Compute relative positinal encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, size).\n zero_triu (bool): If true, return the lower triangular part of\n the matrix.\n Returns:\n torch.Tensor: Output tensor.\n \"\"\"\n\n zero_pad = torch.zeros(\n (x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype\n )\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)\n\n if zero_triu:\n ones = torch.ones((x.size(2), x.size(3)))\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): Positional embedding tensor\n (#batch, time2, size).\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time2)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n # Remove rel_shift since it is useless in speech recognition,\n # and it requires special attention for streaming.\n # matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "ConvolutionModule", "path": "modules/wenet_extractor/transformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\n \"\"\"ConvolutionModule in Conformer model.\"\"\"\n\n def __init__(\n self,\n channels: int,\n kernel_size: int = 15,\n activation: nn.Module = nn.ReLU(),\n norm: str = \"batch_norm\",\n causal: bool = False,\n bias: bool = True,\n ):\n \"\"\"Construct an ConvolutionModule object.\n Args:\n channels (int): The number of channels of conv layers.\n kernel_size (int): Kernel size of conv layers.\n causal (int): 
Whether use causal convolution or not\n \"\"\"\n super().__init__()\n\n self.pointwise_conv1 = nn.Conv1d(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n # self.lorder is used to distinguish if it's a causal convolution,\n # if self.lorder > 0: it's a causal convolution, the input will be\n # padded with self.lorder frames on the left in forward.\n # else: it's a symmetrical convolution\n if causal:\n padding = 0\n self.lorder = kernel_size - 1\n else:\n # kernel_size should be an odd number for none causal convolution\n assert (kernel_size - 1) % 2 == 0\n padding = (kernel_size - 1) // 2\n self.lorder = 0\n self.depthwise_conv = nn.Conv1d(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=padding,\n groups=channels,\n bias=bias,\n )\n\n assert norm in [\"batch_norm\", \"layer_norm\"]\n if norm == \"batch_norm\":\n self.use_layer_norm = False\n self.norm = nn.BatchNorm1d(channels)\n else:\n self.use_layer_norm = True\n self.norm = nn.LayerNorm(channels)\n\n self.pointwise_conv2 = nn.Conv1d(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.activation = activation\n\n def forward(\n self,\n x: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n cache: torch.Tensor = torch.zeros((0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute convolution module.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, channels).\n mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),\n (0, 0, 0) means fake mask.\n cache (torch.Tensor): left context cache, it is only\n used in causal convolution (#batch, channels, cache_t),\n (0, 0, 0) meas fake cache.\n Returns:\n torch.Tensor: Output tensor (#batch, time, channels).\n \"\"\"\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2) # (#batch, channels, time)\n\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n if self.lorder > 0:\n if cache.size(2) == 0: # cache_t == 0\n x = nn.functional.pad(x, (self.lorder, 0), \"constant\", 0.0)\n else:\n assert cache.size(0) == x.size(0) # equal batch\n assert cache.size(1) == x.size(1) # equal channel\n x = torch.cat((cache, x), dim=2)\n assert x.size(2) > self.lorder\n new_cache = x[:, :, -self.lorder :]\n else:\n # It's better we just return None if no cache is required,\n # However, for JIT export, here we just fake one tensor instead of\n # None.\n new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.activation(self.norm(x))\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.pointwise_conv2(x)\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n return x.transpose(1, 2), new_cache" }, { "identifier": "PositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\n \"\"\"Positional encoding.\n\n :param int d_model: embedding dim\n :param float dropout_rate: dropout rate\n :param int max_len: maximum input length\n\n PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))\n PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))\n \"\"\"\n\n def __init__(\n self,\n d_model: int,\n dropout_rate: float,\n max_len: int = 5000,\n 
reverse: bool = False,\n ):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super().__init__()\n self.d_model = d_model\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.max_len = max_len\n\n self.pe = torch.zeros(self.max_len, self.d_model)\n position = torch.arange(0, self.max_len, dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.d_model)\n )\n self.pe[:, 0::2] = torch.sin(position * div_term)\n self.pe[:, 1::2] = torch.cos(position * div_term)\n self.pe = self.pe.unsqueeze(0)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input. Its shape is (batch, time, ...)\n offset (int, torch.tensor): position offset\n\n Returns:\n torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)\n torch.Tensor: for compatibility to RelPositionalEncoding\n \"\"\"\n\n self.pe = self.pe.to(x.device)\n pos_emb = self.position_encoding(offset, x.size(1), False)\n x = x * self.xscale + pos_emb\n return self.dropout(x), self.dropout(pos_emb)\n\n def position_encoding(\n self, offset: Union[int, torch.Tensor], size: int, apply_dropout: bool = True\n ) -> torch.Tensor:\n \"\"\"For getting encoding in a streaming fashion\n\n Attention!!!!!\n we apply dropout only once at the whole utterance level in a none\n streaming way, but will call this function several times with\n increasing input size in a streaming scenario, so the dropout will\n be applied several times.\n\n Args:\n offset (int or torch.tensor): start offset\n size (int): required size of position encoding\n\n Returns:\n torch.Tensor: Corresponding encoding\n \"\"\"\n # How to subscript a Union type:\n # https://github.com/pytorch/pytorch/issues/69434\n if isinstance(offset, int):\n assert offset + size < self.max_len\n pos_emb = self.pe[:, offset : offset + size]\n elif isinstance(offset, torch.Tensor) and offset.dim() == 0: # scalar\n assert offset + size < self.max_len\n pos_emb = self.pe[:, offset : offset + size]\n else: # for batched streaming decoding on GPU\n assert torch.max(offset) + size < self.max_len\n index = offset.unsqueeze(1) + torch.arange(0, size).to(\n offset.device\n ) # B X T\n flag = index > 0\n # remove negative offset\n index = index * flag\n pos_emb = F.embedding(index, self.pe[0]) # B X T X d_model\n\n if apply_dropout:\n pos_emb = self.dropout(pos_emb)\n return pos_emb" }, { "identifier": "RelPositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class RelPositionalEncoding(PositionalEncoding):\n \"\"\"Relative positional encoding module.\n See : Appendix B in https://arxiv.org/abs/1901.02860\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n \"\"\"\n\n def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(d_model, dropout_rate, max_len, reverse=True)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute positional encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n torch.Tensor: Positional embedding tensor (1, time, `*`).\n \"\"\"\n self.pe = self.pe.to(x.device)\n x = x * self.xscale\n pos_emb = 
self.position_encoding(offset, x.size(1), False)\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "NoPositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class NoPositionalEncoding(torch.nn.Module):\n \"\"\"No position encoding\"\"\"\n\n def __init__(self, d_model: int, dropout_rate: float):\n super().__init__()\n self.d_model = d_model\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Just return zero vector for interface compatibility\"\"\"\n pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)\n return self.dropout(x), pos_emb\n\n def position_encoding(\n self, offset: Union[int, torch.Tensor], size: int\n ) -> torch.Tensor:\n return torch.zeros(1, size, self.d_model)" }, { "identifier": "TransformerEncoderLayer", "path": "modules/wenet_extractor/transformer/encoder_layer.py", "snippet": "class TransformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer module.\n\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`\n instance can be used as the argument.\n feed_forward (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward`, instance can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool):\n True: use layer_norm before each sub-block.\n False: to use layer_norm after each sub-block.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n self_attn: torch.nn.Module,\n feed_forward: torch.nn.Module,\n dropout_rate: float,\n normalize_before: bool = True,\n ):\n \"\"\"Construct an EncoderLayer object.\"\"\"\n super().__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.norm1 = nn.LayerNorm(size, eps=1e-5)\n self.norm2 = nn.LayerNorm(size, eps=1e-5)\n self.dropout = nn.Dropout(dropout_rate)\n self.size = size\n self.normalize_before = normalize_before\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n pos_emb: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Compute encoded features.\n\n Args:\n x (torch.Tensor): (#batch, time, size)\n mask (torch.Tensor): Mask tensor for the input (#batch, time,time),\n (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): just for interface compatibility\n to ConformerEncoderLayer\n mask_pad (torch.Tensor): does not used in transformer layer,\n just for unified api with conformer.\n att_cache (torch.Tensor): Cache tensor of the KEY & VALUE\n (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.\n cnn_cache (torch.Tensor): Convolution cache in conformer layer\n (#batch=1, size, cache_t2), not used here, it's for interface\n compatibility to ConformerEncoderLayer.\n Returns:\n torch.Tensor: Output tensor (#batch, time, size).\n torch.Tensor: Mask tensor (#batch, time, time).\n torch.Tensor: att_cache tensor,\n (#batch=1, head, cache_t1 + time, d_k * 2).\n torch.Tensor: cnn_cahce tensor (#batch=1, size, cache_t2).\n\n \"\"\"\n residual = x\n if self.normalize_before:\n x = self.norm1(x)\n x_att, new_att_cache = self.self_attn(x, x, x, mask, cache=att_cache)\n x = residual + self.dropout(x_att)\n if not self.normalize_before:\n x = 
self.norm1(x)\n\n residual = x\n if self.normalize_before:\n x = self.norm2(x)\n x = residual + self.dropout(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm2(x)\n\n fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n return x, mask, new_att_cache, fake_cnn_cache" }, { "identifier": "ConformerEncoderLayer", "path": "modules/wenet_extractor/transformer/encoder_layer.py", "snippet": "class ConformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer module.\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`\n instance can be used as the argument.\n feed_forward (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module\n instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n conv_module (torch.nn.Module): Convolution module instance.\n `ConvlutionModule` instance can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool):\n True: use layer_norm before each sub-block.\n False: use layer_norm after each sub-block.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n self_attn: torch.nn.Module,\n feed_forward: Optional[nn.Module] = None,\n feed_forward_macaron: Optional[nn.Module] = None,\n conv_module: Optional[nn.Module] = None,\n dropout_rate: float = 0.1,\n normalize_before: bool = True,\n ):\n \"\"\"Construct an EncoderLayer object.\"\"\"\n super().__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.feed_forward_macaron = feed_forward_macaron\n self.conv_module = conv_module\n self.norm_ff = nn.LayerNorm(size, eps=1e-5) # for the FNN module\n self.norm_mha = nn.LayerNorm(size, eps=1e-5) # for the MHA module\n if feed_forward_macaron is not None:\n self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-5)\n self.ff_scale = 0.5\n else:\n self.ff_scale = 1.0\n if self.conv_module is not None:\n self.norm_conv = nn.LayerNorm(size, eps=1e-5) # for the CNN module\n self.norm_final = nn.LayerNorm(\n size, eps=1e-5\n ) # for the final output of the block\n self.dropout = nn.Dropout(dropout_rate)\n self.size = size\n self.normalize_before = normalize_before\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n pos_emb: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Compute encoded features.\n\n Args:\n x (torch.Tensor): (#batch, time, size)\n mask (torch.Tensor): Mask tensor for the input (#batch, time,time),\n (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): positional encoding, must not be None\n for ConformerEncoderLayer.\n mask_pad (torch.Tensor): batch padding mask used for conv module.\n (#batch, 1,time), (0, 0, 0) means fake mask.\n att_cache (torch.Tensor): Cache tensor of the KEY & VALUE\n (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.\n cnn_cache (torch.Tensor): Convolution cache in conformer layer\n (#batch=1, size, cache_t2)\n Returns:\n torch.Tensor: Output tensor (#batch, time, size).\n torch.Tensor: Mask tensor (#batch, time, time).\n torch.Tensor: att_cache tensor,\n (#batch=1, head, cache_t1 + time, d_k * 2).\n torch.Tensor: cnn_cahce tensor (#batch, size, cache_t2).\n \"\"\"\n\n # 
whether to use macaron style\n if self.feed_forward_macaron is not None:\n residual = x\n if self.normalize_before:\n x = self.norm_ff_macaron(x)\n x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))\n if not self.normalize_before:\n x = self.norm_ff_macaron(x)\n\n # multi-headed self-attention module\n residual = x\n if self.normalize_before:\n x = self.norm_mha(x)\n x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb, att_cache)\n x = residual + self.dropout(x_att)\n if not self.normalize_before:\n x = self.norm_mha(x)\n\n # convolution module\n # Fake new cnn cache here, and then change it in conv_module\n new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n if self.conv_module is not None:\n residual = x\n if self.normalize_before:\n x = self.norm_conv(x)\n x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)\n x = residual + self.dropout(x)\n\n if not self.normalize_before:\n x = self.norm_conv(x)\n\n # feed forward module\n residual = x\n if self.normalize_before:\n x = self.norm_ff(x)\n\n x = residual + self.ff_scale * self.dropout(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm_ff(x)\n\n if self.conv_module is not None:\n x = self.norm_final(x)\n\n return x, mask, new_att_cache, new_cnn_cache" }, { "identifier": "PositionwiseFeedForward", "path": "modules/wenet_extractor/transformer/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n FeedForward are appied on each position of the sequence.\n The output dim is same with the input dim.\n\n Args:\n idim (int): Input dimenstion.\n hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n activation (torch.nn.Module): Activation function\n \"\"\"\n\n def __init__(\n self,\n idim: int,\n hidden_units: int,\n dropout_rate: float,\n activation: torch.nn.Module = torch.nn.ReLU(),\n ):\n \"\"\"Construct a PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.activation = activation\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.w_2 = torch.nn.Linear(hidden_units, idim)\n\n def forward(self, xs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward function.\n\n Args:\n xs: input tensor (B, L, D)\n Returns:\n output tensor, (B, L, D)\n \"\"\"\n return self.w_2(self.dropout(self.activation(self.w_1(xs))))" }, { "identifier": "Conv2dSubsampling4", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class Conv2dSubsampling4(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an Conv2dSubsampling4 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim)\n )\n self.pos_enc = pos_enc_class\n # The right context for every conv layer is computed by:\n # (kernel_size - 1) * frame_rate_of_this_layer\n self.subsampling_rate = 4\n # 6 = (3 - 1) * 1 + (3 - 1) * 2\n self.right_context = 6\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, 
torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 4.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 4.\n torch.Tensor: positional encoding\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c=1, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]" }, { "identifier": "Conv2dSubsampling6", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class Conv2dSubsampling6(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/6 length).\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an Conv2dSubsampling6 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 5, 3),\n torch.nn.ReLU(),\n )\n self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim)\n self.pos_enc = pos_enc_class\n # 10 = (3 - 1) * 1 + (5 - 1) * 2\n self.subsampling_rate = 6\n self.right_context = 10\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 6.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 6.\n torch.Tensor: positional encoding\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]" }, { "identifier": "Conv2dSubsampling8", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class Conv2dSubsampling8(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/8 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an Conv2dSubsampling8 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.linear = torch.nn.Linear(\n odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim\n )\n self.pos_enc = pos_enc_class\n self.subsampling_rate = 8\n # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4\n self.right_context = 14\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n 
x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 8.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 8.\n torch.Tensor: positional encoding\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]" }, { "identifier": "LinearNoSubsampling", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class LinearNoSubsampling(BaseSubsampling):\n \"\"\"Linear transform the input without subsampling\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an linear object.\"\"\"\n super().__init__()\n self.out = torch.nn.Sequential(\n torch.nn.Linear(idim, odim),\n torch.nn.LayerNorm(odim, eps=1e-5),\n torch.nn.Dropout(dropout_rate),\n )\n self.pos_enc = pos_enc_class\n self.right_context = 0\n self.subsampling_rate = 1\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Input x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: linear input tensor (#batch, time', odim),\n where time' = time .\n torch.Tensor: linear input mask (#batch, 1, time'),\n where time' = time .\n\n \"\"\"\n x = self.out(x)\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask" }, { "identifier": "get_activation", "path": "modules/wenet_extractor/utils/common.py", "snippet": "def get_activation(act):\n \"\"\"Return activation function.\"\"\"\n # Lazy load to avoid unused import\n from modules.wenet_extractor.transformer.swish import Swish\n\n activation_funcs = {\n \"hardtanh\": torch.nn.Hardtanh,\n \"tanh\": torch.nn.Tanh,\n \"relu\": torch.nn.ReLU,\n \"selu\": torch.nn.SELU,\n \"swish\": getattr(torch.nn, \"SiLU\", Swish),\n \"gelu\": torch.nn.GELU,\n }\n\n return activation_funcs[act]()" }, { "identifier": "make_pad_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:\n \"\"\"Make mask tensor containing indices of padded part.\n\n See description of make_non_pad_mask.\n\n Args:\n lengths (torch.Tensor): Batch of lengths (B,).\n Returns:\n torch.Tensor: Mask tensor containing indices of padded part.\n\n Examples:\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n \"\"\"\n batch_size = lengths.size(0)\n max_len = max_len if max_len > 0 else lengths.max().item()\n seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_length_expand = lengths.unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n return mask" }, { "identifier": "add_optional_chunk_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def add_optional_chunk_mask(\n xs: torch.Tensor,\n masks: torch.Tensor,\n use_dynamic_chunk: bool,\n use_dynamic_left_chunk: bool,\n decoding_chunk_size: int,\n static_chunk_size: int,\n 
num_decoding_left_chunks: int,\n):\n \"\"\"Apply optional mask for encoder.\n\n Args:\n xs (torch.Tensor): padded input, (B, L, D), L for max length\n mask (torch.Tensor): mask for xs, (B, 1, L)\n use_dynamic_chunk (bool): whether to use dynamic chunk or not\n use_dynamic_left_chunk (bool): whether to use dynamic left chunk for\n training.\n decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's\n 0: default for training, use random dynamic chunk.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n static_chunk_size (int): chunk size for static chunk training/decoding\n if it's greater than 0, if use_dynamic_chunk is true,\n this parameter will be ignored\n num_decoding_left_chunks: number of left chunks, this is for decoding,\n the chunk size is decoding_chunk_size.\n >=0: use num_decoding_left_chunks\n <0: use all left chunks\n\n Returns:\n torch.Tensor: chunk mask of the input xs.\n \"\"\"\n # Whether to use chunk mask or not\n if use_dynamic_chunk:\n max_len = xs.size(1)\n if decoding_chunk_size < 0:\n chunk_size = max_len\n num_left_chunks = -1\n elif decoding_chunk_size > 0:\n chunk_size = decoding_chunk_size\n num_left_chunks = num_decoding_left_chunks\n else:\n # chunk size is either [1, 25] or full context(max_len).\n # Since we use 4 times subsampling and allow up to 1s(100 frames)\n # delay, the maximum frame is 100 / 4 = 25.\n chunk_size = torch.randint(1, max_len, (1,)).item()\n num_left_chunks = -1\n if chunk_size > max_len // 2:\n chunk_size = max_len\n else:\n chunk_size = chunk_size % 25 + 1\n if use_dynamic_left_chunk:\n max_left_chunks = (max_len - 1) // chunk_size\n num_left_chunks = torch.randint(0, max_left_chunks, (1,)).item()\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n elif static_chunk_size > 0:\n num_left_chunks = num_decoding_left_chunks\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), static_chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n else:\n chunk_masks = masks\n return chunk_masks" } ]
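One detail worth noting from the ConvolutionModule snippet in the context list above is that causal and symmetric depthwise convolutions are distinguished purely by padding: a causal layer left-pads kernel_size - 1 frames (self.lorder), a symmetric layer pads (kernel_size - 1) // 2 on both sides. A minimal sketch of that padding arithmetic, with toy tensor sizes that are not taken from the record:

import torch
import torch.nn as nn
import torch.nn.functional as F

channels, kernel_size = 4, 15
depthwise = nn.Conv1d(channels, channels, kernel_size, groups=channels, padding=0)
x = torch.randn(2, channels, 50)  # (batch, channels, time)

# causal: pad only on the left, so the convolution never sees future frames
lorder = kernel_size - 1
y_causal = depthwise(F.pad(x, (lorder, 0), "constant", 0.0))
assert y_causal.shape[-1] == x.shape[-1]

# symmetric: pad both sides; kernel_size must be odd for the lengths to match
pad = (kernel_size - 1) // 2
y_sym = depthwise(F.pad(x, (pad, pad), "constant", 0.0))
assert y_sym.shape[-1] == x.shape[-1]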
from typing import Tuple from modules.wenet_extractor.transformer.attention import MultiHeadedAttention from modules.wenet_extractor.transformer.attention import ( RelPositionMultiHeadedAttention, ) from modules.wenet_extractor.transformer.convolution import ConvolutionModule from modules.wenet_extractor.transformer.embedding import PositionalEncoding from modules.wenet_extractor.transformer.embedding import RelPositionalEncoding from modules.wenet_extractor.transformer.embedding import NoPositionalEncoding from modules.wenet_extractor.transformer.encoder_layer import TransformerEncoderLayer from modules.wenet_extractor.transformer.encoder_layer import ConformerEncoderLayer from modules.wenet_extractor.transformer.positionwise_feed_forward import ( PositionwiseFeedForward, ) from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling4 from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling6 from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling8 from modules.wenet_extractor.transformer.subsampling import LinearNoSubsampling from modules.wenet_extractor.utils.common import get_activation from modules.wenet_extractor.utils.mask import make_pad_mask from modules.wenet_extractor.utils.mask import add_optional_chunk_mask import torch
14261
"""Encoder definition.""" class BaseEncoder(torch.nn.Module): def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: str = "conv2d", pos_enc_layer_type: str = "abs_pos", normalize_before: bool = True, static_chunk_size: int = 0, use_dynamic_chunk: bool = False, global_cmvn: torch.nn.Module = None, use_dynamic_left_chunk: bool = False, ): """ Args: input_size (int): input dim output_size (int): dimension of attention attention_heads (int): the number of heads of multi head attention linear_units (int): the hidden units number of position-wise feed forward num_blocks (int): the number of decoder blocks dropout_rate (float): dropout rate attention_dropout_rate (float): dropout rate in attention positional_dropout_rate (float): dropout rate after adding positional encoding input_layer (str): input layer type. optional [linear, conv2d, conv2d6, conv2d8] pos_enc_layer_type (str): Encoder positional encoding layer type. opitonal [abs_pos, scaled_abs_pos, rel_pos, no_pos] normalize_before (bool): True: use layer_norm before each sub-block of a layer. False: use layer_norm after each sub-block of a layer. static_chunk_size (int): chunk size for static chunk training and decoding use_dynamic_chunk (bool): whether use dynamic chunk size for training or not, You can only use fixed chunk(chunk_size > 0) or dyanmic chunk size(use_dynamic_chunk = True) global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module use_dynamic_left_chunk (bool): whether use dynamic left chunk in dynamic chunk training """ super().__init__() self._output_size = output_size if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "rel_pos": pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "no_pos": pos_enc_class = NoPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) if input_layer == "linear": subsampling_class = LinearNoSubsampling elif input_layer == "conv2d": subsampling_class = Conv2dSubsampling4 elif input_layer == "conv2d6": subsampling_class = Conv2dSubsampling6 elif input_layer == "conv2d8": subsampling_class = Conv2dSubsampling8 else: raise ValueError("unknown input_layer: " + input_layer) self.global_cmvn = global_cmvn self.embed = subsampling_class( input_size, output_size, dropout_rate, pos_enc_class(output_size, positional_dropout_rate), ) self.normalize_before = normalize_before self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5) self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk def output_size(self) -> int: return self._output_size def forward( self, xs: torch.Tensor, xs_lens: torch.Tensor, decoding_chunk_size: int = 0, num_decoding_left_chunks: int = -1, ) -> Tuple[torch.Tensor, torch.Tensor]: """Embed positions in tensor. Args: xs: padded input tensor (B, T, D) xs_lens: input length (B) decoding_chunk_size: decoding chunk size for dynamic chunk 0: default for training, use random dynamic chunk. <0: for decoding, use full chunk. >0: for decoding, use fixed chunk size as set. num_decoding_left_chunks: number of left chunks, this is for decoding, the chunk size is decoding_chunk_size. 
>=0: use num_decoding_left_chunks <0: use all left chunks Returns: encoder output tensor xs, and subsampled masks xs: padded output tensor (B, T' ~= T/subsample_rate, D) masks: torch.Tensor batch padding mask after subsample (B, 1, T' ~= T/subsample_rate) """ T = xs.size(1)
# This module is from [WeNet](https://github.com/wenet-e2e/wenet). # ## Citations # ```bibtex # @inproceedings{yao2021wenet, # title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit}, # author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin}, # booktitle={Proc. Interspeech}, # year={2021}, # address={Brno, Czech Republic }, # organization={IEEE} # } # @article{zhang2022wenet, # title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit}, # author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei}, # journal={arXiv preprint arXiv:2203.15455}, # year={2022} # } # """Encoder definition.""" class BaseEncoder(torch.nn.Module): def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: str = "conv2d", pos_enc_layer_type: str = "abs_pos", normalize_before: bool = True, static_chunk_size: int = 0, use_dynamic_chunk: bool = False, global_cmvn: torch.nn.Module = None, use_dynamic_left_chunk: bool = False, ): """ Args: input_size (int): input dim output_size (int): dimension of attention attention_heads (int): the number of heads of multi head attention linear_units (int): the hidden units number of position-wise feed forward num_blocks (int): the number of decoder blocks dropout_rate (float): dropout rate attention_dropout_rate (float): dropout rate in attention positional_dropout_rate (float): dropout rate after adding positional encoding input_layer (str): input layer type. optional [linear, conv2d, conv2d6, conv2d8] pos_enc_layer_type (str): Encoder positional encoding layer type. opitonal [abs_pos, scaled_abs_pos, rel_pos, no_pos] normalize_before (bool): True: use layer_norm before each sub-block of a layer. False: use layer_norm after each sub-block of a layer. 
static_chunk_size (int): chunk size for static chunk training and decoding use_dynamic_chunk (bool): whether use dynamic chunk size for training or not, You can only use fixed chunk(chunk_size > 0) or dyanmic chunk size(use_dynamic_chunk = True) global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module use_dynamic_left_chunk (bool): whether use dynamic left chunk in dynamic chunk training """ super().__init__() self._output_size = output_size if pos_enc_layer_type == "abs_pos": pos_enc_class = PositionalEncoding elif pos_enc_layer_type == "rel_pos": pos_enc_class = RelPositionalEncoding elif pos_enc_layer_type == "no_pos": pos_enc_class = NoPositionalEncoding else: raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type) if input_layer == "linear": subsampling_class = LinearNoSubsampling elif input_layer == "conv2d": subsampling_class = Conv2dSubsampling4 elif input_layer == "conv2d6": subsampling_class = Conv2dSubsampling6 elif input_layer == "conv2d8": subsampling_class = Conv2dSubsampling8 else: raise ValueError("unknown input_layer: " + input_layer) self.global_cmvn = global_cmvn self.embed = subsampling_class( input_size, output_size, dropout_rate, pos_enc_class(output_size, positional_dropout_rate), ) self.normalize_before = normalize_before self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5) self.static_chunk_size = static_chunk_size self.use_dynamic_chunk = use_dynamic_chunk self.use_dynamic_left_chunk = use_dynamic_left_chunk def output_size(self) -> int: return self._output_size def forward( self, xs: torch.Tensor, xs_lens: torch.Tensor, decoding_chunk_size: int = 0, num_decoding_left_chunks: int = -1, ) -> Tuple[torch.Tensor, torch.Tensor]: """Embed positions in tensor. Args: xs: padded input tensor (B, T, D) xs_lens: input length (B) decoding_chunk_size: decoding chunk size for dynamic chunk 0: default for training, use random dynamic chunk. <0: for decoding, use full chunk. >0: for decoding, use fixed chunk size as set. num_decoding_left_chunks: number of left chunks, this is for decoding, the chunk size is decoding_chunk_size. >=0: use num_decoding_left_chunks <0: use all left chunks Returns: encoder output tensor xs, and subsampled masks xs: padded output tensor (B, T' ~= T/subsample_rate, D) masks: torch.Tensor batch padding mask after subsample (B, 1, T' ~= T/subsample_rate) """ T = xs.size(1)
masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T)
14
2023-11-15 09:19:27+00:00
16k
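The next_line of the record above builds the batch padding mask by inverting make_pad_mask, whose full definition appears in the record's context list. A minimal, self-contained sketch of that construction, reusing the example lengths from the snippet's own docstring:

import torch


def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    # True where a frame is padding, as in the snippet above
    batch_size = lengths.size(0)
    max_len = max_len if max_len > 0 else int(lengths.max().item())
    seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)
    return seq_range.unsqueeze(0).expand(batch_size, max_len) >= lengths.unsqueeze(-1)


lengths = torch.tensor([5, 3, 2])
pad_mask = make_pad_mask(lengths)      # True on padded positions
masks = ~pad_mask.unsqueeze(1)         # (B, 1, T): True on valid frames, matching the next_line
print(pad_mask.int())
# tensor([[0, 0, 0, 0, 0],
#         [0, 0, 0, 1, 1],
#         [0, 0, 1, 1, 1]])
print(masks.shape)  # torch.Size([3, 1, 5])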
BobaZooba/xllm
tests/unit/run/test_train.py
[ { "identifier": "Config", "path": "src/xllm/core/config.py", "snippet": "class Config:\n \"\"\"\n The `Config` class serves as a comprehensive configuration schema for managing various parameters required during\n the setup and execution of experiments relating to language models, such as training, quantization, and\n optimization.\n\n Write more here:\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#config\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#detailed-config-explanation\n\n This dataclass is used to encapsulate and standardize the configuration for a diverse range of tasks including\n dataset preparation, tokenizer and model initialization, training, evaluation, and interactions with remote services\n like the Hugging Face Model Hub.\n\n Attributes in this class cover aspects like model name and path, tokenizer settings, dataset paths, training\n strategies, quantization methods, hardware acceleration, logging, output directories, and more. The class provides\n properties with custom logic to resolve specific configurations and validation checks to ensure the environment is\n appropriately set up before proceeding with the workflow.\n\n Customization and flexibility are core to this class, as it provides reasonable defaults while also allowing for\n detailed and scalable configurations catered to advanced tasks such as leveraging LoRA, FSDP, deepspeed stage\n setups, and applying incremental quantization techniques like GPTQ and bits-and-bytes.\n\n Methods within the class include:\n - `check`: Performs checks across various attributes for compatibility and correctness.\n - Property getters such as `correct_tokenizer_name_or_path`, `lora_target_modules`, `dtype`, `deepspeed`, `fsdp`,\n and `lora_model_name_or_path_for_fusing` to fetch calculated or defaulted values based on attribute settings.\n\n Subclassing can be done to extend or modify the functionality of the `Config` class to address specific experimental\n scenarios or customized workflows. 
It is the central piece for orchestrating experimental setups and is intimately\n integrated with the rest of the codebase that operates on top of these configurations.\n\n Attributes:\n\n General Settings:\n - `experiment_key`: An enumeration key to specify the experiment type.\n - `save_safetensors`: A boolean value to indicate whether to use safe serialization for tensors.\n - `max_shard_size`: The maximum shard size when pushing the model to the HuggingFace Hub.\n - `local_rank`: Local rank for distributed training, used for logging and saving.\n - `use_gradient_checkpointing`: If set to `True`, enables gradient checkpointing to reduce memory usage at\n the cost of a slower backward pass.\n - `trainer_key`: An enumeration key to select the trainer using the trainers_registry.\n - `force_fp32`: Forces loading the model in fp32 precision, if set to `True`.\n - `force_fp16`: Forces loading the model in fp16 precision, if set to `True`.\n - `from_gptq`: Indicates if a GPTQ quantized model is being loaded.\n - `huggingface_hub_token`: Token for uploading models to HuggingFace Hub.\n - `deepspeed_stage`: Predefined DeepSpeed stage for optimization.\n - `deepspeed_config_path`: Path to the DeepSpeed config file.\n - `fsdp_strategy`: The strategy to be used for Fully Sharded Data Parallelism (FSDP).\n - `fsdp_offload`: If set to `True`, offloads weights to CPU when using FSDP to save memory.\n - `seed`: Seed for random number generators to ensure reproducibility.\n - `stabilize`: Converts some model weights to fp32 and others to bf16 for stabilization.\n - `path_to_env_file`: Custom path to the .env file for reading environment variables.\n\n Data Preparation:\n - `prepare_dataset`: Flags whether to prepare the dataset during the \"prepare\" stage.\n\n LoRA Fusing:\n - `lora_hub_model_id`: Name of the LoRA model on the hub for fusion.\n - `lora_model_local_path`: Local path to LoRA model to be fused.\n - `fused_model_local_path`: Local path to save the fused model.\n - `fuse_after_training`: If `True`, will fuse the model post-training.\n\n GPTQ Quantization:\n - `quantization_dataset_id`: Dataset ID for GPTQ quantization.\n - `quantization_max_samples`: Maximum number of samples to use during GPTQ quantization.\n - `quantized_model_path`: Path to save the GPTQ quantized model.\n - `quantized_hub_model_id`: Name of the model at the hub post-GPTQ quantization.\n - `quantized_hub_private_repo`: If set to `True`, creates a private repository for the quantized model.\n\n Dataset Related:\n - `dataset_key`: Key to select the dataset from the datasets_registry.\n - `train_local_path_to_data`: Local path to the training data file.\n - `eval_local_path_to_data`: Local path to the evaluation data file.\n - `shuffle`: If `True`, shuffles the training data.\n - `max_eval_samples`: Maximum number of examples to use for evaluation.\n - `add_eval_to_train_if_no_path`: If `True`, adds evaluation data to training if there's no separate eval path.\n\n Tokenizer Settings:\n - `tokenizer_name_or_path`: Name or path to the tokenizer.\n - `tokenizer_use_fast`: If `True`, uses the fast version of the tokenizer.\n - `tokenizer_padding_side`: Sets padding side to 'right' or 'left'.\n\n Data Collator Settings:\n - `collator_key`: Key to select the collator from the collators_registry.\n - `max_length`: Maximum sequence length for the model.\n\n Model Configuration:\n - `model_name_or_path`: Name or path to the model to be used.\n - `push_to_hub_bos_add_bos_token`: Adds BOS token when uploading tokenization 
configuration to the hub.\n - `use_flash_attention_2`: Flags the use of flash attention 2.\n - `trust_remote_code`: If `True`, trusts remote code from the HuggingFace Hub.\n - `device_map`: Device map for placing model layers on specific devices.\n - `prepare_model_for_kbit_training`: If `True`, prepares the model for k-bit training.\n\n BitsAndBytes Integration:\n - `load_in_8bit`: Load the model in 8-bit mode using bitsandbytes.\n - `load_in_4bit`: Load the model in 4-bit mode using bitsandbytes.\n - `llm_int8_threshold`: Threshold for detecting outliers in the model weights.\n - `llm_int8_has_fp16_weight`: If `True`, the model will have fp16 weights.\n - `bnb_4bit_use_double_quant`: If `True`, a second quantization step is used for 4-bit weights.\n - `bnb_4bit_quant_type`: Specifies the quantization type used for 4-bit weights.\n - `bnb_quantize_after_model_init`: Determines when the quantization should occur.\n\n GPTQ Specific Parameters:\n - `gptq_bits`: Number of bits for GPTQ quantization.\n - `gptq_group_size`: Group size for GPTQ quantization.\n - `gptq_disable_exllama`: If `True`, disables ExLlama kernels during GPTQ quantization.\n\n LoRA Specific Parameters:\n - `apply_lora`: If `True`, applies LoRA to the model.\n - `lora_rank`: LoRA rank to define the size of the LoRA matrices.\n - `lora_alpha`: Multiplication factor for the resulting LoRA matrix.\n - `lora_dropout`: Dropout rate for LoRA.\n - `raw_lora_target_modules`: Comma-separated string of module names to apply LoRA, or 'all' to apply broadly.\n\n Training Arguments:\n - `output_dir`: Path to save training outputs.\n - `per_device_train_batch_size`: Batch size per device for training.\n - `do_eval`: If `True`, performs evaluation.\n - `per_device_eval_batch_size`: Batch size per device for evaluation.\n - `gradient_accumulation_steps`: Number of steps to accumulate gradients for larger effective batch size.\n - `eval_accumulation_steps`: Number of steps to accumulate gradients during evaluation.\n - `eval_delay`: Delay before the first evaluation.\n - `eval_steps`: Number of update steps between evaluations.\n - `warmup_steps`: Number of steps for learning rate warmup.\n - `max_steps`: Maximum number of training steps.\n - `num_train_epochs`: Number of epochs for training.\n - `learning_rate`: Learning rate for the optimizer.\n - `max_grad_norm`: Gradient clipping threshold.\n - `weight_decay`: Coefficient for weight decay regularization.\n - `label_smoothing_factor`: Label smoothing factor.\n - `logging_steps`: Number of steps between logging intermediate results.\n - `save_steps`: Number of training steps between checkpoints and model upload.\n - `save_total_limit`: Maximum number of checkpoints to keep.\n - `optim`: Optimizer name, overwritten by DeepSpeed if used.\n - `push_to_hub`: If `True`, model checkpoints are uploaded to HuggingFace Hub.\n - `hub_model_id`: Name of the model on the HuggingFace Hub.\n - `hub_private_repo`: If `True`, creates a private repository on the HuggingFace Hub.\n\n Weights & Biases Integration:\n - `report_to_wandb`: If `True`, logs metrics to Weights & Biases.\n - `wandb_api_key`: API key for Weights & Biases.\n - `wandb_project`: Project name on Weights & Biases.\n - `wandb_entity`: Entity name (user or organization) on Weights & Biases.\n\n Example of creating a `Config` object:\n ```python\n config = Config(\n model_name_or_path='gpt2',\n dataset_key='my_dataset',\n gradient_accumulation_steps=8,\n max_length=512,\n deepspeed_stage=\"3\",\n )\n ```\n\n Note:\n - Throughout the 
codebase, `Config` instances are passed around to provide a unified source of configurations\n for various components.\n - It is crucial to ensure all required settings are properly set in a `Config` object before it is utilized,\n particularly when overriding defaults or specifying custom configurations.\n \"\"\"\n\n # general\n experiment_key: str = field(\n default=enums.Experiments.base,\n metadata={\"help\": \"Experiment class key\"},\n )\n save_safetensors: bool = field(\n default=True,\n metadata={\n \"help\": \"Use safe serialization (safe tensors) or not\",\n },\n )\n max_shard_size: str = field(\n default=\"10GB\", metadata={\"help\": \"max_shard_size for the model pushing to the HuggingFace Hub\"}\n )\n local_rank: int = field(\n default=0,\n metadata={\n \"help\": \"Local rank for logging and saving. Works only in distributed training\",\n },\n )\n use_gradient_checkpointing: bool = field(\n default=False,\n metadata={\n \"help\": \"If True, use gradient checkpointing to save memory at the expense of slower backward pass\",\n },\n )\n trainer_key: str = field(\n default=enums.Trainers.lm,\n metadata={\n \"help\": \"Key of the trainer for loading from trainers_registry\",\n },\n )\n force_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp32 when model loading\",\n },\n )\n force_fp16: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp16 when model loading\",\n },\n )\n from_gptq: bool = field(\n default=False,\n metadata={\n \"help\": \"If you loadining GPTQ quantized model\",\n },\n )\n huggingface_hub_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"HuggingFace Hub token. You can also set this key using .env file\",\n },\n )\n single_gpu: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Indicates that you are learning on the same GPU. It is necessary to use DeepSpeed on a single GPU\",\n },\n )\n master_port: int = field(\n default=9994,\n metadata={\n \"help\": \"Master port for running DeepSpeed on a single GPU. Modify if RuntimeError: Address already in use\",\n },\n )\n deepspeed_stage: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Predifined DeepSpeed stage\",\n },\n )\n deepspeed_config_path: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Path to DeepSpeed config\",\n },\n )\n fsdp_strategy: str = field(\n default=\"\",\n metadata={\n \"help\": \"FSDP strategy\",\n },\n )\n fsdp_offload: bool = field(\n default=True,\n metadata={\n \"help\": \"Offload weights when using FSDP\",\n },\n )\n seed: int = field(\n default=42,\n metadata={\n \"help\": \"Seed value for random operations\",\n },\n )\n stabilize: bool = field(\n default=False,\n metadata={\n \"help\": \"Stabilize the model. Convert some weights (e.g. LoRA) to bf16\",\n },\n )\n norm_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Convert norm to fp32\",\n },\n )\n path_to_env_file: Optional[str] = field(\n default=\"./.env\",\n metadata={\"help\": \"Custom path to .env file\"},\n )\n\n # prepare\n prepare_dataset: bool = field(\n default=True,\n metadata={\n \"help\": 'Prepare the dataset. Works only at \"prepare\" stage',\n },\n )\n\n # fuse\n lora_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. The name of the LoRA model at the hub for fusing. Example: BobaZooba/Shurale\",\n },\n )\n lora_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. 
Local path to the LoRA model\",\n },\n )\n fused_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Local path to fused model. Useful if you want to quantize model after fusing on the same machine\",\n },\n )\n fuse_after_training: bool = field(\n default=False,\n metadata={\n \"help\": \"Fuse or not model after training\",\n },\n )\n\n # gptq quantization\n quantization_dataset_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Dataset id for GPTQ quantization. You can install either the idi dataset, or use any dataset\",\n },\n )\n quantization_max_samples: int = field(\n default=1024,\n metadata={\n \"help\": \"Max samples for GPTQ quantization if you use custom dataset\",\n },\n )\n quantized_model_path: str = field(\n default=\"./quantized_model/\",\n metadata={\n \"help\": \"Path to GPTQ quantized model\",\n },\n )\n quantized_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub for GPTQ quantization. Example: BobaZooba/Shurale-GPTQ\",\n },\n )\n quantized_hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository for GPTQ quantization model or not\",\n },\n )\n\n # dataset\n dataset_key: str = field(\n default=enums.Datasets.soda,\n metadata={\n \"help\": \"Key of the dataset for loading from datasets_registry\",\n },\n )\n train_local_path_to_data: str = field(\n default=\"./train.jsonl\",\n metadata={\n \"help\": \"The path to the local training data file\",\n },\n )\n eval_local_path_to_data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The path to the local eval data file\",\n },\n )\n shuffle: bool = field(\n default=True,\n metadata={\n \"help\": \"Shuffle training data\",\n },\n )\n max_eval_samples: int = field(\n default=1_000,\n metadata={\n \"help\": \"Maximum number of examples for evaluation\",\n },\n )\n add_eval_to_train_if_no_path: bool = field(\n default=False,\n metadata={\n \"help\": \"Add evaluation data to training data if their number is greater than max_eval_samples\",\n },\n )\n\n # tokenizer\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Tokenizer name or path. If the value is not set, \"\n \"then it will be taken from the model_name_or_path\",\n },\n )\n tokenizer_use_fast: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Use fast flag for the tokenizer\",\n },\n )\n tokenizer_padding_side: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Padding side of the collator: None, right or left\",\n },\n )\n\n # collator\n collator_key: str = field(\n default=enums.Collators.lm,\n metadata={\n \"help\": \"Key of the collator for loading from collators_registry\",\n },\n )\n max_length: int = field(\n default=2048,\n metadata={\n \"help\": \"Max sequence length of the model\",\n },\n )\n\n # model\n model_name_or_path: str = field(\n default=\"mistralai/Mistral-7B-v0.1\",\n metadata={\n \"help\": \"Model name or path. It could be from HuggingFace or locally\",\n },\n )\n push_to_hub_bos_add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload to the hub tokenization config with add_bos_token equals to True. Might be helpful for TGI\"\n },\n )\n use_flash_attention_2: bool = field(\n default=False,\n metadata={\n \"help\": \"Use or not flash attention 2. 
Requires 1) CUDA >= 11.6; 2) install flash-attn 3) compatible model\",\n },\n )\n trust_remote_code: bool = field(\n default=False,\n metadata={\n \"help\": \"Trust remote code from HuggingFace\",\n },\n )\n device_map: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Device map for loading the model\",\n },\n )\n prepare_model_for_kbit_training: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Prepare or not for kbit training\",\n },\n )\n offload_folder: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Offloading folder. Helps for fusing in colab\",\n },\n )\n\n # bitsandbytes\n load_in_8bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 8 bit using bitsandbytes\",\n },\n )\n load_in_4bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 4 bit using bitsandbytes\",\n },\n )\n llm_int8_threshold: float = field(\n default=6.0,\n metadata={\n \"help\": \"Threshold for outlier detection\",\n },\n )\n llm_int8_has_fp16_weight: bool = field(\n default=True,\n metadata={\n \"help\": \"LLM has weights in fp16\",\n },\n )\n bnb_4bit_use_double_quant: bool = field(\n default=True,\n metadata={\n \"help\": \"Double quantization. \"\n \"This will enable a second quantization after the first \"\n \"one to save an additional 0.4 bits per parameter\",\n },\n )\n bnb_4bit_quant_type: str = field(\n default=\"nf4\",\n metadata={\n \"help\": \"Quantization type for 4 bit\",\n },\n )\n bnb_quantize_after_model_init: bool = field(\n default=False, metadata={\"help\": \"If False, quantization will be at model init\"}\n )\n\n # gptq\n gptq_bits: int = field(\n default=4,\n metadata={\n \"help\": \"Bits for GPTQ quantization\",\n },\n )\n gptq_group_size: int = field(\n default=128,\n metadata={\n \"help\": \"Group size for GPTQ quantization\",\n },\n )\n gptq_disable_exllama: bool = field(\n default=True,\n metadata={\n \"help\": \"Disable ExLlama kernels for GPTQ quantization\",\n },\n )\n\n # lora\n apply_lora: bool = field(\n default=False,\n metadata={\n \"help\": \"Apply LoRA to the model or not\",\n },\n )\n lora_rank: int = field(\n default=8,\n metadata={\n \"help\": \"LoRA rank value. LoRA matrices W_A x R and R x W_B, where R is LoRA rank\",\n },\n )\n lora_alpha: int = field(\n default=32,\n metadata={\n \"help\": \"LoRA alpha value. The resulting LoRA matrix will be multiplied by this value\",\n },\n )\n lora_dropout: float = field(\n default=0.1,\n metadata={\n \"help\": \"LoRA dropout value\",\n },\n )\n raw_lora_target_modules: str = field(\n default=\"all\",\n metadata={\n \"help\": 'Names of modules to apply LoRA. A comma-separated string, for example: \"k,q,v\". '\n 'When setting the value \"all\", LoRA will be applied to all linear layers, except for the '\n \"input embeddings and the lm_head.\",\n },\n )\n\n # training arguments\n output_dir: str = field(\n default=\"./outputs/\",\n metadata={\n \"help\": \"The path to the directory where the artifacts will be saved\",\n },\n )\n per_device_train_batch_size: int = field(\n default=2,\n metadata={\n \"help\": \"Batch size on each GPU\",\n },\n )\n do_eval: bool = field(\n default=False,\n metadata={\n \"help\": \"Run eval or not\",\n },\n )\n per_device_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Batch size on each GPU for evaluation. 
\"\n \"If None per_device_eval_batch_size equals to per_device_train_batch_size\",\n },\n )\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\n \"help\": \"Number of steps to accumulate gradients\",\n },\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of steps to accumulate gradients at evaluation.\"\n \"If None eval_accumulation_steps equals to gradient_accumulation_steps\",\n },\n )\n eval_delay: int = field(\n default=0,\n metadata={\n \"help\": \"Number of epochs or steps to wait for before the first \"\n \"evaluation can be performed, depending on the evaluation_strategy\"\n },\n )\n eval_steps: Optional[int] = field(\n default=1_000, metadata={\"help\": \"Number of update steps between two evaluations\"}\n )\n warmup_steps: int = field(\n default=1_000,\n metadata={\n \"help\": \"Number of steps to warm up\",\n },\n )\n max_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Maximum number of training steps\",\n },\n )\n num_train_epochs: int = field(\n default=1,\n metadata={\n \"help\": \"Number of training epochs\",\n },\n )\n learning_rate: float = field(\n default=2e-4,\n metadata={\n \"help\": \"Learning rate value\",\n },\n )\n max_grad_norm: float = field(\n default=1.0,\n metadata={\n \"help\": \"Clip grad value\",\n },\n )\n weight_decay: float = field(\n default=0.001,\n metadata={\n \"help\": \"Weight decay value\",\n },\n )\n label_smoothing_factor: float = field(\n default=0.0,\n metadata={\n \"help\": \"Label smoothing value\",\n },\n )\n logging_steps: int = field(\n default=10,\n metadata={\n \"help\": \"Number of steps between logging\",\n },\n )\n save_steps: int = field(\n default=100,\n metadata={\n \"help\": \"The number of training steps between saving the checkpoint and uploading to the hub\",\n },\n )\n save_total_limit: int = field(\n default=1,\n metadata={\n \"help\": \"The number of checkpoints that are saved locally\",\n },\n )\n optim: Optional[str] = field(\n default=\"paged_adamw_8bit\",\n metadata={\n \"help\": \"Optimizer name. It will be overwritten if you use deepspeed\",\n },\n )\n push_to_hub: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload the model to the hub. \"\n \"The model will be uploaded to the hub every save_steps. \"\n \"If LoRA is used, then LoRA's weights will be loaded onto the hub\",\n },\n )\n hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub. Example: BobaZooba/Shurale\",\n },\n )\n hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository or not\",\n },\n )\n neftune_noise_alpha: Optional[float] = field(\n default=None,\n metadata={\n \"help\": \"If not None, this will activate NEFTune noise embeddings. \"\n \"This can drastically improve model performance for instruction fine-tuning\",\n },\n )\n\n # training traction\n project_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Project name for training traction services like W&B\",\n },\n )\n report_to_wandb: bool = field(\n default=False,\n metadata={\n \"help\": \"Report or not to Weight & Biases\",\n },\n )\n wandb_api_key: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases API key. You can also set this key using .env file\",\n },\n )\n wandb_project: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Depreacted, use project_name. 
Weight & Biases project name\",\n },\n )\n wandb_entity: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases entity name (user or company)\",\n },\n )\n\n def __post_init__(self):\n if self.huggingface_hub_token is not None:\n os.environ[enums.EnvironmentVariables.huggingface_hub_token] = self.huggingface_hub_token\n dist_logger(message=f\"Environment variable {enums.EnvironmentVariables.huggingface_hub_token} set\")\n\n if self.report_to_wandb:\n for key, value in zip(\n [\n enums.EnvironmentVariables.wandb_api_key,\n enums.EnvironmentVariables.wandb_project,\n enums.EnvironmentVariables.wandb_entity,\n ],\n [\n self.wandb_api_key,\n self.correct_project_name,\n self.wandb_entity,\n ],\n ):\n if value is not None:\n os.environ[key] = value\n dist_logger(message=f\"Environment variable {key} set\")\n else:\n os.environ[enums.EnvironmentVariables.wandb_disabled] = \"true\"\n\n @property\n def correct_project_name(self) -> Optional[str]:\n if self.project_name is not None and self.wandb_project is not None:\n dist_logger.warning(\n message=\"You set both project_name and wandb_project.\"\n \"Priority set to project_name for experiment tracking\"\n )\n return self.project_name\n elif self.project_name is not None:\n return self.project_name\n elif self.wandb_project is not None:\n dist_logger.warning(message=\"wandb_project is depreacted, please use project_name instead\")\n return self.wandb_project\n else:\n return None\n\n def check_hub(self) -> None:\n if self.push_to_hub and self.hub_model_id is None:\n raise ValueError(\"You want to push to HF hub, but hub_model_id is None\")\n elif self.hub_model_id is not None and not self.push_to_hub:\n dist_logger.warning(\"You set hub_model_id, but push_to_hub is False\")\n\n return None\n\n def apply_deepspeed_single_gpu(self) -> None:\n os.environ[enums.EnvironmentVariables.master_address] = \"localhost\"\n os.environ[enums.EnvironmentVariables.master_port] = str(self.master_port)\n os.environ[enums.EnvironmentVariables.rank] = \"0\"\n os.environ[enums.EnvironmentVariables.local_rank] = \"0\"\n os.environ[enums.EnvironmentVariables.world_size] = \"1\"\n\n def check_deepspeed(self) -> None:\n if self.deepspeed is not None:\n spec = find_spec(\"deepspeed\")\n\n if spec is None:\n raise ImportError(\"Deepspeed is not None, but failed to import deepspeed. Please install deepspeed.\")\n\n if self.single_gpu:\n self.apply_deepspeed_single_gpu()\n\n return None\n\n def check_flash_attention(self) -> None:\n if self.use_flash_attention_2:\n if not torch.cuda.is_available():\n raise ImportError(\"You want to use flash_attention_2, but CUDA is not available\")\n\n spec = find_spec(\"flash_attn\")\n\n if spec is None:\n raise ImportError(\n \"You want to use flash_attention_2, but flash-attn is not installed. Please install flash-attn.\"\n )\n\n return None\n\n def check_auto_gptq(self) -> None:\n spec = find_spec(\"auto_gptq\")\n\n if spec is None:\n raise ImportError(\n \"You want to quantize model using GPTQ, but auto-gptq is not installed. 
Please install auto-gptq.\"\n )\n\n return None\n\n def check(self) -> None:\n \"\"\"\n Performs a series of checks to validate the configuration for compatibility with the training environment.\n\n This method is responsible for ensuring that the environment is properly set up for the actions specified in\n the configuration object, such as pushing to Hugging Face's hub, using deepspeed, and using flash attention.\n\n It includes the following checks:\n - Verifies that credentials for Hugging Face hub are provided if the model is intended to be pushed to the hub.\n - Validates that deepspeed is installed if it is specified in the configuration.\n - Ensures that the necessary packages are installed for using flash attention if configured to do so.\n\n Does not return any value.\n\n Raises:\n - ValueError: If the configuration for hub interaction is incorrect.\n - ImportError: If any of the required libraries (e.g., deepspeed, flash-attn, auto-gptq) are not installed.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(...)\n # Before proceeding with training or other operations, run checks to ensure environment compatibility.\n config.check()\n ```\n\n Note:\n - Always invoke this method after initializing a `Config` object and before proceeding with model training\n or other operations that rely on the configuration settings.\n \"\"\"\n self.check_hub()\n self.check_deepspeed()\n self.check_flash_attention()\n\n return None\n\n @property\n def correct_tokenizer_name_or_path(self) -> str:\n \"\"\"\n Resolves the tokenizer name or path to be used for initializing the tokenizer.\n\n This property ensures that if a specific tokenizer name or path is not provided in the configuration object,\n the model name or path is used instead, maintaining consistency between model and tokenizer.\n\n Returns:\n `str`: The name or path of the tokenizer to be used. If `tokenizer_name_or_path` is specified in `Config`\n object, that value is used. Otherwise, `model_name_or_path` is returned as the default tokenizer identifier.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(model_name_or_path=\"gpt2\", tokenizer_name_or_path=None)\n tokenizer_name_or_path = config.correct_tokenizer_name_or_path\n # tokenizer_name_or_path now holds the value \"gpt2\"\n ```\n\n Note:\n - It is a common practice to use the same identifier for both the model and its corresponding tokenizer.\n This property handles such a case automatically when the `tokenizer_name_or_path` is not explicitly set.\n \"\"\"\n if self.tokenizer_name_or_path is not None:\n return self.tokenizer_name_or_path\n else:\n return self.model_name_or_path\n\n @property\n def lora_target_modules(self) -> Optional[List[str]]:\n \"\"\"\n Interprets the LoRA target modules setting from the configuration to determine which model modules to apply\n LoRA to.\n\n LoRA (Low-Rank Adaptation) is a parameter-efficient training method that modifies specific layers within a\n model. 
This property is responsible for parsing the `raw_lora_target_modules` configuration to identify\n the specific modules (like attention key, query, and value matrices) that LoRA will be applied to.\n\n Returns:\n Optional[List[str]]: A list of module names to apply LoRA to if specified, otherwise `None` if LoRA should\n be applied to all eligible modules as determined by the string \"all\" in `raw_lora_target_modules`.\n\n Raises:\n ValueError: If `raw_lora_target_modules` is not set.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with LoRA targets specified.\n config = Config(raw_lora_target_modules=\"k,q,v\")\n lora_modules = config.lora_target_modules\n # lora_modules now holds the list ['k', 'q', 'v'].\n ```\n\n Note:\n - The `raw_lora_target_modules` should be provided as a comma-separated string specifying the target\n modules. If LoRA should be applied broadly, the value \"all\" can be used.\n \"\"\"\n if self.raw_lora_target_modules == \"all\":\n return None\n elif self.raw_lora_target_modules is not None:\n modules_names = [module_name.strip() for module_name in self.raw_lora_target_modules.split(\",\")]\n return modules_names\n else:\n raise ValueError(\"raw_lora_target_modules doesn't set\")\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Determines the appropriate PyTorch data type for the model based on availability of CUDA and configuration\n settings.\n\n This property assists in setting computational precision for training and inference (e.g., FP32, FP16, BF16),\n basing the decision on system capabilities and user preferences as defined in the `Config` object. The selected\n data type can impact both the computational efficiency and memory usage of the model operations.\n\n Returns:\n `torch.dtype`: The data type to be used for the model tensors. This can be one of the following based on the\n system's CUDA support and configuration flags: `torch.float32` (FP32), `torch.float16` (FP16), or\n `torch.bfloat16` (BF16).\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(force_fp32=False, force_fp16=True)\n model_dtype = config.dtype\n # If CUDA is available and BF16 is supported, model_dtype will be `torch.bfloat16`.\n # Otherwise, it falls back to `torch.float16` due to the forced FP16 configuration.\n ```\n\n Note:\n - This property plays a critical role in memory management and computational efficiency, especially when\n working with large models or limited system resources.\n \"\"\"\n if not torch.cuda.is_available() or self.force_fp32:\n return torch.float32\n elif self.force_fp16:\n return torch.float16\n elif torch.cuda.is_bf16_supported():\n return torch.bfloat16\n else:\n return torch.float16\n\n @property\n def deepspeed(self) -> Optional[Dict[str, Any]]:\n \"\"\"\n Retrieves the deepspeed configuration dictionary based on settings within the `Config` object.\n\n This property parses the deepspeed settings from the configuration to construct the configuration dictionary\n used for ing up deepspeed in the model's training environment. 
It determines whether a predefined stage\n or a custom configuration file path should be utilized.\n\n Returns:\n `Optional[Dict[str, Any]]`: A dictionary containing deepspeed configurations, or `None` if deepspeed is not\n to be used.\n\n Raises:\n ValueError: If the `deepspeed_stage` specified does not correspond to a known configuration,\n or if a custom deepspeed configuration file path does not exist.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with deepspeed specifications.\n config = Config(deepspeed_stage=\"2\")\n ds_config = config.deepspeed\n # ds_config now contains the deepspeed configuration for stage 2.\n ```\n\n Note:\n - A deepspeed stage is a set of predefined configurations. If this is set, the corresponding configuration\n will be used and any custom deepspeed configuration file will be ignored.\n - If a custom deepspeed configuration file path is given and it exists, that configuration will be loaded\n and used.\n \"\"\"\n deepspeed_config: Optional[Dict[str, Any]] = None\n\n if self.deepspeed_config_path is not None:\n if os.path.isfile(self.deepspeed_config_path):\n with open(self.deepspeed_config_path) as file_object:\n deepspeed_config = json.load(file_object)\n return deepspeed_config\n else:\n raise ValueError(f\"deepspeed_config_path set to {self.deepspeed_config_path}, but not found\")\n\n if self.deepspeed_stage in [0, \"0\", \"stage_0\"]:\n return None\n\n if self.deepspeed_stage is not None:\n deepspeed_config = DS_CONFIG_MAPPER.get(self.deepspeed_stage, None)\n if deepspeed_config is None:\n raise ValueError(\n f'Deepspeed stage \"{self.deepspeed_stage}\" not found in keys: {list(DS_CONFIG_MAPPER.keys())}'\n )\n\n return deepspeed_config\n\n @property\n def fsdp(self) -> Union[str, List[str]]:\n \"\"\"\n Compiles the configurations for Fully Sharded Data Parallel (FSDP) based on the settings in the `Config` object.\n\n This property creates a list containing FSDP-related options, which informs the training process whether to\n enable FSDP and which FSDP strategy to employ.\n\n A list of options (fsdp_strategy) along the following:\n \"full_shard\": Shard parameters, gradients and optimizer states.\n \"shard_grad_op\": Shard optimizer states and gradients.\n \"offload\": Offload parameters and gradients to CPUs (only compatible with \"full_shard\" and \"shard_grad_op\").\n \"auto_wrap\": Automatically recursively wrap layers with FSDP using default_auto_wrap_policy.\n\n Returns:\n `Union[str, List[str]]`: A list of FSDP options as strings. 
It can be an empty string if FSDP is not used or\n a list with the specified FSDP strategy and options such as offloading.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with FSDP specifications.\n config = Config(fsdp_strategy=\"full_shard\", fsdp_offload=True)\n fsdp_options = config.fsdp\n ```\n\n Note:\n - FSDP strategies and options improve memory efficiency during distributed training by sharding the model's\n parameters across multiple devices.\n - The FSDP settings in the configuration should match the target training environment and system\n capabilities.\n \"\"\"\n fsdp_options = list()\n\n if self.fsdp_strategy is not None and self.fsdp_strategy != \"\":\n fsdp_options.append(self.fsdp_strategy)\n else:\n return \"\"\n\n if self.fsdp_offload:\n fsdp_options.append(FSDPOption.OFFLOAD)\n\n return fsdp_options\n\n @property\n def lora_model_name_or_path_for_fusing(self) -> str:\n \"\"\"\n Determines the name or path of the LoRA model to be used for the fusing process.\n\n This property resolves which model should be fused by checking whether a model ID from the Hugging Face hub or a\n local path to a LoRA model is provided in the configuration object. It is essential for the fusing operation\n when LoRA weights need to be integrated into the base model.\n\n Returns:\n `str`: The Hugging Face hub model ID or the local file path to the LoRA model, depending on which is\n specified.\n\n Raises:\n ValueError: If neither `lora_hub_model_id` nor `lora_model_local_path` is set, indicating that there is no\n model specified for fusing.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with a specified LoRA model on Hugging Face Hub or locally.\n config = Config(lora_hub_model_id=\"username/model-id\", lora_model_local_path=None)\n model_name_or_path = config.lora_model_name_or_path_for_fusing\n # model_name_or_path will hold the value \"username/model-id\".\n ```\n\n Note:\n - This property is specifically used during the model fusing step and should be configured correctly in\n scenarios where LoRA is utilized.\n \"\"\"\n if self.lora_hub_model_id is not None:\n return self.lora_hub_model_id\n elif self.lora_model_local_path is not None:\n return self.lora_model_local_path\n else:\n raise ValueError(\"Please set lora_hub_model_id or lora_model_local_path for fusing\")\n\n @property\n def need_to_prepare_model_for_kbit_training(self) -> bool:\n if self.prepare_model_for_kbit_training is not None:\n return self.prepare_model_for_kbit_training\n else:\n return self.from_gptq or self.load_in_4bit or self.load_in_8bit" }, { "identifier": "train", "path": "src/xllm/run/train.py", "snippet": "def train(\n config: Config,\n train_dataset: Optional[GeneralDataset] = None,\n eval_dataset: Optional[GeneralDataset] = None,\n) -> Experiment:\n \"\"\"\n Initiates the training process for an experiment based on the provided configuration and optional datasets.\n\n Utilizing the configuration, the method selects the appropriate experiment class from the `experiments_registry`\n and orchestrates the construction, setup and execution of the experiment's training routine.\n\n Args:\n config (`Config`):\n The configuration object that contains settings and parameters driving the training process.\n train_dataset (`Optional[GeneralDataset]`, defaults to `None`):\n An optional dataset to be used for training. 
If provided, it is used instead of building a new dataset.\n eval_dataset (`Optional[GeneralDataset]`, defaults to `None`):\n An optional dataset for evaluation. If provided, it is used for evaluating the model performance\n during training.\n\n Returns:\n Experiment:\n An instance of the `Experiment` class representing the completed training process, including the trained\n model, training history, and any relevant outputs or metrics.\n\n The `train` function follows this sequence of steps:\n - Retrieves the appropriate experiment class using the key specified in the `config.experiment_key`. Raises a\n ValueError if no matching class is found in the `experiments_registry`.\n - Instantiates the experiment class, passing the configuration and additional keyword arguments for provided\n datasets.\n - Builds the experiment, setting up the necessary environment, model, tokenizer, and datasets.\n - Executes the run method of the experiment, which encompasses the actual training and evaluation routine.\n\n Raises:\n ValueError:\n If no experiment class corresponding to `config.experiment_key` is found in the `experiments_registry`.\n\n Example usage:\n ```python\n from some_module.config import Config\n\n # Assuming we have a predefined Config object set up for an experiment.\n config = Config(...)\n experiment = train(config=config)\n\n # After training, `experiment` holds the trained model and results,\n # which can then be used for further analysis or deployment.\n ```\n\n Note:\n - If the train or evaluation datasets are not provided, the function expects the experiment's `build` method\n to handle their construction based on the provided configuration.\n - This function abstracts away the specifics of the training routine to a higher level, allowing users to work\n with experiments through a uniform interface.\n \"\"\"\n experiment_cls = experiments_registry.get(config.experiment_key)\n\n if experiment_cls is None:\n raise ValueError(f\"Experiment class {config.experiment_key} not found\")\n\n additional_kwargs = {}\n\n if train_dataset is not None:\n additional_kwargs[\"train_dataset\"] = train_dataset\n\n if eval_dataset is not None:\n additional_kwargs[\"train_dataset\"] = eval_dataset\n\n experiment: Experiment = experiment_cls(config=config, **additional_kwargs)\n\n experiment.build()\n\n experiment.run()\n\n return experiment" }, { "identifier": "LLAMA_TOKENIZER_DIR", "path": "tests/helpers/constants.py", "snippet": "LLAMA_TOKENIZER_DIR: str = os.path.join(TOKENIZERS_DIR, \"llama/\")" }, { "identifier": "patch_from_pretrained_auto_causal_lm", "path": "tests/helpers/patches.py", "snippet": "@contextmanager\ndef patch_from_pretrained_auto_causal_lm(monkeypatch: MonkeyPatch) -> Any:\n def from_pretrained(\n pretrained_model_name_or_path: str,\n quantization_config: Union[BitsAndBytesConfig, GPTQConfig, None] = None,\n torch_dtype: dtype = torch.float16,\n trust_remote_code: bool = True,\n device_map: Union[str, Dict[str, Any], None] = None,\n use_cache: bool = False,\n use_flash_attention_2: bool = True,\n ) -> LlamaForCausalLM:\n config = LlamaConfig(\n vocab_size=32_000,\n hidden_size=8,\n intermediate_size=32,\n num_hidden_layers=2,\n num_attention_heads=2,\n max_position_embeddings=32,\n )\n model = LlamaForCausalLM(config=config)\n return model\n\n monkeypatch.setattr(AutoModelForCausalLM, \"from_pretrained\", from_pretrained)\n yield True\n monkeypatch.undo()" }, { "identifier": "patch_trainer_train", "path": "tests/helpers/patches.py", "snippet": "@contextmanager\ndef 
patch_trainer_train(monkeypatch: MonkeyPatch) -> Any:\n def train(*args, **kwargs):\n return None\n\n monkeypatch.setattr(LMTrainer, \"train\", train)\n yield True\n monkeypatch.undo()" } ]
from pytest import MonkeyPatch
from src.xllm.core.config import Config
from src.xllm.run.train import train
from tests.helpers.constants import LLAMA_TOKENIZER_DIR
from tests.helpers.patches import patch_from_pretrained_auto_causal_lm, patch_trainer_train
11,899
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def test_train(monkeypatch: MonkeyPatch, path_to_train_prepared_dummy_data: str, path_to_outputs: str): config = Config( push_to_hub=False, deepspeed_stage="0", train_local_path_to_data=path_to_train_prepared_dummy_data, report_to_wandb=False, save_total_limit=0, max_steps=2, tokenizer_name_or_path=LLAMA_TOKENIZER_DIR, output_dir=path_to_outputs, )
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def test_train(monkeypatch: MonkeyPatch, path_to_train_prepared_dummy_data: str, path_to_outputs: str): config = Config( push_to_hub=False, deepspeed_stage="0", train_local_path_to_data=path_to_train_prepared_dummy_data, report_to_wandb=False, save_total_limit=0, max_steps=2, tokenizer_name_or_path=LLAMA_TOKENIZER_DIR, output_dir=path_to_outputs, )
with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch):
3
2023-11-10 17:55:03+00:00
16k
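The row above is a complete next-line code-completion sample: retrieved repository context, a truncated source file, and the single gold line that should follow it. As a rough illustration only (not part of the dataset), the sketch below shows one way such a row could be scored; the dictionary keys and the complete_next_line callable are assumptions introduced here for illustration, not fields or APIs defined by this document.

from typing import Callable, Dict

def exact_match_next_line(row: Dict[str, str], complete_next_line: Callable[[str], str]) -> bool:
    # Prompt the model with the truncated file body (assumed key: "cropped_code").
    prediction = complete_next_line(row["cropped_code"])
    # Compare the predicted line with the held-out continuation (assumed key: "next_line"),
    # ignoring leading and trailing whitespace.
    return prediction.strip() == row["next_line"].strip()

A fuller evaluation would typically also prepend the retrieved repository context to the prompt and aggregate exact-match over many rows; this sketch only covers the per-row comparison.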
AMAAI-Lab/mustango
diffusers/src/diffusers/schedulers/scheduling_ddpm.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes inheriting from [`ConfigMixin`] with\n - [`~ConfigMixin.from_config`]\n - [`~ConfigMixin.save_config`]\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the init function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class will be instantiated. 
Make sure to only load\n configuration files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the Python class.\n `**kwargs` will be directly passed to the underlying scheduler/model's `__init__` method and eventually\n overwrite same named arguments of `config`.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~ConfigMixin.save_config`], e.g.,\n `./my_model_directory/`.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config shall be returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the commit_hash of the loaded configuration shall be returned.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. 
Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "register_to_config", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)" }, { "identifier": "BaseOutput", "path": "diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. 
Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `BaseOutput` directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "randn_tensor", "path": "diffusers/src/diffusers/utils/torch_utils.py", "snippet": "def randn_tensor(\n shape: Union[Tuple, List],\n generator: Optional[Union[List[\"torch.Generator\"], \"torch.Generator\"]] = None,\n device: Optional[\"torch.device\"] = None,\n dtype: Optional[\"torch.dtype\"] = None,\n layout: Optional[\"torch.layout\"] = None,\n):\n \"\"\"This is a helper function that allows to create random tensors on the desired `device` with the desired `dtype`. When\n passing a list of generators one can seed each batched size individually. If CPU generators are passed the tensor\n will always be created on CPU.\n \"\"\"\n # device on which tensor is created defaults to device\n rand_device = device\n batch_size = shape[0]\n\n layout = layout or torch.strided\n device = device or torch.device(\"cpu\")\n\n if generator is not None:\n gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type\n if gen_device_type != device.type and gen_device_type == \"cpu\":\n rand_device = \"cpu\"\n if device != \"mps\":\n logger.info(\n f\"The passed generator was created on 'cpu' even though a tensor on {device} was expected.\"\n f\" Tensors will be created on 'cpu' and then moved to {device}. 
Note that one can probably\"\n f\" slighly speed up this function by passing a generator that was created on the {device} device.\"\n )\n elif gen_device_type != device.type and gen_device_type == \"cuda\":\n raise ValueError(f\"Cannot generate a {device} tensor from a generator of type {gen_device_type}.\")\n\n if isinstance(generator, list):\n shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)\n\n return latents" }, { "identifier": "KarrasDiffusionSchedulers", "path": "diffusers/src/diffusers/schedulers/scheduling_utils.py", "snippet": "class KarrasDiffusionSchedulers(Enum):\n DDIMScheduler = 1\n DDPMScheduler = 2\n PNDMScheduler = 3\n LMSDiscreteScheduler = 4\n EulerDiscreteScheduler = 5\n HeunDiscreteScheduler = 6\n EulerAncestralDiscreteScheduler = 7\n DPMSolverMultistepScheduler = 8\n DPMSolverSinglestepScheduler = 9\n KDPM2DiscreteScheduler = 10\n KDPM2AncestralDiscreteScheduler = 11\n DEISMultistepScheduler = 12\n UniPCMultistepScheduler = 13" }, { "identifier": "SchedulerMixin", "path": "diffusers/src/diffusers/schedulers/scheduling_utils.py", "snippet": "class SchedulerMixin:\n \"\"\"\n Mixin containing common functions for the schedulers.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that\n `from_config` can be used from a class different than the one used to save the config (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Dict[str, Any] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a Scheduler class from a pre-defined JSON configuration file inside a directory or Hub repo.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing the schedluer configurations saved using\n [`~SchedulerMixin.save_pretrained`], e.g., `./my_model_directory/`.\n subfolder (`str`, *optional*):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. 
Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs, commit_hash = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n return_commit_hash=True,\n **kwargs,\n )\n return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~SchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes" } ]
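The `randn_tensor` snippet in the context above documents per-sample seeding: when a list of generators is passed, each batch element is drawn from its own generator, and CPU generators force the draw onto the CPU before the result is moved to the target device. A minimal sketch of that behaviour, assuming the helper is importable from `diffusers.utils.torch_utils` (the path listed in the snippet):

import torch
from diffusers.utils.torch_utils import randn_tensor

# Batch of 4 illustrative latents; each element gets its own CPU generator/seed.
shape = (4, 3, 64, 64)
generators = [torch.Generator("cpu").manual_seed(i) for i in range(shape[0])]
noise = randn_tensor(shape, generator=generators, device=torch.device("cpu"))

# Re-seeding only the first generator reproduces only the first sample.
regen = [torch.Generator("cpu").manual_seed(0)] + [torch.Generator("cpu") for _ in range(3)]
noise_again = randn_tensor(shape, generator=regen, device=torch.device("cpu"))
assert torch.equal(noise[0], noise_again[0])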
import math
import numpy as np
import torch
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
11,615
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t if variance_type is None: variance_type = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": variance = torch.clamp(variance, min=1e-20) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = torch.log(torch.clamp(variance, min=1e-20)) variance = torch.exp(0.5 * variance) elif variance_type == "fixed_large": variance = current_beta_t elif variance_type == "fixed_large_log": # Glide max_log variance = torch.log(current_beta_t) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": min_log = torch.log(variance) max_log = torch.log(self.betas[t]) frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: # Dynamic thresholding in https://arxiv.org/abs/2205.11487 dynamic_max_val = ( sample.flatten(1) .abs() .quantile(self.config.dynamic_thresholding_ratio, dim=1) .clamp_min(self.config.sample_max_value) .view(-1, *([1] * (sample.ndim - 1))) ) return sample.clamp(-dynamic_max_val, dynamic_max_val) / dynamic_max_val def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ t = timestep num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = timestep - self.config.num_train_timesteps // num_inference_steps if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev current_alpha_t = alpha_prod_t / alpha_prod_t_prev current_beta_t = 1 - current_alpha_t # 2. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.config.prediction_type == "sample": pred_original_sample = model_output elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" " `v_prediction` for the DDPMScheduler." ) # 3. Clip or threshold "predicted x_0" if self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise variance = 0 if t > 0: device = model_output.device
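The cropped code breaks off inside step 6 ("Add noise"), and the gold next line recorded further below begins the `randn_tensor` call. What follows is a hedged sketch of how such a DDPM update is typically completed, sampling x_{t-1} from N(mu_t, sigma_t^2) as in formula (7) of the paper, built only from pieces already shown above (`_get_variance`, `DDPMSchedulerOutput`). It is an illustration, not the file's verbatim continuation.

            # Sketch only: the record elides the remainder of `step`.
            # Sample Gaussian noise, scale it by the (square root of the) variance
            # from `_get_variance`, and add it to the posterior mean from step 5.
            variance_noise = randn_tensor(
                model_output.shape, generator=generator, device=device, dtype=model_output.dtype
            )
            if self.variance_type == "fixed_small_log":
                # in this branch `_get_variance` already returns a standard deviation
                variance = self._get_variance(t, predicted_variance=predicted_variance) * variance_noise
            else:
                variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)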
# Copyright 2023 UC Berkeley Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim @dataclass class DDPMSchedulerOutput(BaseOutput): """ Output class for the scheduler's step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. """ prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): """ Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of (1-beta) over time from t = [0,1]. Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up to that part of the diffusion process. Args: num_diffusion_timesteps (`int`): the number of betas to produce. max_beta (`float`): the maximum beta to use; use values lower than 1 to prevent singularities. Returns: betas (`np.ndarray`): the betas used by the scheduler to step the model outputs """ def alpha_bar(time_step): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 betas = [] for i in range(num_diffusion_timesteps): t1 = i / num_diffusion_timesteps t2 = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) return torch.tensor(betas, dtype=torch.float32) class DDPMScheduler(SchedulerMixin, ConfigMixin): """ Denoising diffusion probabilistic models (DDPMs) explores the connections between denoising score matching and Langevin dynamics sampling. [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and [`~SchedulerMixin.from_pretrained`] functions. For more details, see the original paper: https://arxiv.org/abs/2006.11239 Args: num_train_timesteps (`int`): number of diffusion steps used to train the model. beta_start (`float`): the starting `beta` value of inference. beta_end (`float`): the final `beta` value. beta_schedule (`str`): the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from `linear`, `scaled_linear`, or `squaredcos_cap_v2`. trained_betas (`np.ndarray`, optional): option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. 
variance_type (`str`): options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. clip_sample (`bool`, default `True`): option to clip predicted sample for numerical stability. clip_sample_range (`float`, default `1.0`): the maximum magnitude for sample clipping. Valid only when `clip_sample=True`. prediction_type (`str`, default `epsilon`, optional): prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 https://imagen.research.google/video/paper.pdf) thresholding (`bool`, default `False`): whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487). Note that the thresholding method is unsuitable for latent-space diffusion models (such as stable-diffusion). dynamic_thresholding_ratio (`float`, default `0.995`): the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`. sample_max_value (`float`, default `1.0`): the threshold value for dynamic thresholding. Valid only when `thresholding=True`. """ _compatibles = [e.name for e in KarrasDiffusionSchedulers] order = 1 @register_to_config def __init__( self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[Union[np.ndarray, List[float]]] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", thresholding: bool = False, dynamic_thresholding_ratio: float = 0.995, clip_sample_range: float = 1.0, sample_max_value: float = 1.0, ): if trained_betas is not None: self.betas = torch.tensor(trained_betas, dtype=torch.float32) elif beta_schedule == "linear": self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. self.betas = ( torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule self.betas = betas_for_alpha_bar(num_train_timesteps) elif beta_schedule == "sigmoid": # GeoDiff sigmoid schedule betas = torch.linspace(-6, 6, num_train_timesteps) self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start else: raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") self.alphas = 1.0 - self.betas self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) self.one = torch.tensor(1.0) # standard deviation of the initial noise distribution self.init_noise_sigma = 1.0 # setable values self.num_inference_steps = None self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) self.variance_type = variance_type def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: """ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the current timestep. 
Args: sample (`torch.FloatTensor`): input sample timestep (`int`, optional): current timestep Returns: `torch.FloatTensor`: scaled input sample """ return sample def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): """ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. Args: num_inference_steps (`int`): the number of diffusion steps used when generating samples with a pre-trained model. """ if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" f" maximal {self.config.num_train_timesteps} timesteps." ) self.num_inference_steps = num_inference_steps step_ratio = self.config.num_train_timesteps // self.num_inference_steps timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64) self.timesteps = torch.from_numpy(timesteps).to(device) def _get_variance(self, t, predicted_variance=None, variance_type=None): num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = t - self.config.num_train_timesteps // num_inference_steps alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one current_beta_t = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * current_beta_t if variance_type is None: variance_type = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": variance = torch.clamp(variance, min=1e-20) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": variance = torch.log(torch.clamp(variance, min=1e-20)) variance = torch.exp(0.5 * variance) elif variance_type == "fixed_large": variance = current_beta_t elif variance_type == "fixed_large_log": # Glide max_log variance = torch.log(current_beta_t) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": min_log = torch.log(variance) max_log = torch.log(self.betas[t]) frac = (predicted_variance + 1) / 2 variance = frac * max_log + (1 - frac) * min_log return variance def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor: # Dynamic thresholding in https://arxiv.org/abs/2205.11487 dynamic_max_val = ( sample.flatten(1) .abs() .quantile(self.config.dynamic_thresholding_ratio, dim=1) .clamp_min(self.config.sample_max_value) .view(-1, *([1] * (sample.ndim - 1))) ) return sample.clamp(-dynamic_max_val, dynamic_max_val) / dynamic_max_val def step( self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion process from the learned model outputs (most often the predicted noise). Args: model_output (`torch.FloatTensor`): direct output from learned diffusion model. timestep (`int`): current discrete timestep in the diffusion chain. 
sample (`torch.FloatTensor`): current instance of sample being created by diffusion process. generator: random number generator. return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class Returns: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`: [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor. """ t = timestep num_inference_steps = self.num_inference_steps if self.num_inference_steps else self.config.num_train_timesteps prev_t = timestep - self.config.num_train_timesteps // num_inference_steps if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1) else: predicted_variance = None # 1. compute alphas, betas alpha_prod_t = self.alphas_cumprod[t] alpha_prod_t_prev = self.alphas_cumprod[prev_t] if prev_t >= 0 else self.one beta_prod_t = 1 - alpha_prod_t beta_prod_t_prev = 1 - alpha_prod_t_prev current_alpha_t = alpha_prod_t / alpha_prod_t_prev current_beta_t = 1 - current_alpha_t # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5) elif self.config.prediction_type == "sample": pred_original_sample = model_output elif self.config.prediction_type == "v_prediction": pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` or" " `v_prediction` for the DDPMScheduler." ) # 3. Clip or threshold "predicted x_0" if self.config.clip_sample: pred_original_sample = pred_original_sample.clamp( -self.config.clip_sample_range, self.config.clip_sample_range ) if self.config.thresholding: pred_original_sample = self._threshold_sample(pred_original_sample) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * current_beta_t) / beta_prod_t current_sample_coeff = current_alpha_t ** (0.5) * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise variance = 0 if t > 0: device = model_output.device
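For orientation, a short illustrative usage of the scheduler defined above, assuming the `diffusers` package is installed: the squared-cosine schedule is the `betas_for_alpha_bar` branch of `__init__`, and `set_timesteps` picks evenly spaced, descending timesteps exactly as shown.

from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2")
scheduler.set_timesteps(50)

assert scheduler.timesteps.shape == (50,)
assert scheduler.timesteps[0] == 980   # step_ratio = 1000 // 50 = 20, so the first timestep is 49 * 20
assert scheduler.timesteps[-1] == 0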
variance_noise = randn_tensor(
3
2023-11-14 23:29:31+00:00
16k
BraveGroup/Drive-WM
src/diffusers/pipelines/pipeline_flax_utils.py
[ { "identifier": "ConfigMixin", "path": "src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. All configuration parameters are stored under `self.config`. Also\n provides the [`~ConfigMixin.from_config`] and [`~ConfigMixin.save_config`] methods for loading, downloading, and\n saving classes that inherit from [`ConfigMixin`].\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the `init` function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def __getattr__(self, name: str) -> Any:\n \"\"\"The only reason we overwrite `getattr` here is to gracefully deprecate accessing\n config attributes directly. See https://github.com/huggingface/diffusers/pull/3129\n\n Tihs funtion is mostly copied from PyTorch's __getattr__ overwrite:\n https://pytorch.org/docs/stable/_modules/torch/nn/modules/module.html#Module\n \"\"\"\n\n is_in_config = \"_internal_dict\" in self.__dict__ and hasattr(self.__dict__[\"_internal_dict\"], name)\n is_attribute = name in self.__dict__\n\n if is_in_config and not is_attribute:\n deprecation_message = f\"Accessing config attribute `{name}` directly via '{type(self).__name__}' object attribute is deprecated. Please access '{name}' over '{type(self).__name__}'s config object instead, e.g. 'scheduler.config.{name}'.\"\n deprecate(\"direct config name access\", \"1.0.0\", deprecation_message, standard_warn=False)\n return self._internal_dict[name]\n\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory specified in `save_directory` so that it can be reloaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file is saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary.\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class is instantiated. Make sure to only load configuration\n files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it is loaded) and initiate the Python class.\n `**kwargs` are passed directly to the underlying scheduler/model's `__init__` method and eventually\n overwrite the same named arguments in `config`.\n\n Returns:\n [`ModelMixin`] or [`SchedulerMixin`]:\n A model or scheduler object instantiated from a config dictionary.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. 
This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Load a model or scheduler configuration.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing model weights saved with\n [`~ConfigMixin.save_config`].\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. 
If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config are returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the `commit_hash` of the loaded configuration are returned.\n\n Returns:\n `dict`:\n A dictionary of all the parameters stored in a JSON configuration file.\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # Skip keys that were not present in the original config, so default __init__ values were used\n used_defaults = config_dict.get(\"_use_default_values\", [])\n config_dict = {k: v for k, v in config_dict.items() if k not in used_defaults and k != \"_use_default_values\"}\n\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if (\n isinstance(orig_cls_name, str)\n and orig_cls_name != cls.__name__\n and hasattr(diffusers_library, orig_cls_name)\n ):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n elif not isinstance(orig_cls_name, str) and not isinstance(orig_cls_name, (list, tuple)):\n raise ValueError(\n \"Make sure that the `_class_name` is of type string or list of string (for custom pipelines).\"\n )\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. 
Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes the configuration instance to a JSON string.\n\n Returns:\n `str`:\n String containing all the attributes that make up the configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n # Don't save \"_ignore_files\" or \"_use_default_values\"\n config_dict.pop(\"_ignore_files\", None)\n config_dict.pop(\"_use_default_values\", None)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save the configuration instance's parameters to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file to save a configuration instance's parameters.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "FLAX_WEIGHTS_NAME", "path": "src/diffusers/models/modeling_flax_utils.py", "snippet": "class FlaxModelMixin(PushToHubMixin):\n def _from_config(cls, config, **kwargs):\n def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n def conditional_cast(param):\n def to_bf16(self, params: 
Union[Dict, FrozenDict], mask: Any = None):\n def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n def init_weights(self, rng: jax.Array) -> Dict:\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n dtype: jnp.dtype = jnp.float32,\n *model_args,\n **kwargs,\n ):\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n params: Union[Dict, FrozenDict],\n is_main_process: bool = True,\n push_to_hub: bool = False,\n **kwargs,\n ):" }, { "identifier": "SCHEDULER_CONFIG_NAME", "path": "src/diffusers/schedulers/scheduling_utils_flax.py", "snippet": "SCHEDULER_CONFIG_NAME = \"scheduler_config.json\"" }, { "identifier": "FlaxSchedulerMixin", "path": "src/diffusers/schedulers/scheduling_utils_flax.py", "snippet": "class FlaxSchedulerMixin(PushToHubMixin):\n \"\"\"\n Mixin containing common functions for the schedulers.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that\n `from_config` can be used from a class different than the one used to save the config (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n ignore_for_config = [\"dtype\"]\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a Scheduler class from a pre-defined JSON-file.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`],\n e.g., `./my_model_directory/`.\n subfolder (`str`, *optional*):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. 
The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n **kwargs,\n )\n scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)\n\n if hasattr(scheduler, \"create_state\") and getattr(scheduler, \"has_state\", False):\n state = scheduler.create_state()\n\n if return_unused_kwargs:\n return scheduler, state, unused_kwargs\n\n return scheduler, state\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~FlaxSchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face Hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes" }, { "identifier": "logging", "path": "src/diffusers/utils/logging.py", "snippet": "def _get_default_logging_level() -> int:\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict() -> Dict[str, int]:\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info() -> None:\ndef set_verbosity_warning() -> None:\ndef set_verbosity_debug() -> None:\ndef set_verbosity_error() -> None:\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs) -> None:\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar() -> None:\ndef disable_progress_bar() -> None:\nclass EmptyTqdm:\nclass _tqdm_cls:" }, { "identifier": "CONFIG_NAME", "path": "src/diffusers/utils/constants.py", "snippet": "CONFIG_NAME = \"config.json\"" }, { "identifier": "DIFFUSERS_CACHE", "path": "src/diffusers/utils/constants.py", "snippet": "DIFFUSERS_CACHE = default_cache_path" }, { "identifier": "PushToHubMixin", "path": "src/diffusers/utils/hub_utils.py", "snippet": "class PushToHubMixin:\n \"\"\"\n A Mixin to push a model, scheduler, or pipeline to the Hugging Face Hub.\n \"\"\"\n\n def _upload_folder(\n self,\n working_dir: Union[str, os.PathLike],\n repo_id: str,\n token: Optional[str] = None,\n commit_message: Optional[str] = None,\n create_pr: bool = False,\n ):\n \"\"\"\n Uploads all files in `working_dir` to `repo_id`.\n \"\"\"\n if commit_message is None:\n if \"Model\" in self.__class__.__name__:\n commit_message = \"Upload model\"\n elif \"Scheduler\" in self.__class__.__name__:\n commit_message = \"Upload scheduler\"\n else:\n commit_message = f\"Upload {self.__class__.__name__}\"\n\n logger.info(f\"Uploading the files of {working_dir} to {repo_id}.\")\n return upload_folder(\n repo_id=repo_id, 
folder_path=working_dir, token=token, commit_message=commit_message, create_pr=create_pr\n )\n\n def push_to_hub(\n self,\n repo_id: str,\n commit_message: Optional[str] = None,\n private: Optional[bool] = None,\n token: Optional[str] = None,\n create_pr: bool = False,\n safe_serialization: bool = True,\n variant: Optional[str] = None,\n ) -> str:\n \"\"\"\n Upload model, scheduler, or pipeline files to the 🤗 Hugging Face Hub.\n\n Parameters:\n repo_id (`str`):\n The name of the repository you want to push your model, scheduler, or pipeline files to. It should\n contain your organization name when pushing to an organization. `repo_id` can also be a path to a local\n directory.\n commit_message (`str`, *optional*):\n Message to commit while pushing. Default to `\"Upload {object}\"`.\n private (`bool`, *optional*):\n Whether or not the repository created should be private.\n token (`str`, *optional*):\n The token to use as HTTP bearer authorization for remote files. The token generated when running\n `huggingface-cli login` (stored in `~/.huggingface`).\n create_pr (`bool`, *optional*, defaults to `False`):\n Whether or not to create a PR with the uploaded files or directly commit.\n safe_serialization (`bool`, *optional*, defaults to `True`):\n Whether or not to convert the model weights to the `safetensors` format.\n variant (`str`, *optional*):\n If specified, weights are saved in the format `pytorch_model.<variant>.bin`.\n\n Examples:\n\n ```python\n from diffusers import UNet2DConditionModel\n\n unet = UNet2DConditionModel.from_pretrained(\"stabilityai/stable-diffusion-2\", subfolder=\"unet\")\n\n # Push the `unet` to your namespace with the name \"my-finetuned-unet\".\n unet.push_to_hub(\"my-finetuned-unet\")\n\n # Push the `unet` to an organization with the name \"my-finetuned-unet\".\n unet.push_to_hub(\"your-org/my-finetuned-unet\")\n ```\n \"\"\"\n repo_id = create_repo(repo_id, private=private, token=token, exist_ok=True).repo_id\n\n # Save all files.\n save_kwargs = {\"safe_serialization\": safe_serialization}\n if \"Scheduler\" not in self.__class__.__name__:\n save_kwargs.update({\"variant\": variant})\n\n with tempfile.TemporaryDirectory() as tmpdir:\n self.save_pretrained(tmpdir, **save_kwargs)\n\n return self._upload_folder(\n tmpdir,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )" }, { "identifier": "http_user_agent", "path": "src/diffusers/utils/hub_utils.py", "snippet": "def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n \"\"\"\n Formats a user-agent string with basic info about a request.\n \"\"\"\n ua = f\"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n return ua + \"; telemetry/off\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_flax_available():\n ua += f\"; jax/{_jax_version}\"\n ua += f\"; flax/{_flax_version}\"\n if is_onnx_available():\n ua += f\"; onnxruntime/{_onnxruntime_version}\"\n # CI will set this value to True\n if os.environ.get(\"DIFFUSERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua" }, { "identifier": "is_transformers_available", "path": "src/diffusers/utils/import_utils.py", "snippet": "def is_transformers_available():\n return _transformers_available" }, { "identifier": 
"BaseOutput", "path": "src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n Python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a [`BaseOutput`] directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n first.\n\n </Tip>\n \"\"\"\n\n def __init_subclass__(cls) -> None:\n \"\"\"Register subclasses as pytree nodes.\n\n This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with\n `static_graph=True` with modules that output `ModelOutput` subclasses.\n \"\"\"\n if is_torch_available():\n import torch.utils._pytree\n\n torch.utils._pytree._register_pytree_node(\n cls,\n torch.utils._pytree._dict_flatten,\n lambda values, context: cls(**torch.utils._pytree._dict_unflatten(values, context)),\n )\n\n def __post_init__(self) -> None:\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k: Any) -> Any:\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name: Any, value: Any) -> None:\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def __reduce__(self):\n if not is_dataclass(self):\n return super().__reduce__()\n callable, _args, *remaining = super().__reduce__()\n args = tuple(getattr(self, field.name) for field in fields(self))\n return callable, args, *remaining\n\n def to_tuple(self) -> Tuple[Any, ...]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" } ]
import importlib import inspect import os import flax import numpy as np import PIL.Image from typing import Any, Dict, List, Optional, Union from flax.core.frozen_dict import FrozenDict from huggingface_hub import create_repo, snapshot_download from PIL import Image from tqdm.auto import tqdm from ..configuration_utils import ConfigMixin from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin from ..utils import ( CONFIG_NAME, DIFFUSERS_CACHE, BaseOutput, PushToHubMixin, http_user_agent, is_transformers_available, logging, ) from transformers import FlaxPreTrainedModel from diffusers import pipelines
10923
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass
class FlaxImagePipelineOutput(BaseOutput):
10
2023-11-18 01:40:55+00:00
16k
BAAI-DCAI/SegVol
inference_demo.py
[ { "identifier": "sam_model_registry", "path": "segment_anything_volumetric/build_sam.py", "snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):" }, { "identifier": "SegVol", "path": "network/model.py", "snippet": "class SegVol(nn.Module):\n def __init__(self, \n image_encoder, \n mask_decoder,\n prompt_encoder,\n clip_ckpt,\n roi_size,\n patch_size,\n test_mode=False,\n ):\n super().__init__()\n self.image_encoder = image_encoder\n self.mask_decoder = mask_decoder\n self.prompt_encoder = prompt_encoder\n self.text_encoder = TextEncoder(clip_ckpt)\n self.feat_shape = np.array(roi_size)/np.array(patch_size)\n self.test_mode = test_mode\n self.dice_loss = BinaryDiceLoss().cuda()\n self.bce_loss = BCELoss().cuda()\n self.decoder_iter = 6\n\n def forward(self, image, text=None, boxes=None, points=None, **kwargs):\n bs = image.shape[0]\n img_shape = (image.shape[2], image.shape[3], image.shape[4])\n image_embedding, _ = self.image_encoder(image)\n image_embedding = image_embedding.transpose(1, 2).view(bs, -1, \n int(self.feat_shape[0]), int(self.feat_shape[1]), int(self.feat_shape[2]))\n # test mode\n if self.test_mode:\n return self.forward_decoder(image_embedding, img_shape, text, boxes, points)\n \n # train mode\n ## sl\n sl_loss = self.supervised_forward(image, image_embedding, img_shape, kwargs['train_organs'], kwargs['train_labels'])\n ## ssl\n ssl_loss = self.unsupervised_forward(image, image_embedding, kwargs['pseudo_seg_cleaned'], img_shape)\n return sl_loss, ssl_loss\n\n def forward_decoder(self, image_embedding, img_shape, text=None, boxes=None, points=None):\n with torch.no_grad():\n if boxes is not None:\n if len(boxes.shape) == 2:\n boxes = boxes[:, None, :] # (B, 1, 6)\n if text is not None:\n text_embedding = self.text_encoder(text) # (B, 768)\n else:\n text_embedding = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=None,\n text_embedding=text_embedding,\n )\n\n dense_pe = self.prompt_encoder.get_dense_pe()\n low_res_masks, _ = self.mask_decoder(\n image_embeddings=image_embedding,\n text_embedding = text_embedding,\n image_pe=dense_pe,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=False,\n )\n logits = F.interpolate(low_res_masks, size=img_shape, mode='trilinear', align_corners=False)\n return logits\n\n def supervised_forward(self, image, image_embedding, img_shape, training_organs, train_labels):\n iter_points, iter_bboxes, iter_organs = self.build_prompt_label(image.shape[0], training_organs, train_labels)\n # select prompt\n prompt_options = [[None, iter_points, iter_organs], [iter_bboxes, None, iter_organs], \n [None, None, iter_organs], [iter_bboxes, None, None], [None, iter_points, None],\n [iter_bboxes, iter_points, None]]\n sl_loss = 0\n for prompt in prompt_options:\n bboxes, points, organs = prompt\n logits = self.forward_decoder(image_embedding, img_shape, text=organs, boxes=bboxes, points=points)\n # cal loss\n sl_loss_dice = self.dice_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss_bce = self.bce_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss += sl_loss_dice + sl_loss_bce\n return sl_loss\n \n def unsupervised_forward(self, image, image_embedding, pseudo_seg_cleaned, img_shape):\n sll_loss = 0\n for iter in range(self.decoder_iter):\n if iter % 2 == 0:\n pseudo_labels, 
pseudo_points_prompt = self.build_pseudo_point_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=None, points=pseudo_points_prompt)\n else:\n pseudo_labels, pseudo_bboxes_prompt = self.build_pseudo_box_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=pseudo_bboxes_prompt, points=None)\n # cal loss\n sll_loss_dice = self.dice_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss_bce = self.bce_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss += sll_loss_dice + sll_loss_bce\n return sll_loss\n\n def build_prompt_label(self, bs, training_organs, train_labels):\n # generate prompt & label\n iter_organs = []\n iter_bboxes = []\n iter_points_ax = []\n iter_point_labels = []\n for sample_idx in range(bs):\n # organ prompt\n iter_organs.append(training_organs)\n # box prompt\n box = generate_box(train_labels[sample_idx])\n iter_bboxes.append(box)\n # point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(0, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n point, point_label = select_points(\n train_labels[sample_idx],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n iter_points_ax.append(point)\n iter_point_labels.append(point_label)\n # batched prompt\n iter_points_ax = torch.stack(iter_points_ax, dim=0).cuda()\n iter_point_labels = torch.stack(iter_point_labels, dim=0).cuda()\n iter_points = (iter_points_ax, iter_point_labels)\n iter_bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return iter_points, iter_bboxes, iter_organs\n \n def build_pseudo_point_prompt_label(self, input_shape, seg_labels):\n pseudo_labels = torch.zeros(input_shape).cuda()\n # generate points\n points = []\n point_labels = []\n for batch_idx in range(input_shape[0]):\n # generate pseudo label\n unique_ids = torch.unique(seg_labels[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels[batch_idx]==region_id] = 1\n # generate point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(4, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n assert len(pseudo_labels[batch_idx][0].shape) == 3\n point, point_label = select_points(\n pseudo_labels[batch_idx][0],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n points.append(point)\n point_labels.append(point_label)\n points = torch.stack(points, dim=0).cuda()\n point_labels = torch.stack(point_labels, dim=0).cuda()\n pseudo_points_prompt = (points, point_labels)\n return pseudo_labels, pseudo_points_prompt\n\n def build_pseudo_box_prompt_label(self, input_shape, seg_labels_cleaned):\n pseudo_labels = torch.zeros(input_shape).cuda()\n iter_bboxes = []\n # generate boxes\n for batch_idx in range(input_shape[0]):\n # generate ori pseudo label\n unique_ids = torch.unique(seg_labels_cleaned[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==region_id] = 1\n # generate box prompt\n box = 
generate_box(pseudo_labels[batch_idx][0])\n iter_bboxes.append(box)\n # refine pseudo label\n x_min, y_min, z_min, x_max, y_max, z_max = box\n binary_cube = torch.zeros_like(pseudo_labels[batch_idx][0]).int()\n binary_cube[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1] = 1\n # cal iou\n mask_label = seg_labels_cleaned[batch_idx][0]\n assert binary_cube.shape == mask_label.shape, str(binary_cube.shape) + ' ' + str(mask_label.shape)\n mask_values_in_binary_cube = mask_label[binary_cube == 1]\n unique_mask_values = torch.unique(mask_values_in_binary_cube)\n # print('unique_mask_values ', unique_mask_values)\n for value in unique_mask_values:\n if value == -1: continue\n mask_area = (mask_label == value)\n intersection = (binary_cube & mask_area)\n iou = intersection.float().sum() / mask_area.float().sum()\n if iou > 0.90:\n # print(f\"Mask value {value} has IOU > 0.90 in binary cube.\")\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==value] = 1\n\n bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return pseudo_labels, bboxes" }, { "identifier": "process_ct_gt", "path": "data_process/demo_data_process.py", "snippet": "def process_ct_gt(case_path, label_path, category, spatial_size):\n print('Data preprocessing...')\n # transform\n img_loader = transforms.LoadImage()\n transform = transforms.Compose(\n [\n transforms.Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ForegroundNormalization(keys=[\"image\"]),\n DimTranspose(keys=[\"image\", \"label\"]),\n MinMaxNormalization(),\n transforms.SpatialPadd(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='constant'),\n transforms.CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n transforms.ToTensord(keys=[\"image\", \"label\"]),\n ]\n )\n zoom_out_transform = transforms.Resized(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='nearest-exact')\n\n ###\n item = {}\n # generate ct_voxel_ndarray\n ct_voxel_ndarray, _ = img_loader(case_path)\n print(type(ct_voxel_ndarray))\n ct_voxel_ndarray = np.array(ct_voxel_ndarray).squeeze()\n ct_shape = ct_voxel_ndarray.shape\n ct_voxel_ndarray = np.expand_dims(ct_voxel_ndarray, axis=0)\n item['image'] = ct_voxel_ndarray\n\n # generate gt_voxel_ndarray\n gt_voxel_ndarray, _ = img_loader(label_path)\n gt_voxel_ndarray = np.array(gt_voxel_ndarray)\n present_categories = np.unique(gt_voxel_ndarray)\n gt_masks = []\n for cls_idx in range(len(category)):\n # ignore background\n cls = cls_idx + 1\n if cls not in present_categories:\n gt_voxel_ndarray_category = np.zeros(ct_shape)\n gt_masks.append(gt_voxel_ndarray_category)\n else:\n gt_voxel_ndarray_category = gt_voxel_ndarray.copy()\n gt_voxel_ndarray_category[gt_voxel_ndarray != cls] = 0\n gt_voxel_ndarray_category[gt_voxel_ndarray == cls] = 1\n gt_masks.append(gt_voxel_ndarray_category)\n gt_voxel_ndarray = np.stack(gt_masks, axis=0)\n assert gt_voxel_ndarray.shape[0] == len(category) and gt_voxel_ndarray.shape[1:] == ct_voxel_ndarray.shape[1:]\n item['label'] = gt_voxel_ndarray.astype(np.int32)\n\n # transform\n item = transform(item)\n item_zoom_out = zoom_out_transform(item)\n item['zoom_out_image'] = item_zoom_out['image']\n item['zoom_out_label'] = item_zoom_out['label']\n print( 'Zoom_in image shape: ', item['image'].shape, \n '\\nZoom_in label shape: ', item['label'].shape,\n '\\nZoom_out image shape: ', item['zoom_out_image'].shape,\n '\\nZoom_out label shape: ', item['zoom_out_label'].shape,\n )\n return item" }, { "identifier": "sliding_window_inference", "path": 
"utils/monai_inferers_utils.py", "snippet": "def sliding_window_inference(\n inputs: torch.Tensor,\n prompt_reflection: Union[torch.Tensor, Tuple[torch.Tensor, ...]],\n roi_size: Union[Sequence[int], int],\n sw_batch_size: int,\n predictor: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor], Dict[Any, torch.Tensor]]],\n overlap: float = 0.25,\n mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n sigma_scale: Union[Sequence[float], float] = 0.125,\n padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,\n cval: float = 0.0,\n sw_device: Union[torch.device, str, None] = None,\n device: Union[torch.device, str, None] = None,\n progress: bool = False,\n roi_weight_map: Union[torch.Tensor, None] = None,\n *args: Any,\n **kwargs: Any,\n) -> Union[torch.Tensor, Tuple[torch.Tensor, ...], Dict[Any, torch.Tensor]]:\n \"\"\"\n Sliding window inference on `inputs` with `predictor`.\n\n The outputs of `predictor` could be a tensor, a tuple, or a dictionary of tensors.\n Each output in the tuple or dict value is allowed to have different resolutions with respect to the input.\n e.g., the input patch spatial size is [128,128,128], the output (a tuple of two patches) patch sizes\n could be ([128,64,256], [64,32,128]).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen to ensure the output ROI is still\n an integer. If the predictor's input and output spatial sizes are not equal, we recommend choosing the parameters\n so that `overlap*roi_size*output_size/input_size` is an integer (for each spatial dimension).\n\n When roi_size is larger than the inputs' spatial size, the input image are padded during inference.\n To maintain the same spatial sizes, the output image will be cropped to the original input size.\n\n Args:\n inputs: input image to be processed (assuming NCHW[D])\n roi_size: the spatial window size for inferences.\n When its components have None or non-positives, the corresponding inputs dimension will be used.\n if the components of the `roi_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n sw_batch_size: the batch size to run window slices.\n predictor: given input tensor ``patch_data`` in shape NCHW[D],\n The outputs of the function call ``predictor(patch_data)`` should be a tensor, a tuple, or a dictionary\n with Tensor values. Each output in the tuple or dict value should have the same batch_size, i.e. NM'H'W'[D'];\n where H'W'[D'] represents the output patch's spatial size, M is the number of output channels,\n N is `sw_batch_size`, e.g., the input shape is (7, 1, 128,128,128),\n the output could be a tuple of two tensors, with shapes: ((7, 5, 128, 64, 256), (7, 4, 64, 32, 128)).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen\n to ensure the scaled output ROI sizes are still integers.\n If the `predictor`'s input and output spatial sizes are different,\n we recommend choosing the parameters so that ``overlap*roi_size*zoom_scale`` is an integer for each dimension.\n overlap: Amount of overlap between scans.\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. 
Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``\"gaussian\"``.\n Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.\n When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding\n spatial dimensions.\n padding_mode: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}\n Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``\"constant\"``\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n cval: fill value for 'constant' padding mode. Default: 0\n sw_device: device for the window data.\n By default the device (and accordingly the memory) of the `inputs` is used.\n Normally `sw_device` should be consistent with the device where `predictor` is defined.\n device: device for the stitched output prediction.\n By default the device (and accordingly the memory) of the `inputs` is used. If for example\n set to device=torch.device('cpu') the gpu memory consumption is less and independent of the\n `inputs` and `roi_size`. Output is on the `device`.\n progress: whether to print a `tqdm` progress bar.\n roi_weight_map: pre-computed (non-negative) weight map for each ROI.\n If not given, and ``mode`` is not `constant`, this map will be computed on the fly.\n args: optional args to be passed to ``predictor``.\n kwargs: optional keyword args to be passed to ``predictor``.\n\n Note:\n - input must be channel-first and have a batch dim, supports N-D sliding window.\n\n \"\"\"\n print('sliding window inference for ROI')\n text = kwargs['text']\n use_box = kwargs['use_box']\n use_point = kwargs['use_point']\n assert not (use_box and use_point)\n compute_dtype = inputs.dtype\n num_spatial_dims = len(inputs.shape) - 2\n if overlap < 0 or overlap >= 1:\n raise ValueError(\"overlap must be >= 0 and < 1.\")\n\n # determine image spatial size and batch size\n # Note: all input images must have the same image size and batch size\n batch_size, _, *image_size_ = inputs.shape\n\n if device is None:\n device = inputs.device\n if sw_device is None:\n sw_device = inputs.device\n\n roi_size = fall_back_tuple(roi_size, image_size_)\n # in case that image size is smaller than roi size\n image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))\n pad_size = []\n for k in range(len(inputs.shape) - 1, 1, -1):\n diff = max(roi_size[k - 2] - inputs.shape[k], 0)\n half = diff // 2\n pad_size.extend([half, diff - half])\n inputs = F.pad(inputs, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n if use_point or use_box:\n binary_prompt_map, global_preds = prompt_reflection\n global_preds = F.pad(global_preds, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)\n\n # Store all slices in list\n slices = dense_patch_slices(image_size, roi_size, scan_interval)\n num_win = len(slices) # number of windows per image\n total_slices = num_win * batch_size # total number of windows\n\n # Create window-level importance map\n valid_patch_size = get_valid_patch_size(image_size, roi_size)\n if valid_patch_size == roi_size and (roi_weight_map is not None):\n importance_map = roi_weight_map\n 
else:\n try:\n importance_map = compute_importance_map(valid_patch_size, mode=mode, sigma_scale=sigma_scale, device=device)\n except BaseException as e:\n raise RuntimeError(\n \"Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'.\"\n ) from e\n importance_map = convert_data_type(importance_map, torch.Tensor, device, compute_dtype)[0] # type: ignore\n # handle non-positive weights\n min_non_zero = max(importance_map[importance_map != 0].min().item(), 1e-3)\n importance_map = torch.clamp(importance_map.to(torch.float32), min=min_non_zero).to(compute_dtype)\n\n # Perform predictions\n dict_key, output_image_list, count_map_list = None, [], []\n _initialized_ss = -1\n is_tensor_output = True # whether the predictor's output is a tensor (instead of dict/tuple)\n\n # for each patch\n for slice_g in tqdm(range(0, total_slices, sw_batch_size)) if progress else range(0, total_slices, sw_batch_size):\n slice_range = range(slice_g, min(slice_g + sw_batch_size, total_slices))\n unravel_slice = [\n [slice(int(idx / num_win), int(idx / num_win) + 1), slice(None)] + list(slices[idx % num_win])\n for idx in slice_range\n ]\n window_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n #############\n \n boxes = None\n points = None\n if use_point:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n point, point_label = select_points(window_binary_prompt_map.squeeze())\n points = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) \n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n if use_box:\n if num_win == 1:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(window_binary_prompt_map.squeeze()).unsqueeze(0).float().cuda()\n else:\n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n seg_prob_out = predictor(window_data, text, boxes, points) # batched patch segmentation\n #############\n # convert seg_prob_out to tuple seg_prob_tuple, this does not allocate new memory.\n seg_prob_tuple: Tuple[torch.Tensor, ...]\n if isinstance(seg_prob_out, torch.Tensor):\n seg_prob_tuple = (seg_prob_out,)\n elif isinstance(seg_prob_out, Mapping):\n if dict_key is None:\n dict_key = sorted(seg_prob_out.keys()) # track predictor's output keys\n seg_prob_tuple = tuple(seg_prob_out[k] for k in dict_key)\n is_tensor_output = False\n else:\n seg_prob_tuple = ensure_tuple(seg_prob_out)\n is_tensor_output = False\n\n # for each output in multi-output list\n for ss, seg_prob in enumerate(seg_prob_tuple):\n seg_prob = seg_prob.to(device) # BxCxMxNxP or BxCxMxN\n\n # compute zoom scale: out_roi_size/in_roi_size\n zoom_scale = []\n for axis, (img_s_i, out_w_i, in_w_i) in enumerate(\n zip(image_size, seg_prob.shape[2:], window_data.shape[2:])\n ):\n _scale = out_w_i / float(in_w_i)\n if not (img_s_i * _scale).is_integer():\n warnings.warn(\n f\"For spatial axis: {axis}, output[{ss}] will have non-integer shape. Spatial \"\n f\"zoom_scale between output[{ss}] and input is {_scale}. Please pad inputs.\"\n )\n zoom_scale.append(_scale)\n\n if _initialized_ss < ss: # init. 
the ss-th buffer at the first iteration\n # construct multi-resolution outputs\n output_classes = seg_prob.shape[1]\n output_shape = [batch_size, output_classes] + [\n int(image_size_d * zoom_scale_d) for image_size_d, zoom_scale_d in zip(image_size, zoom_scale)\n ]\n # allocate memory to store the full output and the count for overlapping parts\n output_image_list.append(torch.zeros(output_shape, dtype=compute_dtype, device=device))\n count_map_list.append(torch.zeros([1, 1] + output_shape[2:], dtype=compute_dtype, device=device))\n _initialized_ss += 1\n\n # resizing the importance_map\n resizer = Resize(spatial_size=seg_prob.shape[2:], mode=\"nearest\", anti_aliasing=False)\n\n # store the result in the proper location of the full output. Apply weights from importance map.\n for idx, original_idx in zip(slice_range, unravel_slice):\n # zoom roi\n original_idx_zoom = list(original_idx) # 4D for 2D image, 5D for 3D image\n for axis in range(2, len(original_idx_zoom)):\n zoomed_start = original_idx[axis].start * zoom_scale[axis - 2]\n zoomed_end = original_idx[axis].stop * zoom_scale[axis - 2]\n if not zoomed_start.is_integer() or (not zoomed_end.is_integer()):\n warnings.warn(\n f\"For axis-{axis-2} of output[{ss}], the output roi range is not int. \"\n f\"Input roi range is ({original_idx[axis].start}, {original_idx[axis].stop}). \"\n f\"Spatial zoom_scale between output[{ss}] and input is {zoom_scale[axis - 2]}. \"\n f\"Corresponding output roi range is ({zoomed_start}, {zoomed_end}).\\n\"\n f\"Please change overlap ({overlap}) or roi_size ({roi_size[axis-2]}) for axis-{axis-2}. \"\n \"Tips: if overlap*roi_size*zoom_scale is an integer, it usually works.\"\n )\n original_idx_zoom[axis] = slice(int(zoomed_start), int(zoomed_end), None)\n importance_map_zoom = resizer(importance_map.unsqueeze(0))[0].to(compute_dtype)\n # store results and weights\n output_image_list[ss][original_idx_zoom] += importance_map_zoom * seg_prob[idx - slice_g]\n count_map_list[ss][original_idx_zoom] += (\n importance_map_zoom.unsqueeze(0).unsqueeze(0).expand(count_map_list[ss][original_idx_zoom].shape)\n )\n\n # account for any overlapping sections\n for ss in range(len(output_image_list)):\n output_image_list[ss] = (output_image_list[ss] / count_map_list.pop(0)).to(compute_dtype)\n\n # remove padding if image_size smaller than roi_size\n for ss, output_i in enumerate(output_image_list):\n if torch.isnan(output_i).any() or torch.isinf(output_i).any():\n warnings.warn(\"Sliding window inference results contain NaN or Inf.\")\n\n zoom_scale = [\n seg_prob_map_shape_d / roi_size_d for seg_prob_map_shape_d, roi_size_d in zip(output_i.shape[2:], roi_size)\n ]\n\n final_slicing: List[slice] = []\n for sp in range(num_spatial_dims):\n slice_dim = slice(pad_size[sp * 2], image_size_[num_spatial_dims - sp - 1] + pad_size[sp * 2])\n slice_dim = slice(\n int(round(slice_dim.start * zoom_scale[num_spatial_dims - sp - 1])),\n int(round(slice_dim.stop * zoom_scale[num_spatial_dims - sp - 1])),\n )\n final_slicing.insert(0, slice_dim)\n while len(final_slicing) < len(output_i.shape):\n final_slicing.insert(0, slice(None))\n output_image_list[ss] = output_i[final_slicing]\n\n if dict_key is not None: # if output of predictor is a dict\n final_output = dict(zip(dict_key, output_image_list))\n else:\n final_output = tuple(output_image_list) # type: ignore\n return final_output[0] if is_tensor_output else final_output # type: ignore" }, { "identifier": "generate_box", "path": "utils/monai_inferers_utils.py", "snippet": "def 
generate_box(pred_pre, bbox_shift=None):\n meaning_post_label = pred_pre # [h, w, d]\n ones_idx = (meaning_post_label > 0).nonzero(as_tuple=True)\n if all(tensor.nelement() == 0 for tensor in ones_idx):\n bboxes = torch.tensor([-1,-1,-1,-1,-1,-1])\n # print(bboxes, bboxes.shape)\n return bboxes\n min_coords = [dim.min() for dim in ones_idx] # [x_min, y_min, z_min]\n max_coords = [dim.max() for dim in ones_idx] # [x_max, y_max, z_max]\n\n\n if bbox_shift is None:\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor)\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor)\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)\n else:\n # add perturbation to bounding box coordinates\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor + random.randint(-bbox_shift, bbox_shift))\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor + random.randint(-bbox_shift, bbox_shift))\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)" }, { "identifier": "select_points", "path": "utils/monai_inferers_utils.py", "snippet": "def select_points(preds, num_positive_extra=4, num_negative_extra=0, fix_extra_point_num=None):\n spacial_dim = 3\n points = torch.zeros((0, 3))\n labels = torch.zeros((0))\n pos_thred = 0.9\n neg_thred = 0.1\n \n # get pos/net indices\n positive_indices = torch.nonzero(preds > pos_thred, as_tuple=True) # ([pos x], [pos y], [pos z])\n negative_indices = torch.nonzero(preds < neg_thred, as_tuple=True)\n\n ones_idx = (preds > pos_thred).nonzero(as_tuple=True)\n if all(tmp.nelement() == 0 for tmp in ones_idx):\n # all neg\n num_positive_extra = 0\n selected_positive_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n else:\n # random select a pos point\n random_idx = torch.randint(len(positive_indices[0]), (1,))\n selected_positive_point = torch.tensor([positive_indices[i][random_idx] for i in range(spacial_dim)]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.ones((1))))\n\n if num_positive_extra > 0:\n pos_idx_list = torch.randperm(len(positive_indices[0]))[:num_positive_extra]\n extra_positive_points = []\n for pos_idx in pos_idx_list:\n extra_positive_points.append([positive_indices[i][pos_idx] for i in range(spacial_dim)])\n extra_positive_points = torch.tensor(extra_positive_points).reshape(-1, 3)\n points = torch.cat((points, extra_positive_points), dim=0)\n labels = torch.cat((labels, torch.ones((extra_positive_points.shape[0]))))\n\n if num_negative_extra > 0:\n neg_idx_list = torch.randperm(len(negative_indices[0]))[:num_negative_extra]\n extra_negative_points = []\n for neg_idx in neg_idx_list:\n extra_negative_points.append([negative_indices[i][neg_idx] for i in range(spacial_dim)])\n extra_negative_points = torch.tensor(extra_negative_points).reshape(-1, 3)\n points = torch.cat((points, extra_negative_points), dim=0)\n labels = torch.cat((labels, torch.zeros((extra_negative_points.shape[0]))))\n # print('extra_negative_points ', extra_negative_points, 
extra_negative_points.shape)\n # print('==> points ', points.shape, labels)\n \n if fix_extra_point_num is None:\n left_point_num = num_positive_extra + num_negative_extra + 1 - labels.shape[0]\n else:\n left_point_num = fix_extra_point_num + 1 - labels.shape[0]\n\n for _ in range(left_point_num):\n ignore_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, ignore_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n\n return (points, labels)" }, { "identifier": "build_binary_cube", "path": "utils/monai_inferers_utils.py", "snippet": "def build_binary_cube(bbox, binary_cube_shape):\n min_coord = bbox[0][:3].int().tolist()\n max_coord = bbox[0][3:].int().tolist()\n binary_cube = torch.zeros(binary_cube_shape)\n binary_cube[min_coord[0]:max_coord[0]+1, min_coord[1]:max_coord[1]+1, min_coord[2]:max_coord[2]+1] = 1\n return binary_cube" }, { "identifier": "build_binary_points", "path": "utils/monai_inferers_utils.py", "snippet": "def build_binary_points(points, labels, shape):\n binary_points = torch.zeros(shape, dtype=torch.int16)\n binary_points[points[labels == 1, 0].long(), points[labels == 1, 1].long(), points[labels == 1, 2].long()] = 1\n return binary_points" }, { "identifier": "logits2roi_coor", "path": "utils/monai_inferers_utils.py", "snippet": "def logits2roi_coor(spatial_size, logits_global_single):\n # crop predict\n pred_global_single = torch.sigmoid(logits_global_single) > 0.5\n ## get all pos idx\n nonzero_indices = torch.nonzero(pred_global_single)\n if nonzero_indices.shape[0] == 0:\n return None, None, None, None, None, None\n ## get boundary\n min_d, max_d = nonzero_indices[:, 0].min(), nonzero_indices[:, 0].max()\n min_h, max_h = nonzero_indices[:, 1].min(), nonzero_indices[:, 1].max()\n min_w, max_w = nonzero_indices[:, 2].min(), nonzero_indices[:, 2].max()\n ## padding\n crop_d, crop_h, crop_w = max_d - min_d + 1, max_h - min_h + 1, max_w - min_w + 1,\n window_d, window_h, window_w = spatial_size\n padding_d, padding_h, padding_w = max(0, window_d-crop_d), max(0, window_h-crop_h), max(0, window_w-crop_w)\n global_d, global_h, global_w = logits_global_single.shape\n min_d = max(0, min_d - int(padding_d)//2)\n min_h = max(0, min_h - int(padding_h)//2)\n min_w = max(0, min_w - int(padding_w)//2)\n max_d = min(global_d, max_d + int(padding_d)//2)\n max_h = min(global_h, max_h + int(padding_h)//2)\n max_w = min(global_w, max_w + int(padding_w)//2)\n return min_d, min_h, min_w, max_d, max_h, max_w" }, { "identifier": "draw_result", "path": "utils/visualize.py", "snippet": "def draw_result(category, image, bboxes, points, logits, gt3D, spatial_size, work_dir):\n zoom_out_transform = transforms.Compose([\n transforms.AddChanneld(keys=[\"image\", \"label\", \"logits\"]),\n transforms.Resized(keys=[\"image\", \"label\", \"logits\"], spatial_size=spatial_size, mode='nearest-exact')\n ])\n post_item = zoom_out_transform({\n 'image': image,\n 'label': gt3D,\n 'logits': logits\n })\n image, gt3D, logits = post_item['image'][0], post_item['label'][0], post_item['logits'][0]\n preds = torch.sigmoid(logits)\n preds = (preds > 0.5).int()\n\n root_dir=os.path.join(work_dir, f'fig_examples/{category}/') \n\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n if bboxes is not None:\n x1, y1, z1, x2, y2, z2 = bboxes[0].cpu().numpy()\n if points is not None:\n points = (points[0].cpu().numpy(), points[1].cpu().numpy())\n points_ax = points[0][0] # [n, 3]\n points_label = points[1][0] # [n]\n\n for j in range(image.shape[0]):\n img_2d 
= image[j, :, :].detach().cpu().numpy()\n preds_2d = preds[j, :, :].detach().cpu().numpy()\n label_2d = gt3D[j, :, :].detach().cpu().numpy()\n if np.sum(label_2d) == 0 or np.sum(preds_2d) == 0:\n continue\n\n img_2d = img_2d * 255\n # orginal img\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.imshow(img_2d, cmap='gray')\n ax1.set_title('Image with prompt') \n ax1.axis('off')\n\n # gt\n ax2.imshow(img_2d, cmap='gray')\n show_mask(label_2d, ax2)\n ax2.set_title('Ground truth') \n ax2.axis('off')\n\n # preds\n ax3.imshow(img_2d, cmap='gray')\n show_mask(preds_2d, ax3)\n ax3.set_title('Prediction') \n ax3.axis('off')\n\n # boxes\n if bboxes is not None:\n if j >= x1 and j <= x2:\n show_box((z1, y1, z2, y2), ax1)\n # points\n if points is not None:\n for point_idx in range(points_label.shape[0]):\n point = points_ax[point_idx]\n label = points_label[point_idx] # [1]\n if j == point[0]:\n show_points(point, label, ax1)\n \n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)\n plt.savefig(os.path.join(root_dir, f'{category}_{j}.png'), bbox_inches='tight')\n plt.close()" } ]
import argparse import os import torch import torch.nn.functional as F import json import monai.transforms as transforms from segment_anything_volumetric import sam_model_registry from network.model import SegVol from data_process.demo_data_process import process_ct_gt from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor from utils.visualize import draw_result
11656
logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad(): logits_single_cropped = sliding_window_inference( image_single_cropped.cuda(), prompt_reflection, args.spatial_size, 1, segvol_model, args.infer_overlap, text=text_single, use_box=args.use_box_prompt, use_point=args.use_point_prompt, ) logits_single_cropped = logits_single_cropped.cpu().squeeze() logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] = logits_single_cropped zoom_in_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_in_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'===> zoom out dice {zoom_out_dice:.4f} -> zoom-out-zoom-in dice {zoom_in_dice:.4f} <===') return logits_labels_record def inference_single_ct(args, segvol_model, data_item, categories): segvol_model.eval() image, gt3D = data_item["image"].float(), data_item["label"] image_zoom_out, gt3D__zoom_out = data_item["zoom_out_image"].float(), data_item['zoom_out_label'] logits_labels_record = zoom_in_zoom_out( args, segvol_model, image.unsqueeze(0), image_zoom_out.unsqueeze(0), gt3D.unsqueeze(0), gt3D__zoom_out.unsqueeze(0), categories=categories) # visualize if args.visualize: for target, values in logits_labels_record.items(): dice_score, image, point_prompt, box_prompt, logits, labels = values print(f'{target} result with Dice score {dice_score:.4f} visualizing') draw_result(target + f"-Dice {dice_score:.4f}", image, box_prompt, point_prompt, logits, labels, args.spatial_size, args.work_dir) def main(args): gpu = 0 torch.cuda.set_device(gpu) # build model sam_model = sam_model_registry['vit'](args=args) segvol_model = SegVol( image_encoder=sam_model.image_encoder, mask_decoder=sam_model.mask_decoder, prompt_encoder=sam_model.prompt_encoder, clip_ckpt=args.clip_ckpt, roi_size=args.spatial_size, 
patch_size=args.patch_size, test_mode=args.test_mode, ).cuda() segvol_model = torch.nn.DataParallel(segvol_model, device_ids=[gpu]) # load param if os.path.isfile(args.resume): ## Map model to be loaded to specified single GPU loc = 'cuda:{}'.format(gpu) checkpoint = torch.load(args.resume, map_location=loc) segvol_model.load_state_dict(checkpoint['model'], strict=True) print("loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch'])) # load demo config with open(args.demo_config, 'r') as file: config_dict = json.load(file) ct_path, gt_path, categories = config_dict['demo_case']['ct_path'], config_dict['demo_case']['gt_path'], config_dict['categories'] # preprocess for data
def set_parse(): # %% set up parser parser = argparse.ArgumentParser() parser.add_argument("--test_mode", default=True, type=bool) parser.add_argument("--resume", type = str, default = '') parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap") parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple) parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple) parser.add_argument('-work_dir', type=str, default='./work_dir') ### demo parser.add_argument('--demo_config', type=str, required=True) parser.add_argument("--clip_ckpt", type = str, default = './config/clip') args = parser.parse_args() return args def dice_score(preds, labels): # on GPU assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape) predict = preds.view(1, -1) target = labels.view(1, -1) if target.shape[1] < 1e8: predict = predict.cuda() target = target.cuda() predict = torch.sigmoid(predict) predict = torch.where(predict > 0.5, 1., 0.) tp = torch.sum(torch.mul(predict, target)) den = torch.sum(predict) + torch.sum(target) + 1 dice = 2 * tp / den if target.shape[1] < 1e8: predict = predict.cpu() target = target.cpu() return dice def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None): logits_labels_record = {} image_single_resize = image_resize image_single = image[0,0] ori_shape = image_single.shape for item_idx in range(len(categories)): # get label to generate prompts label_single = gt3D[0][item_idx] label_single_resize = gt3D_resize[0][item_idx] # skip meaningless categories if torch.sum(label_single) == 0: print('No object, skip') continue # generate prompts text_single = categories[item_idx] if args.use_text_prompt else None if categories is not None: print(f'inference |{categories[item_idx]}| target...') points_single = None box_single = None if args.use_point_prompt: point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3) points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape) if args.use_box_prompt: box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) #################### # zoom-out inference: print('--- zoom out inference ---') print(f'use text-prompt [{text_single!=None}], use box-prompt [{box_single!=None}], use point-prompt [{points_single!=None}]') with torch.no_grad(): logits_global_single = segvol_model(image_single_resize.cuda(), text=text_single, boxes=box_single, points=points_single) # resize back global logits logits_global_single = F.interpolate( logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not 
args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad(): logits_single_cropped = sliding_window_inference( image_single_cropped.cuda(), prompt_reflection, args.spatial_size, 1, segvol_model, args.infer_overlap, text=text_single, use_box=args.use_box_prompt, use_point=args.use_point_prompt, ) logits_single_cropped = logits_single_cropped.cpu().squeeze() logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] = logits_single_cropped zoom_in_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_in_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'===> zoom out dice {zoom_out_dice:.4f} -> zoom-out-zoom-in dice {zoom_in_dice:.4f} <===') return logits_labels_record def inference_single_ct(args, segvol_model, data_item, categories): segvol_model.eval() image, gt3D = data_item["image"].float(), data_item["label"] image_zoom_out, gt3D__zoom_out = data_item["zoom_out_image"].float(), data_item['zoom_out_label'] logits_labels_record = zoom_in_zoom_out( args, segvol_model, image.unsqueeze(0), image_zoom_out.unsqueeze(0), gt3D.unsqueeze(0), gt3D__zoom_out.unsqueeze(0), categories=categories) # visualize if args.visualize: for target, values in logits_labels_record.items(): dice_score, image, point_prompt, box_prompt, logits, labels = values print(f'{target} result with Dice score {dice_score:.4f} visualizing') draw_result(target + f"-Dice {dice_score:.4f}", image, box_prompt, point_prompt, logits, labels, args.spatial_size, args.work_dir) def main(args): gpu = 0 torch.cuda.set_device(gpu) # build model sam_model = sam_model_registry['vit'](args=args) segvol_model = SegVol( image_encoder=sam_model.image_encoder, mask_decoder=sam_model.mask_decoder, prompt_encoder=sam_model.prompt_encoder, clip_ckpt=args.clip_ckpt, roi_size=args.spatial_size, patch_size=args.patch_size, test_mode=args.test_mode, ).cuda() segvol_model = torch.nn.DataParallel(segvol_model, device_ids=[gpu]) # load param if os.path.isfile(args.resume): ## Map model to be loaded to specified single GPU loc = 'cuda:{}'.format(gpu) checkpoint = torch.load(args.resume, map_location=loc) segvol_model.load_state_dict(checkpoint['model'], strict=True) print("loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch'])) # load demo config with open(args.demo_config, 'r') as file: config_dict = json.load(file) ct_path, gt_path, categories = config_dict['demo_case']['ct_path'], config_dict['demo_case']['gt_path'], config_dict['categories'] # preprocess for data
data_item = process_ct_gt(ct_path, gt_path, categories, args.spatial_size)
2
2023-11-10 08:25:37+00:00
16k
theroyallab/tabbyAPI
main.py
[ { "identifier": "convert_args_to_dict", "path": "args.py", "snippet": "def convert_args_to_dict(args: argparse.Namespace, parser: argparse.ArgumentParser):\n \"\"\"Broad conversion of surface level arg groups to dictionaries\"\"\"\n\n arg_groups = {}\n for group in parser._action_groups:\n group_dict = {}\n for arg in group._group_actions:\n value = getattr(args, arg.dest, None)\n if value is not None:\n group_dict[arg.dest] = value\n\n arg_groups[group.title] = group_dict\n\n return arg_groups" }, { "identifier": "init_argparser", "path": "args.py", "snippet": "def init_argparser():\n \"\"\"Creates an argument parser that any function can use\"\"\"\n\n parser = argparse.ArgumentParser(\n epilog=\"These args are only for a subset of the config. \"\n + \"Please edit config.yml for all options!\"\n )\n add_network_args(parser)\n add_model_args(parser)\n add_logging_args(parser)\n add_config_args(parser)\n\n return parser" }, { "identifier": "check_admin_key", "path": "auth.py", "snippet": "def check_admin_key(x_admin_key: str = Header(None), authorization: str = Header(None)):\n \"\"\"Check if the admin key is valid.\"\"\"\n\n # Allow request if auth is disabled\n if DISABLE_AUTH:\n return\n\n if x_admin_key:\n if not AUTH_KEYS.verify_key(x_admin_key, \"admin_key\"):\n raise HTTPException(401, \"Invalid admin key\")\n return x_admin_key\n\n if authorization:\n split_key = authorization.split(\" \")\n if len(split_key) < 2:\n raise HTTPException(401, \"Invalid admin key\")\n if split_key[0].lower() != \"bearer\" or not AUTH_KEYS.verify_key(\n split_key[1], \"admin_key\"\n ):\n raise HTTPException(401, \"Invalid admin key\")\n return authorization\n\n raise HTTPException(401, \"Please provide an admin key\")" }, { "identifier": "check_api_key", "path": "auth.py", "snippet": "def check_api_key(x_api_key: str = Header(None), authorization: str = Header(None)):\n \"\"\"Check if the API key is valid.\"\"\"\n\n # Allow request if auth is disabled\n if DISABLE_AUTH:\n return\n\n if x_api_key:\n if not AUTH_KEYS.verify_key(x_api_key, \"api_key\"):\n raise HTTPException(401, \"Invalid API key\")\n return x_api_key\n\n if authorization:\n split_key = authorization.split(\" \")\n if len(split_key) < 2:\n raise HTTPException(401, \"Invalid API key\")\n if split_key[0].lower() != \"bearer\" or not AUTH_KEYS.verify_key(\n split_key[1], \"api_key\"\n ):\n raise HTTPException(401, \"Invalid API key\")\n\n return authorization\n\n raise HTTPException(401, \"Please provide an API key\")" }, { "identifier": "load_auth_keys", "path": "auth.py", "snippet": "def load_auth_keys(disable_from_config: bool):\n \"\"\"Load the authentication keys from api_tokens.yml. If the file does not\n exist, generate new keys and save them to api_tokens.yml.\"\"\"\n global AUTH_KEYS\n global DISABLE_AUTH\n\n DISABLE_AUTH = disable_from_config\n if disable_from_config:\n logger.warning(\n \"Disabling authentication makes your instance vulnerable. 
\"\n \"Set the `disable_auth` flag to False in config.yml if you \"\n \"want to share this instance with others.\"\n )\n\n return\n\n try:\n with open(\"api_tokens.yml\", \"r\", encoding=\"utf8\") as auth_file:\n auth_keys_dict = yaml.safe_load(auth_file)\n AUTH_KEYS = AuthKeys.model_validate(auth_keys_dict)\n except OSError:\n new_auth_keys = AuthKeys(\n api_key=secrets.token_hex(16), admin_key=secrets.token_hex(16)\n )\n AUTH_KEYS = new_auth_keys\n\n with open(\"api_tokens.yml\", \"w\", encoding=\"utf8\") as auth_file:\n yaml.safe_dump(AUTH_KEYS.model_dump(), auth_file, default_flow_style=False)\n\n logger.info(\n f\"Your API key is: {AUTH_KEYS.api_key}\\n\"\n f\"Your admin key is: {AUTH_KEYS.admin_key}\\n\\n\"\n \"If these keys get compromised, make sure to delete api_tokens.yml \"\n \"and restart the server. Have fun!\"\n )" }, { "identifier": "override_config_from_args", "path": "config.py", "snippet": "def override_config_from_args(args: dict):\n \"\"\"Overrides the config based on a dict representation of args\"\"\"\n\n config_override = unwrap(args.get(\"options\", {}).get(\"config\"))\n if config_override:\n logger.info(\"Attempting to override config.yml from args.\")\n read_config_from_file(pathlib.Path(config_override))\n return\n\n # Network config\n network_override = args.get(\"network\")\n if network_override:\n network_config = get_network_config()\n GLOBAL_CONFIG[\"network\"] = {**network_config, **network_override}\n\n # Model config\n model_override = args.get(\"model\")\n if model_override:\n model_config = get_model_config()\n GLOBAL_CONFIG[\"model\"] = {**model_config, **model_override}\n\n # Logging config\n logging_override = args.get(\"logging\")\n if logging_override:\n logging_config = get_gen_logging_config()\n GLOBAL_CONFIG[\"logging\"] = {\n **logging_config,\n **{k.replace(\"log_\", \"\"): logging_override[k] for k in logging_override},\n }" }, { "identifier": "read_config_from_file", "path": "config.py", "snippet": "def read_config_from_file(config_path: pathlib.Path):\n \"\"\"Sets the global config from a given file path\"\"\"\n global GLOBAL_CONFIG\n\n try:\n with open(str(config_path.resolve()), \"r\", encoding=\"utf8\") as config_file:\n GLOBAL_CONFIG = unwrap(yaml.safe_load(config_file), {})\n except Exception as exc:\n logger.error(\n \"The YAML config couldn't load because of the following error: \"\n f\"\\n\\n{exc}\"\n \"\\n\\nTabbyAPI will start anyway and not parse this config file.\"\n )\n GLOBAL_CONFIG = {}" }, { "identifier": "get_gen_logging_config", "path": "config.py", "snippet": "def get_gen_logging_config():\n \"\"\"Returns the generation logging config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"logging\"), {})" }, { "identifier": "get_model_config", "path": "config.py", "snippet": "def get_model_config():\n \"\"\"Returns the model config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"model\"), {})" }, { "identifier": "get_draft_model_config", "path": "config.py", "snippet": "def get_draft_model_config():\n \"\"\"Returns the draft model config from the global config\"\"\"\n model_config = unwrap(GLOBAL_CONFIG.get(\"model\"), {})\n return unwrap(model_config.get(\"draft\"), {})" }, { "identifier": "get_lora_config", "path": "config.py", "snippet": "def get_lora_config():\n \"\"\"Returns the lora config from the global config\"\"\"\n model_config = unwrap(GLOBAL_CONFIG.get(\"model\"), {})\n return unwrap(model_config.get(\"lora\"), {})" }, { "identifier": "get_network_config", "path": "config.py", 
"snippet": "def get_network_config():\n \"\"\"Returns the network config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"network\"), {})" }, { "identifier": "call_with_semaphore", "path": "generators.py", "snippet": "async def call_with_semaphore(callback: partialmethod):\n if inspect.iscoroutinefunction(callback):\n return await callback()\n async with generate_semaphore:\n return callback()" }, { "identifier": "generate_with_semaphore", "path": "generators.py", "snippet": "async def generate_with_semaphore(generator: AsyncGenerator):\n \"\"\"Generate with a semaphore.\"\"\"\n async with generate_semaphore:\n if inspect.isasyncgenfunction:\n async for result in generator():\n yield result\n else:\n for result in generator():\n yield result" }, { "identifier": "ModelContainer", "path": "model.py", "snippet": "class ModelContainer:\n \"\"\"The model container class for ExLlamaV2 models.\"\"\"\n\n config: Optional[ExLlamaV2Config] = None\n draft_config: Optional[ExLlamaV2Config] = None\n model: Optional[ExLlamaV2] = None\n draft_model: Optional[ExLlamaV2] = None\n cache: Optional[ExLlamaV2Cache] = None\n draft_cache: Optional[ExLlamaV2Cache] = None\n tokenizer: Optional[ExLlamaV2Tokenizer] = None\n generator: Optional[ExLlamaV2StreamingGenerator] = None\n prompt_template: Optional[PromptTemplate] = None\n\n cache_fp8: bool = False\n gpu_split_auto: bool = True\n gpu_split: Optional[list] = None\n use_cfg: bool = False\n\n active_loras: List[ExLlamaV2Lora] = []\n\n def __init__(self, model_directory: pathlib.Path, quiet=False, **kwargs):\n \"\"\"\n Create model container\n\n Args:\n model_dir (int): Model directory containing config.json,\n tokenizer.model etc.\n quiet (bool): Suppress console output\n load_progress_callback (function, optional): A function to call for\n each module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int,\n loading_draft: bool)\n **kwargs:\n `cache_mode` (str): Sets cache mode, \"FP16\" or \"FP8\"\n (defaulf: \"FP16\")\n 'max_seq_len' (int): Override model's default max sequence\n length (default: 4096)\n 'rope_scale' (float): Set RoPE scaling factor for model\n (default: 1.0)\n 'rope_alpha' (float): Set RoPE alpha (NTK) factor for model\n (default: 1.0)\n 'prompt_template' (str): Manually sets the prompt template for\n this model (default: None)\n 'chunk_size' (int): Sets the maximum chunk size for the model\n (default: 2048)\n Inferencing in chunks reduces overall VRAM overhead by\n processing very long sequences in smaller batches. This\n limits the size of temporary buffers needed for the hidden\n state and attention weights.\n 'draft_model_dir' (str): Draft model directory\n 'draft_rope_scale' (float): Set RoPE scaling factor for draft\n model (default: 1.0)\n 'draft_rope_alpha' (float): RoPE alpha (NTK) factor for draft\n model. By default, the draft model's alpha value is\n calculated automatically to scale to the size of the\n full model.\n 'lora_dir' (str): LoRA directory\n 'loras' (list[dict]): List of loras to be loaded, consisting of\n 'name' and 'scaling'\n 'gpu_split_auto' (bool): Automatically split model across\n available devices (default: True)\n 'gpu_split' (list[float]): Allocation for weights and (some)\n tensors, per device\n 'no_flash_attn' (bool): Turns off flash attention\n (increases vram usage) (default: False)\n 'use_cfg\" (bool): Enables CFG support. 
Disables flash attention\n (default: False)\n \"\"\"\n\n self.quiet = quiet\n\n self.cache_fp8 = \"cache_mode\" in kwargs and kwargs[\"cache_mode\"] == \"FP8\"\n self.gpu_split = kwargs.get(\"gpu_split\")\n self.gpu_split_auto = unwrap(kwargs.get(\"gpu_split_auto\"), True)\n\n self.config = ExLlamaV2Config()\n self.config.model_dir = str(model_directory.resolve())\n\n # Make the max seq len 4096 before preparing the config\n # This is a better default than 2038\n self.config.max_seq_len = 4096\n self.config.prepare()\n\n # Then override the base_seq_len if present\n override_base_seq_len = kwargs.get(\"override_base_seq_len\")\n if override_base_seq_len:\n self.config.max_seq_len = override_base_seq_len\n\n # Grab the base model's sequence length before overrides for\n # rope calculations\n base_seq_len = self.config.max_seq_len\n\n # Set the target seq len if present\n target_max_seq_len = kwargs.get(\"max_seq_len\")\n if target_max_seq_len:\n self.config.max_seq_len = target_max_seq_len\n\n # Set the rope scale\n self.config.scale_pos_emb = unwrap(\n kwargs.get(\"rope_scale\"), self.config.scale_pos_emb\n )\n\n # Automatically calculate rope alpha\n self.config.scale_alpha_value = unwrap(\n kwargs.get(\"rope_alpha\"), self.calculate_rope_alpha(base_seq_len)\n )\n\n if hasattr(ExLlamaV2Sampler.Settings, \"cfg_scale\"):\n self.use_cfg = unwrap(kwargs.get(\"use_cfg\"), False)\n else:\n logger.warning(\n \"CFG is not supported by the currently installed ExLlamaV2 version.\"\n )\n\n # Turn off flash attention if CFG is on\n # Workaround until batched FA2 is fixed in exllamav2 upstream\n self.config.no_flash_attn = (\n True if self.use_cfg else unwrap(kwargs.get(\"no_flash_attention\"), False)\n )\n\n # low_mem is currently broken in exllamav2. Don't use it until it's\n # fixed.\n \"\"\"\n if \"low_mem\" in kwargs and kwargs[\"low_mem\"]:\n self.config.set_low_mem()\n \"\"\"\n\n # Set prompt template override if provided\n prompt_template_name = kwargs.get(\"prompt_template\")\n if prompt_template_name:\n logger.info(\"Loading prompt template with name \" f\"{prompt_template_name}\")\n # Read the template\n self.prompt_template = get_template_from_file(prompt_template_name)\n else:\n # Then try finding the template from the tokenizer_config.json\n self.prompt_template = get_template_from_model_json(\n pathlib.Path(self.config.model_dir) / \"tokenizer_config.json\",\n \"chat_template\",\n \"from_tokenizer_config\",\n )\n\n # Try finding the chat template from the model's config.json\n # TODO: This may not even be used with huggingface models,\n # mark for removal.\n if self.prompt_template is None:\n self.prompt_template = get_template_from_model_json(\n pathlib.Path(self.config.model_config),\n \"chat_template\",\n \"from_model_config\",\n )\n\n # If that fails, attempt fetching from model name\n if self.prompt_template is None:\n template_match = find_template_from_model(model_directory)\n if template_match:\n self.prompt_template = get_template_from_file(template_match)\n\n # Catch all for template lookup errors\n if self.prompt_template:\n logger.info(\n f\"Using template {self.prompt_template.name} \" \"for chat completions.\"\n )\n else:\n logger.warning(\n \"Chat completions are disabled because a prompt \"\n \"template wasn't provided or auto-detected.\"\n )\n\n # Set num of experts per token if provided\n num_experts_override = kwargs.get(\"num_experts_per_token\")\n if num_experts_override:\n if hasattr(self.config, \"num_experts_per_token\"):\n self.config.num_experts_per_token = 
num_experts_override\n else:\n logger.warning(\n \"MoE experts per token override is not \"\n \"supported by the current ExLlamaV2 version.\"\n )\n\n chunk_size = min(\n unwrap(kwargs.get(\"chunk_size\"), 2048), self.config.max_seq_len\n )\n self.config.max_input_len = chunk_size\n self.config.max_attn_size = chunk_size**2\n\n draft_args = unwrap(kwargs.get(\"draft\"), {})\n draft_model_name = draft_args.get(\"draft_model_name\")\n enable_draft = draft_args and draft_model_name\n\n # Always disable draft if params are incorrectly configured\n if draft_args and draft_model_name is None:\n logger.warning(\n \"Draft model is disabled because a model name \"\n \"wasn't provided. Please check your config.yml!\"\n )\n enable_draft = False\n\n if enable_draft:\n self.draft_config = ExLlamaV2Config()\n draft_model_path = pathlib.Path(\n unwrap(draft_args.get(\"draft_model_dir\"), \"models\")\n )\n draft_model_path = draft_model_path / draft_model_name\n\n self.draft_config.model_dir = str(draft_model_path.resolve())\n self.draft_config.prepare()\n\n self.draft_config.scale_pos_emb = unwrap(\n draft_args.get(\"draft_rope_scale\"), 1.0\n )\n\n # Automatically calculate draft rope alpha\n self.draft_config.scale_alpha_value = unwrap(\n draft_args.get(\"draft_rope_alpha\"),\n self.calculate_rope_alpha(self.draft_config.max_seq_len),\n )\n self.draft_config.max_seq_len = self.config.max_seq_len\n\n if \"chunk_size\" in kwargs:\n self.draft_config.max_input_len = kwargs[\"chunk_size\"]\n self.draft_config.max_attn_size = kwargs[\"chunk_size\"] ** 2\n\n def calculate_rope_alpha(self, base_seq_len):\n \"\"\"Calculate the rope alpha value for a given sequence length.\"\"\"\n ratio = self.config.max_seq_len / base_seq_len\n\n # Default to a 1 alpha if the sequence length is ever less\n # than or equal to 1\n if ratio <= 1.0:\n alpha = 1\n else:\n alpha = -0.13436 + 0.80541 * ratio + 0.28833 * ratio**2\n return alpha\n\n def get_model_path(self, is_draft: bool = False):\n \"\"\"Get the path for this model.\"\"\"\n model_path = pathlib.Path(\n self.draft_config.model_dir if is_draft else self.config.model_dir\n )\n return model_path\n\n def load(self, progress_callback=None):\n \"\"\"\n Load model\n\n Args:\n progress_callback (function, optional): A function to call for each\n module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int)\n \"\"\"\n for _ in self.load_gen(progress_callback):\n pass\n\n def load_loras(self, lora_directory: pathlib.Path, **kwargs):\n \"\"\"\n Load loras\n \"\"\"\n\n loras = unwrap(kwargs.get(\"loras\"), [])\n success: List[str] = []\n failure: List[str] = []\n\n for lora in loras:\n lora_name = lora.get(\"name\")\n lora_scaling = unwrap(lora.get(\"scaling\"), 1.0)\n\n if lora_name is None:\n logger.warning(\n \"One of your loras does not have a name. Please check your \"\n \"config.yml! 
Skipping lora load.\"\n )\n failure.append(lora_name)\n continue\n\n logger.info(f\"Loading lora: {lora_name} at scaling {lora_scaling}\")\n lora_path = lora_directory / lora_name\n # FIXME(alpin): Does self.model need to be passed here?\n self.active_loras.append(\n ExLlamaV2Lora.from_directory(self.model, lora_path, lora_scaling)\n )\n logger.info(f\"Lora successfully loaded: {lora_name}\")\n success.append(lora_name)\n\n # Return success and failure names\n return {\"success\": success, \"failure\": failure}\n\n def load_gen(self, progress_callback=None):\n \"\"\"\n Load model, generator function\n\n Args:\n progress_callback (function, optional): A function to call for each\n module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int)\n \"\"\"\n\n # Load tokenizer\n self.tokenizer = ExLlamaV2Tokenizer(self.config)\n\n # Load draft model if a config is present\n if self.draft_config:\n self.draft_model = ExLlamaV2(self.draft_config)\n if not self.quiet:\n logger.info(\"Loading draft model: \" + self.draft_config.model_dir)\n\n self.draft_cache = ExLlamaV2Cache(self.draft_model, lazy=True)\n reserve = [AUTO_SPLIT_RESERVE_BYTES] + [0] * 16\n yield from self.draft_model.load_autosplit_gen(\n self.draft_cache,\n reserve_vram=reserve,\n last_id_only=True,\n callback_gen=progress_callback,\n )\n\n # Test VRAM allocation with a full-length forward pass\n input_ids = torch.zeros((1, self.config.max_input_len), dtype=torch.long)\n self.draft_model.forward(input_ids, cache=self.cache, preprocess_only=True)\n\n # Load model\n self.model = ExLlamaV2(self.config)\n if not self.quiet:\n logger.info(\"Loading model: \" + self.config.model_dir)\n\n if not self.gpu_split_auto:\n for value in self.model.load_gen(\n self.gpu_split, callback_gen=progress_callback\n ):\n if isinstance(value, str):\n yield value\n\n batch_size = 2 if self.use_cfg else 1\n if self.cache_fp8:\n self.cache = ExLlamaV2Cache_8bit(\n self.model, lazy=self.gpu_split_auto, batch_size=batch_size\n )\n else:\n self.cache = ExLlamaV2Cache(\n self.model, lazy=self.gpu_split_auto, batch_size=batch_size\n )\n\n if self.gpu_split_auto:\n reserve = [AUTO_SPLIT_RESERVE_BYTES] + [0] * 16\n yield from self.model.load_autosplit_gen(\n self.cache,\n reserve_vram=reserve,\n last_id_only=True,\n callback_gen=progress_callback,\n )\n\n # Test VRAM allocation with a full-length forward pass\n input_ids = torch.zeros((1, self.config.max_input_len), dtype=torch.long)\n self.model.forward(input_ids, cache=self.cache, preprocess_only=True)\n\n # Create generator\n self.generator = ExLlamaV2StreamingGenerator(\n self.model,\n self.cache,\n self.tokenizer,\n self.draft_model,\n self.draft_cache,\n )\n\n logger.info(\"Model successfully loaded.\")\n\n def unload(self, loras_only: bool = False):\n \"\"\"\n Free all VRAM resources used by this model\n \"\"\"\n\n for lora in self.active_loras:\n lora.unload()\n\n self.active_loras = []\n\n # Unload the entire model if not just unloading loras\n if not loras_only:\n if self.model:\n self.model.unload()\n self.model = None\n\n if self.draft_model:\n self.draft_model.unload()\n self.draft_model = None\n\n self.config = None\n self.cache = None\n self.tokenizer = None\n self.generator = None\n\n gc.collect()\n torch.cuda.empty_cache()\n\n def get_tokens(self, text: Optional[str], ids: Optional[List[int]], **kwargs):\n \"\"\"Common function for token operations\"\"\"\n if text:\n # Assume token encoding\n return self.tokenizer.encode(\n text,\n add_bos=unwrap(kwargs.get(\"add_bos_token\"), 
True),\n encode_special_tokens=unwrap(kwargs.get(\"encode_special_tokens\"), True),\n )\n if ids:\n # Assume token decoding\n ids = torch.tensor([ids])\n return self.tokenizer.decode(\n ids,\n decode_special_tokens=unwrap(kwargs.get(\"decode_special_tokens\"), True),\n )[0]\n\n return None\n\n def get_special_tokens(self, add_bos_token: bool, ban_eos_token: bool):\n return {\n \"bos_token\": self.tokenizer.bos_token if add_bos_token else \"\",\n \"eos_token\": self.tokenizer.eos_token if not ban_eos_token else \"\",\n \"pad_token\": self.tokenizer.pad_token,\n \"unk_token\": self.tokenizer.unk_token,\n }\n\n def check_unsupported_settings(self, **kwargs):\n # Warn of unsupported settings if the setting is enabled\n if (unwrap(kwargs.get(\"mirostat\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"mirostat\"\n ):\n logger.warning(\n \"Mirostat sampling is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"min_p\"), 0.0)) not in [0.0, 1.0] and not hasattr(\n ExLlamaV2Sampler.Settings, \"min_p\"\n ):\n logger.warning(\n \"Min-P sampling is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"tfs\"), 0.0)) not in [0.0, 1.0] and not hasattr(\n ExLlamaV2Sampler.Settings, \"tfs\"\n ):\n logger.warning(\n \"Tail-free sampling (TFS) is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"temperature_last\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"temperature_last\"\n ):\n logger.warning(\n \"Temperature last is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"top_a\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"top_a\"\n ):\n logger.warning(\n \"Top-A is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"presence_penalty\"), 0.0)) != 0.0 and not hasattr(\n ExLlamaV2Sampler.Settings, \"token_presence_penalty\"\n ):\n logger.warning(\n \"Presence penalty is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n def generate(self, prompt: str, **kwargs):\n \"\"\"Generate a response to a prompt\"\"\"\n generation = list(self.generate_gen(prompt, **kwargs))\n if generation:\n response = \"\".join(map(lambda chunk: chunk[0], generation))\n return response, generation[-1][1], generation[-1][2]\n\n return \"\", 0, 0\n\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n def generate_gen(self, prompt: str, **kwargs):\n \"\"\"\n Create generator function for prompt completion\n\n Args:\n prompt (str): Input prompt\n **kwargs:\n 'token_healing' (bool): Use token healing (default: False)\n 'temperature' (float): Sampling temperature (default: 1.0)\n 'temperature_last' (bool): Apply temperature after all other\n samplers (default: False)\n 'top_k' (int): Sampling top-K (default: 0)\n 'top_p' (float): Sampling top-P (default: 1.0)\n 'min_p' (float): Sampling min-P (default: 0.0)\n 'tfs' (float): Tail-free sampling (default: 0.0)\n 'typical' (float): Sampling typical (default: 0.0)\n 'mirostat' (bool): Use Mirostat (default: False)\n 'mirostat_tau' (float) Mirostat tau parameter (default: 1.5)\n 'mirostat_eta' (float) Mirostat eta parameter (default: 0.1)\n 'frequency_penalty' (float): Token frequency penalty (default: 0.0)\n 'presence_penalty' (float): Token presence penalty (default: 0.0)\n 'repetition_penalty' (float): Token repetition penalty\n (default: 1.15)\n 'penalty_range' 
(int): Penalty range\n (default: whole context)\n 'repetition_decay' (int): Repetition penalty range\n (default: same as range)\n 'stop' (List[Union[str, int]]): List of stop strings/tokens to\n end response (default: [EOS])\n 'max_tokens' (int): Max no. tokens in response (default: 150)\n 'add_bos_token' (bool): Adds the BOS token to the start of the\n prompt (default: True)\n 'ban_eos_token' (bool): Bans the EOS token from generation\n (default: False)\n 'logit_bias' (Dict[int, float]): Biases specific tokens to\n either show up more or less (default: None)\n 'stream_interval' (float): Interval in seconds between each\n output chunk (default: immediate)\n 'generate_window' (int): Space to reserve at the end of the\n model's context when generating. Rolls context window by\n the same amount if context length is exceeded to allow\n generating pastthe models max_seq_len.\n \"\"\"\n\n token_healing = unwrap(kwargs.get(\"token_healing\"), False)\n max_tokens = unwrap(kwargs.get(\"max_tokens\"), 150)\n stream_interval = unwrap(kwargs.get(\"stream_interval\"), 0)\n generate_window = min(unwrap(kwargs.get(\"generate_window\"), 512), max_tokens)\n\n # Sampler settings\n gen_settings = ExLlamaV2Sampler.Settings()\n\n self.check_unsupported_settings(**kwargs)\n\n # Apply settings\n gen_settings.temperature = unwrap(kwargs.get(\"temperature\"), 1.0)\n gen_settings.temperature_last = unwrap(kwargs.get(\"temperature_last\"), False)\n gen_settings.top_k = unwrap(kwargs.get(\"top_k\"), 0)\n gen_settings.top_p = unwrap(kwargs.get(\"top_p\"), 1.0)\n gen_settings.top_a = unwrap(kwargs.get(\"top_a\"), 0.0)\n gen_settings.min_p = unwrap(kwargs.get(\"min_p\"), 0.0)\n gen_settings.tfs = unwrap(kwargs.get(\"tfs\"), 1.0)\n gen_settings.typical = unwrap(kwargs.get(\"typical\"), 1.0)\n gen_settings.mirostat = unwrap(kwargs.get(\"mirostat\"), False)\n\n # Default tau and eta fallbacks don't matter if mirostat is off\n gen_settings.mirostat_tau = unwrap(kwargs.get(\"mirostat_tau\"), 1.5)\n gen_settings.mirostat_eta = unwrap(kwargs.get(\"mirostat_eta\"), 0.1)\n\n # Set CFG scale and negative prompt\n cfg_scale = unwrap(kwargs.get(\"cfg_scale\"), 1.0)\n negative_prompt = None\n if cfg_scale not in [None, 1.0]:\n if self.use_cfg:\n gen_settings.cfg_scale = cfg_scale\n\n # If the negative prompt is empty, use the BOS token\n negative_prompt = unwrap(\n kwargs.get(\"negative_prompt\"), self.tokenizer.bos_token\n )\n else:\n logger.warn(\n \"CFG is currently disabled. 
\"\n + \"Please reload your model with use_cfg = True.\",\n )\n\n gen_settings.token_presence_penalty = unwrap(\n kwargs.get(\"presence_penalty\"), 0.0\n )\n gen_settings.token_repetition_penalty = unwrap(\n kwargs.get(\"repetition_penalty\"), 1.0\n )\n\n # Applies for all penalties despite being called token_repetition_range\n gen_settings.token_repetition_range = unwrap(\n kwargs.get(\"penalty_range\"), self.config.max_seq_len\n )\n auto_scale_penalty_range = False\n\n frequency_penalty = unwrap(kwargs.get(\"frequency_penalty\"), 0.0)\n if hasattr(gen_settings, \"token_frequency_penalty\"):\n gen_settings.token_frequency_penalty = frequency_penalty\n\n # Dynamically scale penalty range to output tokens\n # Only do this if freq/pres pen is enabled\n # and the repetition range is -1\n auto_scale_penalty_range = (\n gen_settings.token_frequency_penalty != 0\n or gen_settings.token_presence_penalty != 0\n ) and gen_settings.token_repetition_range == -1\n elif frequency_penalty != 0.0:\n logger.warning(\n \"Frequency penalty is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n # Override the repetition penalty value if it isn't set already\n # if the user is on an older exl2 version\n if unwrap(gen_settings.token_repetition_penalty, 1.0) == 1.0:\n gen_settings.token_repetition_penalty = frequency_penalty\n logger.warning(\"Setting this value to repetition penalty instead.\")\n\n # Always make sure the fallback is 0 if range < 0\n # It's technically fine to use -1, but this just validates the passed\n # fallback\n # Always default to 0 if something goes wrong\n if gen_settings.token_repetition_range < 0:\n fallback_decay = 0\n else:\n fallback_decay = gen_settings.token_repetition_range\n gen_settings.token_repetition_decay = coalesce(\n kwargs.get(\"repetition_decay\"), fallback_decay, 0\n )\n\n stop_conditions: List[Union[str, int]] = unwrap(kwargs.get(\"stop\"), [])\n add_bos_token = unwrap(kwargs.get(\"add_bos_token\"), True)\n ban_eos_token = unwrap(kwargs.get(\"ban_eos_token\"), False)\n logit_bias = kwargs.get(\"logit_bias\")\n\n # Override sampler settings for temp = 0\n if gen_settings.temperature == 0:\n gen_settings.temperature = 1.0\n gen_settings.top_k = 1\n gen_settings.top_p = 0\n gen_settings.typical = 0\n\n # Log generation options to console\n # Some options are too large, so log the args instead\n log_generation_params(\n max_tokens=max_tokens,\n **vars(gen_settings),\n token_healing=token_healing,\n auto_scale_penalty_range=auto_scale_penalty_range,\n add_bos_token=add_bos_token,\n ban_eos_token=ban_eos_token,\n stop_conditions=stop_conditions,\n logit_bias=logit_bias,\n )\n\n # Log prompt to console\n log_prompt(prompt, negative_prompt)\n\n # Set logit bias\n if logit_bias:\n # Create a vocab tensor if it doesn't exist for token biasing\n if gen_settings.token_bias is None:\n padding = -self.tokenizer.config.vocab_size % 32\n gen_settings.token_bias = torch.zeros(\n (self.tokenizer.config.vocab_size + padding,),\n dtype=torch.float,\n )\n\n # Map logits to the tensor with their biases\n for token, bias in logit_bias.items():\n gen_settings.token_bias[token] = bias\n\n # Ban the EOS token if specified. 
If not, append to stop conditions\n # as well.\n # Set this below logging to avoid polluting the stop strings array\n if ban_eos_token:\n gen_settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])\n else:\n stop_conditions.append(self.tokenizer.eos_token_id)\n\n # Stop conditions\n self.generator.set_stop_conditions(stop_conditions)\n\n # Tokenized context\n ids, offsets = self.tokenizer.encode(\n [prompt, negative_prompt]\n if negative_prompt and gen_settings.cfg_scale not in [None, 1.0]\n else prompt,\n add_bos=add_bos_token,\n encode_special_tokens=True,\n return_offsets=True,\n )\n mask = (\n self.tokenizer.padding_mask(ids)\n if self.use_cfg and gen_settings.cfg_scale not in [None, 1.0]\n else None\n )\n context_len = len(ids[0])\n\n if context_len > self.config.max_seq_len:\n logger.warning(\n f\"Context length {context_len} is greater than max_seq_len \"\n f\"{self.config.max_seq_len}. Generation is truncated and \"\n \"metrics may not be accurate.\"\n )\n\n prompt_tokens = ids.shape[-1]\n\n # Begin\n generated_tokens = 0\n full_response = \"\"\n start_time = time.time()\n last_chunk_time = start_time\n\n save_tokens = torch.empty((ids.shape[0], 0), dtype=torch.bool)\n chunk_buffer = \"\"\n chunk_tokens = 0\n\n while True:\n # Ingest prompt\n if chunk_tokens == 0:\n ids = torch.cat((ids, save_tokens), dim=-1)\n save_tokens = torch.empty((ids.shape[0], 0), dtype=torch.bool)\n overflow = ids.shape[-1] + generate_window - self.config.max_seq_len\n active_ids = ids[:, max(0, overflow) :]\n chunk_tokens = self.config.max_seq_len - active_ids.shape[-1]\n\n # Split for exllama versions that have CFG\n if self.use_cfg:\n self.generator.begin_stream(\n active_ids,\n gen_settings,\n token_healing=token_healing,\n loras=self.active_loras,\n input_mask=mask,\n position_offsets=offsets,\n )\n else:\n self.generator.begin_stream(\n active_ids,\n gen_settings,\n token_healing=token_healing,\n loras=self.active_loras,\n )\n\n # Reset offsets for subsequent passes if the context is truncated\n offsets = None\n\n if auto_scale_penalty_range:\n gen_settings.token_repetition_range = generated_tokens\n\n # Generate\n chunk, eos, tokens = self.generator.stream()\n\n if token_healing:\n # Extract healed token\n ids[:, -1] = self.generator.sequence_ids[:, -2]\n token_healing = False\n\n save_tokens = torch.cat(\n (save_tokens, tokens.expand(save_tokens.shape[0], -1)), dim=-1\n )\n chunk_buffer += chunk\n\n generated_tokens += 1\n chunk_tokens -= 1\n\n # Yield output\n now = time.time()\n elapsed = now - last_chunk_time\n\n if chunk_buffer != \"\" and (\n elapsed > stream_interval or eos or generated_tokens == max_tokens\n ):\n yield chunk_buffer, prompt_tokens, generated_tokens\n full_response += chunk_buffer\n chunk_buffer = \"\"\n last_chunk_time = now\n\n if eos or generated_tokens == max_tokens:\n break\n\n # Print response\n log_response(full_response)\n\n elapsed_time = last_chunk_time - start_time\n\n initial_response = (\n f\"Metrics: {generated_tokens} tokens generated in \"\n f\"{round(elapsed_time, 2)} seconds\"\n )\n itemization = []\n extra_parts = []\n\n # Add tokens per second\n tokens_per_second = (\n \"Indeterminate\"\n if elapsed_time == 0\n else round(generated_tokens / elapsed_time, 2)\n )\n itemization.append(f\"{tokens_per_second} T/s\")\n\n # Add context (original token count)\n if ids is not None:\n itemization.append(f\"context {context_len} tokens\")\n\n if context_len > self.config.max_seq_len:\n extra_parts.append(\"<-- Not accurate (truncated)\")\n\n # Print 
output\n logger.info(\n initial_response\n + \" (\"\n + \", \".join(itemization)\n + \") \"\n + \" \".join(extra_parts)\n )" }, { "identifier": "CompletionRequest", "path": "OAI/types/completion.py", "snippet": "class CompletionRequest(CommonCompletionRequest):\n \"\"\"Represents a completion request.\"\"\"\n\n # Prompt can also contain token ids, but that's out of scope\n # for this project.\n prompt: Union[str, List[str]]" }, { "identifier": "ChatCompletionRequest", "path": "OAI/types/chat_completion.py", "snippet": "class ChatCompletionRequest(CommonCompletionRequest):\n # Messages\n # Take in a string as well even though it's not part of the OAI spec\n messages: Union[str, List[Dict[str, str]]]\n prompt_template: Optional[str] = None\n add_generation_prompt: Optional[bool] = True" }, { "identifier": "LoraCard", "path": "OAI/types/lora.py", "snippet": "class LoraCard(BaseModel):\n \"\"\"Represents a single Lora card.\"\"\"\n\n id: str = \"test\"\n object: str = \"lora\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n scaling: Optional[float] = None" }, { "identifier": "LoraList", "path": "OAI/types/lora.py", "snippet": "class LoraList(BaseModel):\n \"\"\"Represents a list of Lora cards.\"\"\"\n\n object: str = \"list\"\n data: List[LoraCard] = Field(default_factory=list)" }, { "identifier": "LoraLoadRequest", "path": "OAI/types/lora.py", "snippet": "class LoraLoadRequest(BaseModel):\n \"\"\"Represents a Lora load request.\"\"\"\n\n loras: List[LoraLoadInfo]" }, { "identifier": "LoraLoadResponse", "path": "OAI/types/lora.py", "snippet": "class LoraLoadResponse(BaseModel):\n \"\"\"Represents a Lora load response.\"\"\"\n\n success: List[str] = Field(default_factory=list)\n failure: List[str] = Field(default_factory=list)" }, { "identifier": "ModelCard", "path": "OAI/types/model.py", "snippet": "class ModelCard(BaseModel):\n \"\"\"Represents a single model card.\"\"\"\n\n id: str = \"test\"\n object: str = \"model\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n logging: Optional[LogPreferences] = None\n parameters: Optional[ModelCardParameters] = None" }, { "identifier": "ModelLoadRequest", "path": "OAI/types/model.py", "snippet": "class ModelLoadRequest(BaseModel):\n \"\"\"Represents a model load request.\"\"\"\n\n name: str\n\n # Max seq len is fetched from config.json of the model by default\n max_seq_len: Optional[int] = Field(\n description=\"Leave this blank to use the model's base sequence length\",\n default=None,\n examples=[4096],\n )\n override_base_seq_len: Optional[int] = Field(\n description=(\n \"Overrides the model's base sequence length. 
\" \"Leave blank if unsure\"\n ),\n default=None,\n examples=[4096],\n )\n gpu_split_auto: Optional[bool] = True\n gpu_split: Optional[List[float]] = Field(\n default_factory=list, examples=[[24.0, 20.0]]\n )\n rope_scale: Optional[float] = Field(\n description=\"Automatically pulled from the model's config if not present\",\n default=None,\n examples=[1.0],\n )\n rope_alpha: Optional[float] = Field(\n description=\"Automatically calculated if not present\",\n default=None,\n examples=[1.0],\n )\n no_flash_attention: Optional[bool] = False\n # low_mem: Optional[bool] = False\n cache_mode: Optional[str] = \"FP16\"\n prompt_template: Optional[str] = None\n num_experts_per_token: Optional[int] = None\n use_cfg: Optional[bool] = None\n draft: Optional[DraftModelLoadRequest] = None" }, { "identifier": "ModelLoadResponse", "path": "OAI/types/model.py", "snippet": "class ModelLoadResponse(BaseModel):\n \"\"\"Represents a model load response.\"\"\"\n\n # Avoids pydantic namespace warning\n model_config = ConfigDict(protected_namespaces=[])\n\n model_type: str = \"model\"\n module: int\n modules: int\n status: str" }, { "identifier": "ModelCardParameters", "path": "OAI/types/model.py", "snippet": "class ModelCardParameters(BaseModel):\n \"\"\"Represents model card parameters.\"\"\"\n\n # Safe to do this since it's guaranteed to fetch a max seq len\n # from model_container\n max_seq_len: Optional[int] = None\n rope_scale: Optional[float] = 1.0\n rope_alpha: Optional[float] = 1.0\n cache_mode: Optional[str] = \"FP16\"\n prompt_template: Optional[str] = None\n num_experts_per_token: Optional[int] = None\n use_cfg: Optional[bool] = None\n draft: Optional[\"ModelCard\"] = None" }, { "identifier": "TemplateList", "path": "OAI/types/template.py", "snippet": "class TemplateList(BaseModel):\n \"\"\"Represents a list of templates.\"\"\"\n\n object: str = \"list\"\n data: List[str] = Field(default_factory=list)" }, { "identifier": "TokenEncodeRequest", "path": "OAI/types/token.py", "snippet": "class TokenEncodeRequest(CommonTokenRequest):\n \"\"\"Represents a tokenization request.\"\"\"\n\n text: str" }, { "identifier": "TokenEncodeResponse", "path": "OAI/types/token.py", "snippet": "class TokenEncodeResponse(BaseModel):\n \"\"\"Represents a tokenization response.\"\"\"\n\n tokens: List[int]\n length: int" }, { "identifier": "TokenDecodeRequest", "path": "OAI/types/token.py", "snippet": "class TokenDecodeRequest(CommonTokenRequest):\n \"\"\" \" Represents a detokenization request.\"\"\"\n\n tokens: List[int]" }, { "identifier": "TokenDecodeResponse", "path": "OAI/types/token.py", "snippet": "class TokenDecodeResponse(BaseModel):\n \"\"\"Represents a detokenization response.\"\"\"\n\n text: str" }, { "identifier": "create_completion_response", "path": "OAI/utils_oai.py", "snippet": "def create_completion_response(\n text: str,\n prompt_tokens: int,\n completion_tokens: int,\n model_name: Optional[str],\n):\n \"\"\"Create a completion response from the provided text.\"\"\"\n choice = CompletionRespChoice(finish_reason=\"Generated\", text=text)\n\n response = CompletionResponse(\n choices=[choice],\n model=unwrap(model_name, \"\"),\n usage=UsageStats(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n ),\n )\n\n return response" }, { "identifier": "get_model_list", "path": "OAI/utils_oai.py", "snippet": "def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):\n \"\"\"Get the list of models from the provided 
path.\"\"\"\n\n # Convert the provided draft model path to a pathlib path for\n # equality comparisons\n if draft_model_path:\n draft_model_path = pathlib.Path(draft_model_path).resolve()\n\n model_card_list = ModelList()\n for path in model_path.iterdir():\n # Don't include the draft models path\n if path.is_dir() and path != draft_model_path:\n model_card = ModelCard(id=path.name)\n model_card_list.data.append(model_card) # pylint: disable=no-member\n\n return model_card_list" }, { "identifier": "get_lora_list", "path": "OAI/utils_oai.py", "snippet": "def get_lora_list(lora_path: pathlib.Path):\n \"\"\"Get the list of Lora cards from the provided path.\"\"\"\n lora_list = LoraList()\n for path in lora_path.iterdir():\n if path.is_dir():\n lora_card = LoraCard(id=path.name)\n lora_list.data.append(lora_card) # pylint: disable=no-member\n\n return lora_list" }, { "identifier": "create_chat_completion_response", "path": "OAI/utils_oai.py", "snippet": "def create_chat_completion_response(\n text: str,\n prompt_tokens: int,\n completion_tokens: int,\n model_name: Optional[str],\n):\n \"\"\"Create a chat completion response from the provided text.\"\"\"\n message = ChatCompletionMessage(role=\"assistant\", content=text)\n\n choice = ChatCompletionRespChoice(finish_reason=\"Generated\", message=message)\n\n response = ChatCompletionResponse(\n choices=[choice],\n model=unwrap(model_name, \"\"),\n usage=UsageStats(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n ),\n )\n\n return response" }, { "identifier": "create_chat_completion_stream_chunk", "path": "OAI/utils_oai.py", "snippet": "def create_chat_completion_stream_chunk(\n const_id: str,\n text: Optional[str] = None,\n model_name: Optional[str] = None,\n finish_reason: Optional[str] = None,\n):\n \"\"\"Create a chat completion stream chunk from the provided text.\"\"\"\n if finish_reason:\n message = {}\n else:\n message = ChatCompletionMessage(role=\"assistant\", content=text)\n\n # The finish reason can be None\n choice = ChatCompletionStreamChoice(finish_reason=finish_reason, delta=message)\n\n chunk = ChatCompletionStreamChunk(\n id=const_id, choices=[choice], model=unwrap(model_name, \"\")\n )\n\n return chunk" }, { "identifier": "get_all_templates", "path": "templating.py", "snippet": "def get_all_templates():\n \"\"\"Fetches all templates from the templates directory\"\"\"\n\n template_directory = pathlib.Path(\"templates\")\n return template_directory.glob(\"*.jinja\")" }, { "identifier": "get_prompt_from_template", "path": "templating.py", "snippet": "def get_prompt_from_template(\n messages,\n prompt_template: PromptTemplate,\n add_generation_prompt: bool,\n special_tokens: Optional[Dict[str, str]] = None,\n):\n \"\"\"Get a prompt from a template and a list of messages.\"\"\"\n if version.parse(package_version(\"jinja2\")) < version.parse(\"3.0.0\"):\n raise ImportError(\n \"Parsing these chat completion messages requires jinja2 3.0.0 \"\n f\"or greater. 
Current version: {package_version('jinja2')}\\n\"\n \"Please upgrade jinja by running the following command: \"\n \"pip install --upgrade jinja2\"\n )\n\n compiled_template = _compile_template(prompt_template.template)\n return compiled_template.render(\n messages=messages,\n add_generation_prompt=add_generation_prompt,\n **special_tokens,\n )" }, { "identifier": "get_generator_error", "path": "utils.py", "snippet": "def get_generator_error(message: str):\n \"\"\"Get a generator error.\"\"\"\n error_message = TabbyGeneratorErrorMessage(\n message=message, trace=traceback.format_exc()\n )\n\n generator_error = TabbyGeneratorError(error=error_message)\n\n # Log and send the exception\n logger.error(generator_error.error.message)\n return get_sse_packet(generator_error.model_dump_json())" }, { "identifier": "get_sse_packet", "path": "utils.py", "snippet": "def get_sse_packet(json_data: str):\n \"\"\"Get an SSE packet.\"\"\"\n return f\"data: {json_data}\\n\\n\"" }, { "identifier": "load_progress", "path": "utils.py", "snippet": "def load_progress(module, modules):\n \"\"\"Wrapper callback for load progress.\"\"\"\n yield module, modules" }, { "identifier": "unwrap", "path": "utils.py", "snippet": "def unwrap(wrapped, default=None):\n \"\"\"Unwrap function for Optionals.\"\"\"\n if wrapped is None:\n return default\n\n return wrapped" }, { "identifier": "init_logger", "path": "logger.py", "snippet": "def init_logger(name: str):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(_default_handler)\n logger.propagate = False\n return logger" } ]
import pathlib import uvicorn import gen_logging from asyncio import CancelledError from typing import Optional from uuid import uuid4 from jinja2 import TemplateError from fastapi import FastAPI, Depends, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse from functools import partial from progress.bar import IncrementalBar from args import convert_args_to_dict, init_argparser from auth import check_admin_key, check_api_key, load_auth_keys from config import ( override_config_from_args, read_config_from_file, get_gen_logging_config, get_model_config, get_draft_model_config, get_lora_config, get_network_config, ) from generators import call_with_semaphore, generate_with_semaphore from model import ModelContainer from OAI.types.completion import CompletionRequest from OAI.types.chat_completion import ChatCompletionRequest from OAI.types.lora import LoraCard, LoraList, LoraLoadRequest, LoraLoadResponse from OAI.types.model import ( ModelCard, ModelLoadRequest, ModelLoadResponse, ModelCardParameters, ) from OAI.types.template import TemplateList from OAI.types.token import ( TokenEncodeRequest, TokenEncodeResponse, TokenDecodeRequest, TokenDecodeResponse, ) from OAI.utils_oai import ( create_completion_response, get_model_list, get_lora_list, create_chat_completion_response, create_chat_completion_stream_chunk, ) from templating import get_all_templates, get_prompt_from_template from utils import get_generator_error, get_sse_packet, load_progress, unwrap from logger import init_logger
12983
"""The main tabbyAPI module. Contains the FastAPI server and endpoints.""" logger = init_logger(__name__) app = FastAPI( title="TabbyAPI", summary="An OAI compatible exllamav2 API that's both lightweight and fast", description=( "This docs page is not meant to send requests! Please use a service " "like Postman or a frontend UI." ), ) # Globally scoped variables. Undefined until initalized in main MODEL_CONTAINER: Optional[ModelContainer] = None def _check_model_container(): if MODEL_CONTAINER is None or MODEL_CONTAINER.model is None: raise HTTPException(400, "No models are loaded.") # ALlow CORS requests app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Model list endpoint @app.get("/v1/models", dependencies=[Depends(check_api_key)]) @app.get("/v1/model/list", dependencies=[Depends(check_api_key)]) async def list_models(): """Lists all models in the model directory.""" model_config = get_model_config() model_dir = unwrap(model_config.get("model_dir"), "models") model_path = pathlib.Path(model_dir) draft_model_dir = get_draft_model_config().get("draft_model_dir") models = get_model_list(model_path.resolve(), draft_model_dir) if unwrap(model_config.get("use_dummy_models"), False): models.data.insert(0, ModelCard(id="gpt-3.5-turbo")) return models # Currently loaded model endpoint @app.get( "/v1/model", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) @app.get( "/v1/internal/model/info", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_current_model(): """Returns the currently loaded model.""" model_name = MODEL_CONTAINER.get_model_path().name prompt_template = MODEL_CONTAINER.prompt_template model_card = ModelCard( id=model_name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.config.max_seq_len, cache_mode="FP8" if MODEL_CONTAINER.cache_fp8 else "FP16", prompt_template=prompt_template.name if prompt_template else None, num_experts_per_token=MODEL_CONTAINER.config.num_experts_per_token, use_cfg=MODEL_CONTAINER.use_cfg, ), logging=gen_logging.PREFERENCES, ) if MODEL_CONTAINER.draft_config: draft_card = ModelCard( id=MODEL_CONTAINER.get_model_path(True).name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.draft_config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.draft_config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.draft_config.max_seq_len, ), ) model_card.parameters.draft = draft_card return model_card @app.get("/v1/model/draft/list", dependencies=[Depends(check_api_key)]) async def list_draft_models(): """Lists all draft models in the model directory.""" draft_model_dir = unwrap(get_draft_model_config().get("draft_model_dir"), "models") draft_model_path = pathlib.Path(draft_model_dir) models = get_model_list(draft_model_path.resolve()) return models # Load model endpoint @app.post("/v1/model/load", dependencies=[Depends(check_admin_key)])
"""The main tabbyAPI module. Contains the FastAPI server and endpoints.""" logger = init_logger(__name__) app = FastAPI( title="TabbyAPI", summary="An OAI compatible exllamav2 API that's both lightweight and fast", description=( "This docs page is not meant to send requests! Please use a service " "like Postman or a frontend UI." ), ) # Globally scoped variables. Undefined until initalized in main MODEL_CONTAINER: Optional[ModelContainer] = None def _check_model_container(): if MODEL_CONTAINER is None or MODEL_CONTAINER.model is None: raise HTTPException(400, "No models are loaded.") # ALlow CORS requests app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Model list endpoint @app.get("/v1/models", dependencies=[Depends(check_api_key)]) @app.get("/v1/model/list", dependencies=[Depends(check_api_key)]) async def list_models(): """Lists all models in the model directory.""" model_config = get_model_config() model_dir = unwrap(model_config.get("model_dir"), "models") model_path = pathlib.Path(model_dir) draft_model_dir = get_draft_model_config().get("draft_model_dir") models = get_model_list(model_path.resolve(), draft_model_dir) if unwrap(model_config.get("use_dummy_models"), False): models.data.insert(0, ModelCard(id="gpt-3.5-turbo")) return models # Currently loaded model endpoint @app.get( "/v1/model", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) @app.get( "/v1/internal/model/info", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_current_model(): """Returns the currently loaded model.""" model_name = MODEL_CONTAINER.get_model_path().name prompt_template = MODEL_CONTAINER.prompt_template model_card = ModelCard( id=model_name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.config.max_seq_len, cache_mode="FP8" if MODEL_CONTAINER.cache_fp8 else "FP16", prompt_template=prompt_template.name if prompt_template else None, num_experts_per_token=MODEL_CONTAINER.config.num_experts_per_token, use_cfg=MODEL_CONTAINER.use_cfg, ), logging=gen_logging.PREFERENCES, ) if MODEL_CONTAINER.draft_config: draft_card = ModelCard( id=MODEL_CONTAINER.get_model_path(True).name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.draft_config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.draft_config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.draft_config.max_seq_len, ), ) model_card.parameters.draft = draft_card return model_card @app.get("/v1/model/draft/list", dependencies=[Depends(check_api_key)]) async def list_draft_models(): """Lists all draft models in the model directory.""" draft_model_dir = unwrap(get_draft_model_config().get("draft_model_dir"), "models") draft_model_path = pathlib.Path(draft_model_dir) models = get_model_list(draft_model_path.resolve()) return models # Load model endpoint @app.post("/v1/model/load", dependencies=[Depends(check_admin_key)])
async def load_model(request: Request, data: ModelLoadRequest):
22
2023-11-10 05:54:02+00:00
16k
ShipBit/wingman-ai
services/tower.py
[ { "identifier": "MissingApiKeyException", "path": "exceptions.py", "snippet": "class MissingApiKeyException(Exception):\n pass" }, { "identifier": "OpenAiWingman", "path": "wingmen/open_ai_wingman.py", "snippet": "class OpenAiWingman(Wingman):\n \"\"\"Our OpenAI Wingman base gives you everything you need to interact with OpenAI's various APIs.\n\n It transcribes speech to text using Whisper, uses the Completion API for conversation and implements the Tools API to execute functions.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n super().__init__(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n )\n\n self.openai: OpenAi = None # validate will set this\n \"\"\"Our OpenAI API wrapper\"\"\"\n\n # every conversation starts with the \"context\" that the user has configured\n self.messages = [\n {\"role\": \"system\", \"content\": self.config[\"openai\"].get(\"context\")}\n ]\n \"\"\"The conversation history that is used for the GPT calls\"\"\"\n\n self.edge_tts = EdgeTTS(app_root_dir)\n self.last_transcript_locale = None\n self.elevenlabs_api_key = None\n self.azure_keys = {\n \"tts\": None,\n \"whisper\": None,\n \"conversation\": None,\n \"summarize\": None,\n }\n self.stt_provider = self.config[\"features\"].get(\"stt_provider\", None)\n self.conversation_provider = self.config[\"features\"].get(\n \"conversation_provider\", None\n )\n self.summarize_provider = self.config[\"features\"].get(\n \"summarize_provider\", None\n )\n\n def validate(self):\n errors = super().validate()\n openai_api_key = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"openai\",\n friendly_key_name=\"OpenAI API key\",\n prompt_if_missing=True,\n )\n if not openai_api_key:\n errors.append(\n \"Missing 'openai' API key. Please provide a valid key in the settings.\"\n )\n else:\n openai_organization = self.config[\"openai\"].get(\"organization\")\n openai_base_url = self.config[\"openai\"].get(\"base_url\")\n self.openai = OpenAi(openai_api_key, openai_organization, openai_base_url)\n\n self.__validate_elevenlabs_config(errors)\n\n self.__validate_azure_config(errors)\n\n return errors\n\n def __validate_elevenlabs_config(self, errors):\n if self.tts_provider == \"elevenlabs\":\n self.elevenlabs_api_key = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"elevenlabs\",\n friendly_key_name=\"Elevenlabs API key\",\n prompt_if_missing=True,\n )\n if not self.elevenlabs_api_key:\n errors.append(\n \"Missing 'elevenlabs' API key. Please provide a valid key in the settings or use another tts_provider.\"\n )\n return\n elevenlabs_settings = self.config.get(\"elevenlabs\")\n if not elevenlabs_settings:\n errors.append(\n \"Missing 'elevenlabs' section in config. Please provide a valid config or change the TTS provider.\"\n )\n return\n if not elevenlabs_settings.get(\"model\"):\n errors.append(\"Missing 'model' setting in 'elevenlabs' config.\")\n return\n voice_settings = elevenlabs_settings.get(\"voice\")\n if not voice_settings:\n errors.append(\n \"Missing 'voice' section in 'elevenlabs' config. Please provide a voice configuration as shown in our example config.\"\n )\n return\n if not voice_settings.get(\"id\") and not voice_settings.get(\"name\"):\n errors.append(\n \"Missing 'id' or 'name' in 'voice' section of 'elevenlabs' config. 
Please provide a valid name or id for the voice in your config.\"\n )\n\n def __validate_azure_config(self, errors):\n if (\n self.tts_provider == \"azure\"\n or self.stt_provider == \"azure\"\n or self.conversation_provider == \"azure\"\n or self.summarize_provider == \"azure\"\n ):\n azure_settings = self.config.get(\"azure\")\n if not azure_settings:\n errors.append(\n \"Missing 'azure' section in config. Please provide a valid config.\"\n )\n return\n\n if self.tts_provider == \"azure\":\n self.azure_keys[\"tts\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_tts\",\n friendly_key_name=\"Azure TTS API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"tts\"]:\n errors.append(\n \"Missing 'azure' tts API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.stt_provider == \"azure\":\n self.azure_keys[\"whisper\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_whisper\",\n friendly_key_name=\"Azure Whisper API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"whisper\"]:\n errors.append(\n \"Missing 'azure' whisper API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.conversation_provider == \"azure\":\n self.azure_keys[\"conversation\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_conversation\",\n friendly_key_name=\"Azure Conversation API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"conversation\"]:\n errors.append(\n \"Missing 'azure' conversation API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.summarize_provider == \"azure\":\n self.azure_keys[\"summarize\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_summarize\",\n friendly_key_name=\"Azure Summarize API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"summarize\"]:\n errors.append(\n \"Missing 'azure' summarize API key. Please provide a valid key in the settings.\"\n )\n return\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the recorded audio to text using the OpenAI Whisper API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. 
This is a recording of what you you said.\n\n Returns:\n str | None: The transcript of the audio file or None if the transcription failed.\n \"\"\"\n detect_language = self.config[\"edge_tts\"].get(\"detect_language\")\n\n response_format = (\n \"verbose_json\" # verbose_json will return the language detected in the transcript.\n if self.tts_provider == \"edge_tts\" and detect_language\n else \"json\"\n )\n\n azure_config = None\n if self.stt_provider == \"azure\":\n azure_config = self._get_azure_config(\"whisper\")\n\n transcript = self.openai.transcribe(\n audio_input_wav, response_format=response_format, azure_config=azure_config\n )\n\n locale = None\n # skip the GPT call if we didn't change the language\n if (\n response_format == \"verbose_json\"\n and transcript\n and transcript.language != self.last_transcript_locale # type: ignore\n ):\n printr.print(\n f\" EdgeTTS detected language '{transcript.language}'.\", tags=\"info\" # type: ignore\n )\n locale = self.__ask_gpt_for_locale(transcript.language) # type: ignore\n\n return transcript.text if transcript else None, locale\n\n def _get_azure_config(self, section: str):\n azure_api_key = self.azure_keys[section]\n azure_config = AzureConfig(\n api_key=azure_api_key,\n api_base_url=self.config[\"azure\"]\n .get(section, {})\n .get(\"api_base_url\", None),\n api_version=self.config[\"azure\"].get(section, {}).get(\"api_version\", None),\n deployment_name=self.config[\"azure\"]\n .get(section, {})\n .get(\"deployment_name\", None),\n )\n\n return azure_config\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Gets the response for a given transcript.\n\n This function interprets the transcript, runs instant commands if triggered,\n calls the OpenAI API when needed, processes any tool calls, and generates the final response.\n\n Args:\n transcript (str): The user's spoken text transcribed.\n\n Returns:\n A tuple of strings representing the response to a function call and an instant response.\n \"\"\"\n self.last_transcript_locale = locale\n self._add_user_message(transcript)\n\n instant_response = self._try_instant_activation(transcript)\n if instant_response:\n return instant_response, instant_response\n\n completion = self._gpt_call()\n\n if completion is None:\n return None, None\n\n response_message, tool_calls = self._process_completion(completion)\n\n # do not tamper with this message as it will lead to 400 errors!\n self.messages.append(response_message)\n\n if tool_calls:\n instant_response = await self._handle_tool_calls(tool_calls)\n if instant_response:\n return None, instant_response\n\n summarize_response = self._summarize_function_calls()\n return self._finalize_response(str(summarize_response))\n\n return response_message.content, response_message.content\n\n def _add_user_message(self, content: str):\n \"\"\"Shortens the conversation history if needed and adds a user message to it.\n\n Args:\n content (str): The message content to add.\n role (str): The role of the message sender (\"user\", \"assistant\", \"function\" or \"tool\").\n tool_call_id (Optional[str]): The identifier for the tool call, if applicable.\n name (Optional[str]): The name of the function associated with the tool call, if applicable.\n \"\"\"\n msg = {\"role\": \"user\", \"content\": content}\n self._cleanup_conversation_history()\n self.messages.append(msg)\n\n def _cleanup_conversation_history(self):\n \"\"\"Cleans up the conversation history by removing messages that are too 
old.\"\"\"\n remember_messages = self.config.get(\"features\", {}).get(\n \"remember_messages\", None\n )\n\n if remember_messages is None or len(self.messages) == 0:\n return 0 # Configuration not set, nothing to delete.\n\n # The system message aka `context` does not count\n context_offset = (\n 1 if self.messages and self.messages[0][\"role\"] == \"system\" else 0\n )\n\n # Find the cutoff index where to end deletion, making sure to only count 'user' messages towards the limit starting with newest messages.\n cutoff_index = len(self.messages) - 1\n user_message_count = 0\n for message in reversed(self.messages):\n if self.__get_message_role(message) == \"user\":\n user_message_count += 1\n if user_message_count == remember_messages:\n break # Found the cutoff point.\n cutoff_index -= 1\n\n # If messages below the keep limit, don't delete anything.\n if user_message_count < remember_messages:\n return 0\n\n total_deleted_messages = cutoff_index - context_offset # Messages to delete.\n\n # Remove the messages before the cutoff index, exclusive of the system message.\n del self.messages[context_offset:cutoff_index]\n\n # Optional debugging printout.\n if self.debug and total_deleted_messages > 0:\n printr.print(\n f\"Deleted {total_deleted_messages} messages from the conversation history.\",\n tags=\"warn\",\n )\n\n return total_deleted_messages\n\n def reset_conversation_history(self):\n \"\"\"Resets the conversation history by removing all messages except for the initial system message.\"\"\"\n del self.messages[1:]\n\n def _try_instant_activation(self, transcript: str) -> str:\n \"\"\"Tries to execute an instant activation command if present in the transcript.\n\n Args:\n transcript (str): The transcript to check for an instant activation command.\n\n Returns:\n str: The response to the instant command or None if no such command was found.\n \"\"\"\n command = self._execute_instant_activation_command(transcript)\n if command:\n response = self._select_command_response(command)\n return response\n return None\n\n def _gpt_call(self):\n \"\"\"Makes the primary GPT call with the conversation history and tools enabled.\n\n Returns:\n The GPT completion object or None if the call fails.\n \"\"\"\n if self.debug:\n printr.print(\n f\" Calling GPT with {(len(self.messages) - 1)} messages (excluding context)\",\n tags=\"info\",\n )\n\n azure_config = None\n if self.conversation_provider == \"azure\":\n azure_config = self._get_azure_config(\"conversation\")\n\n return self.openai.ask(\n messages=self.messages,\n tools=self._build_tools(),\n model=self.config[\"openai\"].get(\"conversation_model\"),\n azure_config=azure_config,\n )\n\n def _process_completion(self, completion):\n \"\"\"Processes the completion returned by the GPT call.\n\n Args:\n completion: The completion object from an OpenAI call.\n\n Returns:\n A tuple containing the message response and tool calls from the completion.\n \"\"\"\n response_message = completion.choices[0].message\n\n content = response_message.content\n if content is None:\n response_message.content = \"\"\n\n return response_message, response_message.tool_calls\n\n async def _handle_tool_calls(self, tool_calls):\n \"\"\"Processes all the tool calls identified in the response message.\n\n Args:\n tool_calls: The list of tool calls to process.\n\n Returns:\n str: The immediate response from processed tool calls or None if there are no immediate responses.\n \"\"\"\n instant_response = None\n function_response = \"\"\n\n for tool_call in tool_calls:\n 
function_name = tool_call.function.name\n function_args = json.loads(tool_call.function.arguments)\n (\n function_response,\n instant_response,\n ) = await self._execute_command_by_function_call(\n function_name, function_args\n )\n\n msg = {\"role\": \"tool\", \"content\": function_response}\n if tool_call.id is not None:\n msg[\"tool_call_id\"] = tool_call.id\n if function_name is not None:\n msg[\"name\"] = function_name\n\n # Don't use self._add_user_message_to_history here because we never want to skip this because of history limitions\n self.messages.append(msg)\n\n return instant_response\n\n def _summarize_function_calls(self):\n \"\"\"Summarizes the function call responses using the GPT model specified for summarization in the configuration.\n\n Returns:\n The content of the GPT response to the function call summaries.\n \"\"\"\n azure_config = None\n if self.summarize_provider == \"azure\":\n azure_config = self._get_azure_config(\"summarize\")\n\n summarize_model = self.config[\"openai\"].get(\"summarize_model\")\n summarize_response = self.openai.ask(\n messages=self.messages,\n model=summarize_model,\n azure_config=azure_config,\n )\n\n if summarize_response is None:\n return None\n\n # do not tamper with this message as it will lead to 400 errors!\n message = summarize_response.choices[0].message\n self.messages.append(message)\n return message.content\n\n def _finalize_response(self, summarize_response: str) -> tuple[str, str]:\n \"\"\"Finalizes the response based on the call of the second (summarize) GPT call.\n\n Args:\n summarize_response (str): The response content from the second GPT call.\n\n Returns:\n A tuple containing the final response to the user.\n \"\"\"\n if summarize_response is None:\n return self.messages[-1][\"content\"], self.messages[-1][\"content\"]\n return summarize_response, summarize_response\n\n async def _execute_command_by_function_call(\n self, function_name: str, function_args: dict[str, any]\n ) -> tuple[str, str]:\n \"\"\"\n Uses an OpenAI function call to execute a command. 
If it's an instant activation_command, one if its reponses will be played.\n\n Args:\n function_name (str): The name of the function to be executed.\n function_args (dict[str, any]): The arguments to pass to the function being executed.\n\n Returns:\n A tuple containing two elements:\n - function_response (str): The text response or result obtained after executing the function.\n - instant_response (str): An immediate response or action to be taken, if any (e.g., play audio).\n \"\"\"\n function_response = \"\"\n instant_reponse = \"\"\n if function_name == \"execute_command\":\n # get the command based on the argument passed by GPT\n command = self._get_command(function_args[\"command_name\"])\n # execute the command\n function_response = self._execute_command(command)\n # if the command has responses, we have to play one of them\n if command and command.get(\"responses\"):\n instant_reponse = self._select_command_response(command)\n await self._play_to_user(instant_reponse)\n\n return function_response, instant_reponse\n\n async def _play_to_user(self, text: str):\n \"\"\"Plays audio to the user using the configured TTS Provider (default: OpenAI TTS).\n Also adds sound effects if enabled in the configuration.\n\n Args:\n text (str): The text to play as audio.\n \"\"\"\n\n if self.tts_provider == \"edge_tts\":\n await self._play_with_edge_tts(text)\n elif self.tts_provider == \"elevenlabs\":\n self._play_with_elevenlabs(text)\n elif self.tts_provider == \"azure\":\n self._play_with_azure(text)\n else:\n self._play_with_openai(text)\n\n def _play_with_openai(self, text):\n response = self.openai.speak(text, self.config[\"openai\"].get(\"tts_voice\"))\n if response is not None:\n self.audio_player.stream_with_effects(response.content, self.config)\n\n def _play_with_azure(self, text):\n azure_config = self.config[\"azure\"].get(\"tts\", None)\n\n if azure_config is None:\n return\n\n speech_config = speechsdk.SpeechConfig(\n subscription=self.azure_keys[\"tts\"],\n region=azure_config[\"region\"],\n )\n speech_config.speech_synthesis_voice_name = azure_config[\"voice\"]\n\n if azure_config[\"detect_language\"]:\n auto_detect_source_language_config = (\n speechsdk.AutoDetectSourceLanguageConfig()\n )\n\n speech_synthesizer = speechsdk.SpeechSynthesizer(\n speech_config=speech_config,\n audio_config=None,\n auto_detect_source_language_config=auto_detect_source_language_config\n if azure_config[\"detect_language\"]\n else None,\n )\n\n result = speech_synthesizer.speak_text_async(text).get()\n if result is not None:\n self.audio_player.stream_with_effects(result.audio_data, self.config)\n\n async def _play_with_edge_tts(self, text: str):\n edge_config = self.config[\"edge_tts\"]\n\n tts_voice = edge_config.get(\"tts_voice\")\n detect_language = edge_config.get(\"detect_language\")\n if detect_language:\n gender = edge_config.get(\"gender\")\n tts_voice = await self.edge_tts.get_same_random_voice_for_language(\n gender, self.last_transcript_locale\n )\n\n communicate, output_file = await self.edge_tts.generate_speech(\n text, voice=tts_voice\n )\n audio, sample_rate = self.audio_player.get_audio_from_file(output_file)\n\n self.audio_player.stream_with_effects((audio, sample_rate), self.config)\n\n def _play_with_elevenlabs(self, text: str):\n # presence already validated in validate()\n elevenlabs_config = self.config[\"elevenlabs\"]\n # validate() already checked that either id or name is set\n voice_id = elevenlabs_config[\"voice\"].get(\"id\")\n voice_name = 
elevenlabs_config[\"voice\"].get(\"name\")\n\n voice_settings = elevenlabs_config.get(\"voice_settings\", {})\n user = ElevenLabsUser(self.elevenlabs_api_key)\n model = elevenlabs_config.get(\"model\", \"eleven_multilingual_v2\")\n\n voice: (\n ElevenLabsVoice\n | ElevenLabsDesignedVoice\n | ElevenLabsClonedVoice\n | ElevenLabsProfessionalVoice\n ) = None\n if voice_id:\n voice = user.get_voice_by_ID(voice_id)\n else:\n voice = user.get_voices_by_name(voice_name)[0]\n\n # todo: add start/end callbacks to play Quindar beep even if use_sound_effects is disabled\n playback_options = PlaybackOptions(runInBackground=True)\n generation_options = GenerationOptions(\n model=model,\n latencyOptimizationLevel=elevenlabs_config.get(\"latency\", 0),\n style=voice_settings.get(\"style\", 0),\n use_speaker_boost=voice_settings.get(\"use_speaker_boost\", True),\n )\n stability = voice_settings.get(\"stability\")\n if stability is not None:\n generation_options.stability = stability\n\n similarity_boost = voice_settings.get(\"similarity_boost\")\n if similarity_boost is not None:\n generation_options.similarity_boost = similarity_boost\n\n style = voice_settings.get(\"style\")\n if style is not None and model != \"eleven_turbo_v2\":\n generation_options.style = style\n\n use_sound_effects = elevenlabs_config.get(\"use_sound_effects\", False)\n if use_sound_effects:\n audio_bytes, _history_id = voice.generate_audio_v2(\n prompt=text,\n generationOptions=generation_options,\n )\n if audio_bytes:\n self.audio_player.stream_with_effects(audio_bytes, self.config)\n else:\n voice.generate_stream_audio_v2(\n prompt=text,\n playbackOptions=playback_options,\n generationOptions=generation_options,\n )\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Does what Wingman base does, but always returns \"Ok\" instead of a command response.\n Otherwise the AI will try to respond to the command and generate a \"duplicate\" response for instant_activation commands.\n \"\"\"\n super()._execute_command(command)\n return \"Ok\"\n\n def _build_tools(self) -> list[dict]:\n \"\"\"\n Builds a tool for each command that is not instant_activation.\n\n Returns:\n list[dict]: A list of tool descriptors in OpenAI format.\n \"\"\"\n commands = [\n command[\"name\"]\n for command in self.config.get(\"commands\", [])\n if not command.get(\"instant_activation\")\n ]\n tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"execute_command\",\n \"description\": \"Executes a command\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"command_name\": {\n \"type\": \"string\",\n \"description\": \"The command to execute\",\n \"enum\": commands,\n },\n },\n \"required\": [\"command_name\"],\n },\n },\n },\n ]\n return tools\n\n def __ask_gpt_for_locale(self, language: str) -> str:\n \"\"\"OpenAI TTS returns a natural language name for the language of the transcript, e.g. \"german\" or \"english\".\n This method uses ChatGPT to find the corresponding locale, e.g. \"de-DE\" or \"en-EN\".\n\n Args:\n language (str): The natural, lowercase language name returned by OpenAI TTS. Thank you for that btw.. WTF OpenAI?\n \"\"\"\n\n response = self.openai.ask(\n messages=[\n {\n \"content\": \"\"\"\n I'll say a natural language name in lowercase and you'll just return the IETF country code / locale for this language.\n Your answer always has exactly 2 lowercase letters, a dash, then two more letters in uppercase.\n If I say \"german\", you answer with \"de-DE\". 
If I say \"russian\", you answer with \"ru-RU\".\n If it's ambiguous and you don't know which locale to pick (\"en-GB\" vs \"en-US\"), you pick the most commonly used one.\n You only answer with valid country codes according to most common standards.\n If you can't, you respond with \"None\".\n \"\"\",\n \"role\": \"system\",\n },\n {\n \"content\": language,\n \"role\": \"user\",\n },\n ],\n model=\"gpt-3.5-turbo-1106\",\n )\n answer = response.choices[0].message.content\n\n if answer == \"None\":\n return None\n\n printr.print(\n f\" ChatGPT says this language maps to locale '{answer}'.\", tags=\"info\"\n )\n return answer\n\n def __get_message_role(self, message):\n \"\"\"Helper method to get the role of the message regardless of its type.\"\"\"\n if isinstance(message, Mapping):\n return message.get(\"role\")\n elif hasattr(message, \"role\"):\n return message.role\n else:\n raise TypeError(\n f\"Message is neither a mapping nor has a 'role' attribute: {message}\"\n )" }, { "identifier": "Wingman", "path": "wingmen/wingman.py", "snippet": "class Wingman(FileCreator):\n \"\"\"The \"highest\" Wingman base class in the chain. It does some very basic things but is meant to be 'virtual', and so are most its methods, so you'll probably never instantiate it directly.\n\n Instead, you'll create a custom wingman that inherits from this (or a another subclass of it) and override its methods if needed.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n \"\"\"The constructor of the Wingman class. You can override it in your custom wingman.\n\n Args:\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n app_root_dir (str): The path to the root directory of the app. This is where the Wingman executable lives.\n \"\"\"\n\n super().__init__(app_root_dir=app_root_dir, subdir=\"wingman_data\")\n\n self.config = config\n \"\"\"All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\"\"\"\n\n self.secret_keeper = secret_keeper\n \"\"\"A service that allows you to store and retrieve secrets like API keys. It can prompt the user for secrets if necessary.\"\"\"\n\n self.name = name\n \"\"\"The name of the wingman. This is the key you gave it in the config, e.g. \"atc\".\"\"\"\n\n self.audio_player = AudioPlayer()\n \"\"\"A service that allows you to play audio files and add sound effects to them.\"\"\"\n\n self.execution_start: None | float = None\n \"\"\"Used for benchmarking executon times. The timer is (re-)started whenever the process function starts.\"\"\"\n\n self.debug: bool = self.config[\"features\"].get(\"debug_mode\", False)\n \"\"\"If enabled, the Wingman will skip executing any keypresses. It will also print more debug messages and benchmark results.\"\"\"\n\n self.tts_provider = self.config[\"features\"].get(\"tts_provider\")\n \"\"\"The name of the TTS provider you configured in the config.yaml\"\"\"\n\n self.app_root_dir = app_root_dir\n \"\"\"The path to the root directory of the app. 
This is where the Wingman executable lives.\"\"\"\n\n @staticmethod\n def create_dynamically(\n module_path: str,\n class_name: str,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n **kwargs,\n ):\n \"\"\"Dynamically creates a Wingman instance from a module path and class name\n\n Args:\n module_path (str): The module path, e.g. wingmen.open_ai_wingman. It's like the filepath from root to your custom-wingman.py but with dots instead of slashes and without the .py extension. Case-sensitive!\n class_name (str): The name of the class inside your custom-wingman.py, e.g. OpenAiWingman. Case-sensitive!\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n \"\"\"\n\n module = import_module(module_path)\n DerivedWingmanClass = getattr(module, class_name)\n instance = DerivedWingmanClass(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n **kwargs,\n )\n return instance\n\n def get_record_key(self) -> str:\n \"\"\"Returns the activation or \"push-to-talk\" key for this Wingman.\"\"\"\n return self.config.get(\"record_key\", None)\n\n def print_execution_time(self, reset_timer=False):\n \"\"\"Prints the current time since the execution started (in seconds).\"\"\"\n if self.execution_start:\n execution_stop = time.perf_counter()\n elapsed_seconds = execution_stop - self.execution_start\n printr.print(f\"...took {elapsed_seconds:.2f}s\", tags=\"info\")\n if reset_timer:\n self.start_execution_benchmark()\n\n def start_execution_benchmark(self):\n \"\"\"Starts the execution benchmark timer.\"\"\"\n self.execution_start = time.perf_counter()\n\n # ──────────────────────────────────── Hooks ─────────────────────────────────── #\n\n def validate(self) -> list[str]:\n \"\"\"Use this function to validate params and config before the Wingman is started.\n If you add new config sections or entries to your custom wingman, you should validate them here.\n\n It's a good idea to collect all errors from the base class and not to swallow them first.\n\n If you return errors, your Wingman will be disabled by Tower and not be loaded.\n\n Returns:\n list[str]: A list of error messages or an empty list if everything is okay.\n \"\"\"\n return []\n\n # TODO: this should be async\n def prepare(self):\n \"\"\"This method is called only once when the Wingman is instantiated by Tower.\n It is run AFTER validate() so you can access validated params safely here.\n\n You can override it if you need to load async data from an API or file.\"\"\"\n pass\n\n def reset_conversation_history(self):\n \"\"\"This function is called when the user triggers the ResetConversationHistory command.\n It's a global command that should be implemented by every Wingman that keeps a message history.\n \"\"\"\n\n # ──────────────────────────── The main processing loop ──────────────────────────── #\n\n async def process(self, audio_input_wav: str):\n \"\"\"The main method that gets called when the wingman is activated. 
This method controls what your wingman actually does and you can override it if you want to.\n\n The base implementation here triggers the transcription and processing of the given audio input.\n If you don't need even transcription, you can just override this entire process method. If you want transcription but then do something in addition, you can override the listed hooks.\n\n Async so you can do async processing, e.g. send a request to an API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Hooks:\n - async _transcribe: transcribe the audio to text\n - async _get_response_for_transcript: process the transcript and return a text response\n - async _play_to_user: do something with the response, e.g. play it as audio\n \"\"\"\n\n self.start_execution_benchmark()\n\n process_result = None\n\n if self.debug:\n printr.print(\"Starting transcription...\", tags=\"info\")\n\n # transcribe the audio.\n transcript, locale = await self._transcribe(audio_input_wav)\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n if transcript:\n printr.print(f\">> (You): {transcript}\", tags=\"violet\")\n\n if self.debug:\n printr.print(\"Getting response for transcript...\", tags=\"info\")\n\n # process the transcript further. This is where you can do your magic. Return a string that is the \"answer\" to your passed transcript.\n process_result, instant_response = await self._get_response_for_transcript(\n transcript, locale\n )\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n actual_response = instant_response or process_result\n printr.print(f\"<< ({self.name}): {actual_response}\", tags=\"green\")\n\n if self.debug:\n printr.print(\"Playing response back to user...\", tags=\"info\")\n\n # the last step in the chain. You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n await self._play_to_user(str(process_result))\n\n if self.debug:\n self.print_execution_time()\n\n # ───────────────── virtual methods / hooks ───────────────── #\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the audio to text. You can override this method if you want to use a different transcription service.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Returns:\n tuple[str | None, str | None]: The transcript of the audio file and the detected language as locale (if determined).\n \"\"\"\n return None, None\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Processes the transcript and return a response as text. This where you'll do most of your work.\n Pass the transcript to AI providers and build a conversation. Call commands or APIs. Play temporary results to the user etc.\n\n\n Args:\n transcript (str): The user's spoken text transcribed as text.\n locale (str | None): The language that was detected to be used in the transcript, e.g. \"de-DE\".\n\n Returns:\n A tuple of strings representing the response to a function call and/or an instant response.\n \"\"\"\n return (\"\", \"\")\n\n async def _play_to_user(self, text: str):\n \"\"\"You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n\n Args:\n text (str): The response of your _get_response_for_transcript. 
This is usually the \"response\" from conversation with the AI.\n \"\"\"\n pass\n\n # ───────────────────────────────── Commands ─────────────────────────────── #\n\n def _get_command(self, command_name: str) -> dict | None:\n \"\"\"Extracts the command with the given name\n\n Args:\n command_name (str): the name of the command you used in the config\n\n Returns:\n {}: The command object from the config\n \"\"\"\n\n command = next(\n (\n item\n for item in self.config.get(\"commands\", [])\n if item[\"name\"] == command_name\n ),\n None,\n )\n return command\n\n def _select_command_response(self, command: dict) -> str | None:\n \"\"\"Returns one of the configured responses of the command. This base implementation returns a random one.\n\n Args:\n command (dict): The command object from the config\n\n Returns:\n str: A random response from the command's responses list in the config.\n \"\"\"\n command_responses = command.get(\"responses\", None)\n if (command_responses is None) or (len(command_responses) == 0):\n return None\n\n return random.choice(command_responses)\n\n def _execute_instant_activation_command(self, transcript: str) -> dict | None:\n \"\"\"Uses a fuzzy string matching algorithm to match the transcript to a configured instant_activation command and executes it immediately.\n\n Args:\n transcript (text): What the user said, transcripted to text. Needs to be similar to one of the defined instant_activation phrases to work.\n\n Returns:\n {} | None: The executed instant_activation command.\n \"\"\"\n\n instant_activation_commands = [\n command\n for command in self.config.get(\"commands\", [])\n if command.get(\"instant_activation\")\n ]\n\n # check if transcript matches any instant activation command. Each command has a list of possible phrases\n for command in instant_activation_commands:\n for phrase in command.get(\"instant_activation\"):\n ratio = SequenceMatcher(\n None,\n transcript.lower(),\n phrase.lower(),\n ).ratio()\n if (\n ratio > 0.8\n ): # if the ratio is higher than 0.8, we assume that the command was spoken\n self._execute_command(command)\n\n if command.get(\"responses\"):\n return command\n return None\n return None\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Triggers the execution of a command. This base implementation executes the keypresses defined in the command.\n\n Args:\n command (dict): The command object from the config to execute\n\n Returns:\n str: the selected response from the command's responses list in the config. \"Ok\" if there are none.\n \"\"\"\n\n if not command:\n return \"Command not found\"\n\n printr.print(f\"❖ Executing command: {command.get('name')}\", tags=\"info\")\n\n if self.debug:\n printr.print(\n \"Skipping actual keypress execution in debug_mode...\", tags=\"warn\"\n )\n\n if len(command.get(\"keys\", [])) > 0 and not self.debug:\n self.execute_keypress(command)\n # TODO: we could do mouse_events here, too...\n\n # handle the global special commands:\n if command.get(\"name\", None) == \"ResetConversationHistory\":\n self.reset_conversation_history()\n\n if not self.debug:\n # in debug mode we already printed the separate execution times\n self.print_execution_time()\n\n return self._select_command_response(command) or \"Ok\"\n\n def execute_keypress(self, command: dict):\n \"\"\"Executes the keypresses defined in the command in order.\n\n pydirectinput uses SIGEVENTS to send keypresses to the OS. This lib seems to be the only way to send keypresses to games reliably.\n\n It only works on Windows. 
For MacOS, we fall back to PyAutoGUI (which has the exact same API as pydirectinput is built on top of it).\n\n Args:\n command (dict): The command object from the config to execute\n \"\"\"\n\n for entry in command.get(\"keys\", []):\n if entry.get(\"modifier\"):\n key_module.keyDown(entry[\"modifier\"])\n\n if entry.get(\"hold\"):\n key_module.keyDown(entry[\"key\"])\n time.sleep(entry[\"hold\"])\n key_module.keyUp(entry[\"key\"])\n else:\n key_module.press(entry[\"key\"])\n\n if entry.get(\"modifier\"):\n key_module.keyUp(entry[\"modifier\"])\n\n if entry.get(\"wait\"):\n time.sleep(entry[\"wait\"])" }, { "identifier": "Printr", "path": "services/printr.py", "snippet": "class Printr(object):\n _instance = None\n\n LILA = \"\\033[95m\"\n BLUE = \"\\033[94m\"\n CYAN = \"\\033[96m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n RED = \"\\033[91m\"\n CLEAR = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n FAINT = \"\\033[2m\"\n NORMAL_WEIGHT = \"\\033[22m\"\n UNDERLINE = \"\\033[4m\"\n END_UNDERLINE = \"\\033[24m\"\n OVERLINE = \"\\033[53m\"\n END_OVERLINE = \"\\033[55m\"\n FRAMED = \"\\033[51m\"\n ENCIRCLED = \"\\033[52m\"\n DELETE_LINE = \"\\033[2K\\033[1G\"\n PREVIOUS_LINE = \"\\033[2F\"\n\n tags = [\n # {\"tagName\": \"bold\", \"font\": \"TkTextFont bold\"},\n {\"tagName\": \"info\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"warn\", \"foreground\": \"orange\"},\n {\"tagName\": \"err\", \"foreground\": \"red\"},\n\n {\"tagName\": \"green\", \"foreground\": \"#33cc33\"},\n {\"tagName\": \"blue\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"violet\", \"foreground\": \"#aa33dd\"},\n {\"tagName\": \"grey\", \"foreground\": \"grey\"}\n ]\n\n CHANNEL = Literal[\"main\", \"error\", \"warning\", \"info\"]\n OUTPUT_TYPES = None | ctk.StringVar | ctk.CTkTextbox\n\n _message_stacks: dict[CHANNEL, list] = dict(\n main=[],\n error=[],\n warning=[],\n info=[]\n )\n\n # NOTE this is a singleton class\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super(Printr, cls).__new__(cls)\n\n cls.out: dict[Printr.CHANNEL, Printr.OUTPUT_TYPES ] = dict(\n main=None,\n error=None,\n warning=None,\n info=None\n )\n return cls._instance\n\n\n def set_output(self, output_channel: CHANNEL, output_element: OUTPUT_TYPES):\n if isinstance(output_element, ctk.CTkTextbox):\n for tag in self.tags:\n output_element.tag_config(**tag)\n\n self.out[output_channel] = output_element\n\n msg_stack = self._message_stacks.get(output_channel, [])\n if len(msg_stack) > 0:\n msg = \"\\n\".join(msg_stack)\n self.print(msg, output_channel)\n # TODO: clear stack?\n for _ in range(len(msg_stack)):\n msg_stack.pop()\n\n\n\n def print(self, text, output_channel: CHANNEL = \"main\", tags=None, wait_for_gui=False, console_only=False):\n channel = self.out.get(output_channel, None)\n if channel and not console_only:\n if isinstance(channel, ctk.CTkTextbox):\n channel.configure(state=\"normal\")\n channel.insert(\"end\", f\"{text}\\n\", tags=tags)\n channel.see(\"end\")\n channel.configure(state=\"disabled\")\n else:\n # output type -> StringVar\n channel.set(text)\n elif wait_for_gui and not console_only:\n # message should only be shown in GUI\n # so add it to the queue to wait for GUI initialization\n self._message_stacks.get(output_channel, []).append(text)\n else:\n # no special output type -> terminal output\n print(text)\n\n\n def print_err(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"error\", wait_for_gui=wait_for_gui)\n\n def print_warn(self, text, wait_for_gui=True):\n self.print(text, 
output_channel=\"warning\", wait_for_gui=wait_for_gui)\n\n def print_info(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"info\", wait_for_gui=wait_for_gui)\n\n\n @staticmethod\n def clr(text, color_format):\n return f\"{color_format}{text}{Printr.CLEAR}\"\n\n @staticmethod\n def clr_print(text, color_format):\n print(Printr.clr(text, color_format))\n\n @staticmethod\n def sys_print(text, headline=\"\", color=RED, first_message=True):\n if first_message:\n print(\"\")\n if headline.strip():\n print(\n Printr.clr(f\"{Printr.BOLD}{headline}{Printr.NORMAL_WEIGHT}\", color)\n )\n else:\n print(Printr.PREVIOUS_LINE)\n print(Printr.clr(f\"⎢ {text}\", color))\n print(\"\")\n\n @staticmethod\n def err_print(text, first_message=True):\n Printr.sys_print(text, \"Something went wrong!\", first_message=first_message)\n\n @staticmethod\n def warn_print(text, first_message=True):\n Printr.sys_print(text, \"Please note:\", Printr.YELLOW, first_message)\n\n @staticmethod\n def info_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.BLUE, first_message)\n\n @staticmethod\n def hl_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.CYAN, first_message)\n\n @staticmethod\n def override_print(text):\n print(f\"{Printr.DELETE_LINE}{text}\")\n\n @staticmethod\n def box_start():\n print(\n f\"{Printr.CYAN}⎡{Printr.OVERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_OVERLINE}⎤\"\n )\n print(f\"⎢{Printr.CLEAR}\")\n\n @staticmethod\n def box_end():\n print(f\"{Printr.CYAN}⎢\")\n print(\n f\"⎣{Printr.UNDERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_UNDERLINE}⎦{Printr.CLEAR}\"\n )\n\n @staticmethod\n def box_print(text):\n print(f\"{Printr.CYAN}⎜{Printr.CLEAR} {text}\")" }, { "identifier": "SecretKeeper", "path": "services/secret_keeper.py", "snippet": "class SecretKeeper:\n def __init__(self, app_root_path: str):\n self.printr = Printr()\n self.system_config_path: str = os.path.join(app_root_path, SYSTEM_CONFIG_PATH)\n self.config_file = os.path.join(self.system_config_path, SECRETS_FILE)\n self.secrets = self.__load()\n if not self.secrets:\n self.secrets = {}\n\n def __load(self) -> dict[str, any]: # type: ignore\n parsed_config = None\n\n if os.path.exists(self.config_file) and os.path.isfile(self.config_file):\n with open(self.config_file, \"r\", encoding=\"UTF-8\") as stream:\n try:\n parsed_config = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not load ({SECRETS_FILE})\\n{str(e)}\", True\n )\n\n return parsed_config\n\n def save(self):\n \"\"\"Write all secrets to the file\"\"\"\n with open(self.config_file, \"w\", encoding=\"UTF-8\") as stream:\n try:\n yaml.dump(self.secrets, stream)\n return True\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not write ({SECRETS_FILE})\\n{str(e)}\", True\n )\n return False\n\n def retrieve(\n self,\n requester: str,\n key: str,\n friendly_key_name: str,\n prompt_if_missing: bool = True,\n ) -> str:\n \"\"\"Retrieve secret a secret and optionally prompt user for it if missing\"\"\"\n\n secret = self.secrets.get(key, None)\n if not secret and prompt_if_missing:\n # Prompt user for key\n dialog = ctk.CTkInputDialog(\n text=f\"Please enter '{friendly_key_name}':\",\n title=f\"{requester} needs to know a secret\",\n )\n secret = dialog.get_input()\n if secret:\n secret = secret.strip().replace(\"\\n\", 
\"\")\n self.secrets[key] = secret\n self.save()\n\n return secret" } ]
import copy

from exceptions import MissingApiKeyException
from wingmen.open_ai_wingman import OpenAiWingman
from wingmen.wingman import Wingman
from services.printr import Printr
from services.secret_keeper import SecretKeeper
12,641
printr = Printr()


class Tower:
    def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str):  # type: ignore
        self.config = config
        self.app_root_dir = app_root_dir
        self.secret_keeper = secret_keeper
        self.key_wingman_dict: dict[str, Wingman] = {}
        self.broken_wingmen = []
        self.wingmen = self.__instantiate_wingmen()
        self.key_wingman_dict: dict[str, Wingman] = {}
        for wingman in self.wingmen:
            self.key_wingman_dict[wingman.get_record_key()] = wingman

    def __instantiate_wingmen(self) -> list[Wingman]:
        wingmen = []
        for wingman_name, wingman_config in self.config["wingmen"].items():
            if wingman_config.get("disabled") is True:
                continue

            global_config = {
                "sound": self.config.get("sound", {}),
                "openai": self.config.get("openai", {}),
                "features": self.config.get("features", {}),
                "edge_tts": self.config.get("edge_tts", {}),
                "commands": self.config.get("commands", {}),
                "elevenlabs": self.config.get("elevenlabs", {}),
                "azure": self.config.get("azure", {}),
            }
            merged_config = self.__merge_configs(global_config, wingman_config)
            class_config = merged_config.get("class")

            wingman = None
            # it's a custom Wingman
            try:
                if class_config:
                    kwargs = class_config.get("args", {})
                    wingman = Wingman.create_dynamically(
                        name=wingman_name,
                        config=merged_config,
                        secret_keeper=self.secret_keeper,
                        module_path=class_config.get("module"),
                        class_name=class_config.get("name"),
                        app_root_dir=self.app_root_dir,
                        **kwargs
                    )
                else:
printr = Printr()


class Tower:
    def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str):  # type: ignore
        self.config = config
        self.app_root_dir = app_root_dir
        self.secret_keeper = secret_keeper
        self.key_wingman_dict: dict[str, Wingman] = {}
        self.broken_wingmen = []
        self.wingmen = self.__instantiate_wingmen()
        self.key_wingman_dict: dict[str, Wingman] = {}
        for wingman in self.wingmen:
            self.key_wingman_dict[wingman.get_record_key()] = wingman

    def __instantiate_wingmen(self) -> list[Wingman]:
        wingmen = []
        for wingman_name, wingman_config in self.config["wingmen"].items():
            if wingman_config.get("disabled") is True:
                continue

            global_config = {
                "sound": self.config.get("sound", {}),
                "openai": self.config.get("openai", {}),
                "features": self.config.get("features", {}),
                "edge_tts": self.config.get("edge_tts", {}),
                "commands": self.config.get("commands", {}),
                "elevenlabs": self.config.get("elevenlabs", {}),
                "azure": self.config.get("azure", {}),
            }
            merged_config = self.__merge_configs(global_config, wingman_config)
            class_config = merged_config.get("class")

            wingman = None
            # it's a custom Wingman
            try:
                if class_config:
                    kwargs = class_config.get("args", {})
                    wingman = Wingman.create_dynamically(
                        name=wingman_name,
                        config=merged_config,
                        secret_keeper=self.secret_keeper,
                        module_path=class_config.get("module"),
                        class_name=class_config.get("name"),
                        app_root_dir=self.app_root_dir,
                        **kwargs
                    )
                else:
wingman = OpenAiWingman(
1
2023-11-15 09:36:06+00:00
16k
wjun0830/CGDETR
cg_detr/inference.py
[ { "identifier": "AverageMeter", "path": "utils/basic_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n\n def update(self, val, n=1):\n self.max = max(val, self.max)\n self.min = min(val, self.min)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "TestOptions", "path": "cg_detr/config.py", "snippet": "class TestOptions(BaseOptions):\n \"\"\"add additional options for evaluating\"\"\"\n\n def initialize(self):\n BaseOptions.initialize(self)\n # also need to specify --eval_split_name\n self.parser.add_argument(\"--eval_id\", type=str, help=\"evaluation id\")\n self.parser.add_argument(\"--eval_results_dir\", type=str, default=None,\n help=\"dir to save results, if not set, fall back to training results_dir\")\n self.parser.add_argument(\"--model_dir\", type=str,\n help=\"dir contains the model file, will be converted to absolute path afterwards\")" }, { "identifier": "build_model", "path": "cg_detr/model.py", "snippet": "def build_model(args):\n device = torch.device(args.device)\n\n transformer = build_transformer(args)\n position_embedding, txt_position_embedding = build_position_encoding(args)\n\n if args.a_feat_dir is None:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n else:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n aud_dim=args.a_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n\n matcher = build_matcher(args)\n weight_dict = {\"loss_span\": args.span_loss_coef,\n \"loss_giou\": args.giou_loss_coef,\n \"loss_label\": args.label_loss_coef,\n \"loss_saliency\": args.lw_saliency,\n \"loss_ms_align\": args.lw_ms_align,\n \"loss_distill\": args.lw_distill,\n \"loss_orthogonal_dummy\":args.lw_distill}\n if args.contrastive_align_loss:\n weight_dict[\"loss_contrastive_align\"] = args.contrastive_align_loss_coef\n\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items() if k != \"loss_saliency\"})\n weight_dict.update(aux_weight_dict)\n\n losses = ['spans', 'labels', 'saliency', 'ms_align', 'distill', 'orthogonal_dummy']\n if args.contrastive_align_loss:\n losses += [\"contrastive_align\"]\n \n # For highlight detection datasets\n use_matcher = not (args.dset_name in ['youtube_uni', 'tvsum'])\n \n criterion = SetCriterion(\n matcher=matcher, weight_dict=weight_dict, losses=losses,\n eos_coef=args.eos_coef, temperature=args.temperature,\n 
span_loss_type=args.span_loss_type, max_v_l=args.max_v_l,\n saliency_margin=args.saliency_margin, use_matcher=use_matcher, args=args\n )\n criterion.to(device)\n return model, criterion" }, { "identifier": "span_cxw_to_xx", "path": "cg_detr/span_utils.py", "snippet": "def span_cxw_to_xx(cxw_spans):\n \"\"\"\n Args:\n cxw_spans: tensor, (#windows, 2) or (..., 2), the last dim is a row denoting a window of format (center, width)\n\n >>> spans = torch.Tensor([[0.5000, 1.0000], [0.3000, 0.2000]])\n >>> span_cxw_to_xx(spans)\n tensor([[0.0000, 1.0000],\n [0.2000, 0.4000]])\n >>> spans = torch.Tensor([[[0.5000, 1.0000], [0.3000, 0.2000]]])\n >>> span_cxw_to_xx(spans)\n tensor([[[0.0000, 1.0000],\n [0.2000, 0.4000]]])\n \"\"\"\n x1 = cxw_spans[..., 0] - 0.5 * cxw_spans[..., 1]\n x2 = cxw_spans[..., 0] + 0.5 * cxw_spans[..., 1]\n return torch.stack([x1, x2], dim=-1)" }, { "identifier": "StartEndDataset", "path": "cg_detr/start_end_dataset.py", "snippet": "class StartEndDataset(Dataset):\n Q_FEAT_TYPES = [\"pooler_output\", \"last_hidden_state\"]\n \"\"\"One line in data loaded from data_path.\"\n {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17],\n \"relevant_windows\": [[26, 36]]\n }\n \"\"\"\n\n def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode=\"video\",\n normalize_v=True, normalize_t=True, load_labels=True,\n clip_len=2, max_windows=5, span_loss_type=\"l1\", txt_drop_ratio=0,\n dset_domain=None):\n self.dset_name = dset_name\n self.data_path = data_path\n self.data_ratio = data_ratio\n self.v_feat_dirs = v_feat_dirs \\\n if isinstance(v_feat_dirs, list) else [v_feat_dirs]\n self.q_feat_dir = q_feat_dir\n self.q_feat_type = q_feat_type\n if max_v_l == -1:\n max_v_l = 100000000\n if max_q_l == -1:\n max_q_l = 100\n self.max_q_l = max_q_l\n self.max_v_l = max_v_l\n self.ctx_mode = ctx_mode\n self.use_tef = \"tef\" in ctx_mode\n self.use_video = \"video\" in ctx_mode\n self.normalize_t = normalize_t\n self.normalize_v = normalize_v\n self.load_labels = load_labels\n self.clip_len = clip_len\n self.max_windows = max_windows # maximum number of windows to use as labels\n self.span_loss_type = span_loss_type\n self.txt_drop_ratio = txt_drop_ratio\n if \"val\" in data_path or \"test\" in data_path:\n assert txt_drop_ratio == 0\n\n\n # checks\n assert q_feat_type in self.Q_FEAT_TYPES\n\n # data\n self.data = self.load_data()\n \n # load specific domain data for tvsum dataset\n if self.dset_name in ['tvsum', 'tvsum_sfc']:\n target_domain = dset_domain\n assert target_domain in [\"BK\", \"BT\", \"DS\", \"FM\", \"GA\", \"MS\", \"PK\", \"PR\", \"VT\", \"VU\"]\n\n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data\n \n # load specific domain data for youtube-hl dataset\n if self.dset_name == 'youtube_uni':\n target_domain = dset_domain\n assert target_domain in [\"dog\", \"gymnastics\", \"parkour\", \"skating\", \"skiing\", \"surfing\"]\n \n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data \n \n self.use_glove = False\n self.use_glove = 'vgg' in self.v_feat_dirs[0]\n\n if self.dset_name == 'charadesSTA' and self.use_glove:\n self.vocab = vocab.pretrained_aliases['glove.6B.300d']()\n self.vocab.itos.extend(['<unk>'])\n 
self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]\n self.vocab.vectors = torch.cat(\n (self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)\n self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)\n \n\n def load_data(self):\n datalist = load_jsonl(self.data_path)\n if self.data_ratio != 1:\n n_examples = int(len(datalist) * self.data_ratio)\n datalist = datalist[:n_examples]\n logger.info(\"Using {}% of the data: {} examples\"\n .format(self.data_ratio * 100, n_examples))\n return datalist\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n meta = self.data[index]\n\n model_inputs = dict()\n\n if self.use_glove:\n model_inputs[\"query_feat\"] = self.get_query(meta[\"query\"])\n else:\n model_inputs[\"query_feat\"] = self._get_query_feat_by_qid(meta[\"qid\"]) # (Dq, ) or (Lq, Dq)\n \n if self.use_video:\n model_inputs[\"video_feat\"] = self._get_video_feat_by_vid(meta[\"vid\"]) # (Lv, Dv)\n ctx_l = len(model_inputs[\"video_feat\"])\n else:\n ctx_l = self.max_v_l\n\n\n if self.use_tef:\n tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l\n tef_ed = tef_st + 1.0 / ctx_l\n tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)\n if self.use_video:\n model_inputs[\"video_feat\"] = torch.cat(\n [model_inputs[\"video_feat\"], tef], dim=1) # (Lv, Dv+2)\n else:\n model_inputs[\"video_feat\"] = tef\n\n\n if self.dset_name in ['tvsum']:\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_tvsum(meta_label, ctx_l)\n if len(model_inputs[\"saliency_all_labels\"]) != len(model_inputs[\"video_feat\"]):\n model_inputs[\"video_feat\"] = model_inputs[\"video_feat\"][:len(model_inputs[\"saliency_all_labels\"])]\n\n elif self.dset_name == 'youtube_uni':\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_youtube(meta_label, ctx_l)\n else:\n if \"relevant_windows\" in meta: ## For Qvhighlights test set\n model_inputs[\"span_labels\"] = self.get_span_labels(meta[\"relevant_windows\"], ctx_l) # (#windows, 2)\n if self.dset_name in ['charadesSTA', 'tacos', 'activitynet']: ## charades, tacos, nlq\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n elif self.dset_name in ['nlq']:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l, 2) # only one gt\n elif \"subs_train\" not in self.data_path:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all(meta[\"relevant_clip_ids\"], meta[\"saliency_scores\"], ctx_l)\n else:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\n \"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n\n if 'qvhighlight' in self.data_path:\n model_inputs[\"relevant_clip_ids\"] = 
meta[\"relevant_clip_ids\"]\n model_inputs[\"vid\"] = meta[\"vid\"]\n model_inputs[\"qid\"] = meta[\"qid\"]\n return dict(meta=meta, model_inputs=model_inputs)\n\n def get_query(self, query):\n word_inds = torch.LongTensor(\n [self.vocab.stoi.get(w.lower(), 400000) for w in query.split()])\n return self.embedding(word_inds)\n\n def get_saliency_labels_sub_as_query(self, gt_window, duration, ctx_l, max_n=2):\n clip_len = duration / ctx_l\n gt_st = int(gt_window[0] / clip_len)\n gt_ed = max(0, min(int(gt_window[1] / clip_len), ctx_l) - 1)\n if gt_st > gt_ed:\n gt_st = gt_ed\n\n if gt_st != gt_ed:\n pos_clip_indices = random.sample(range(gt_st, gt_ed + 1), k=max_n)\n else:\n if self.dset_name == 'nlq':\n pos_clip_indices = [gt_st] * 2\n else:\n pos_clip_indices = [gt_st, gt_st]\n\n neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))\n try:\n neg_clip_indices = random.sample(neg_pool, k=max_n)\n except:\n neg_clip_indices = pos_clip_indices\n\n # For charades_sta\n score_array = np.zeros(ctx_l)\n score_array[gt_st:gt_ed + 1] = 1\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n\n def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices\n\n def get_saliency_labels_all(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = 
np.argsort(agg_scores) # increasing\n\n # score_array = [min(agg_scores[idx], ctx_l-1) for idx in range(ctx_l)]\n score_array = np.zeros(ctx_l)\n for idx in range(len(rel_clip_ids)):\n if rel_clip_ids[idx] >= ctx_l:\n score_array_new = np.zeros(ctx_l + 1)\n score_array_new[:ctx_l] = score_array\n score_array = score_array_new\n score_array[rel_clip_ids[idx]] = agg_scores[idx]\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_tvsum(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n agg_scores = np.sum(labels - np.ones_like(labels), axis=-1)[:ctx_l] # start from 1, so minus 1\n score_array = agg_scores / 80 * 12\n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_youtube(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n # Youtube-hl only have binary score\n agg_scores = np.array(labels)[:, 0] # (L, 1) --> (L, )\n score_array = agg_scores * 1\n \n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n \n def get_span_labels(self, windows, ctx_l):\n \"\"\"\n windows: list([st, ed]) in seconds. E.g. 
[[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)\n Note a maximum of `self.max_windows` windows are used.\n returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length\n \"\"\"\n if len(windows) > self.max_windows:\n random.shuffle(windows)\n windows = windows[:self.max_windows]\n if self.span_loss_type == \"l1\":\n windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx\n windows = span_xx_to_cxw(windows) # normalized windows in cxw\n elif self.span_loss_type == \"ce\":\n windows = torch.Tensor([\n [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]\n for w in windows]).long() # inclusive\n else:\n raise NotImplementedError\n return windows\n\n def _get_query_feat_by_qid(self, qid):\n if self.dset_name == 'tvsum':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid))) # 'token', 'text'\n return torch.from_numpy(q_feat['token'])\n # youtube-hl\n elif self.dset_name == 'youtube_uni':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid)))\n return torch.from_numpy(q_feat['last_hidden_state'])\n \n elif self.dset_name in ['tacos', 'nlq']:\n q_feat_path = join(self.q_feat_dir, f\"{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n else:\n # QVhighlight dataset\n q_feat_path = join(self.q_feat_dir, f\"qid{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n return torch.from_numpy(q_feat) # (D, ) or (Lq, D)\n\n def random_drop_rows(self, embeddings):\n \"\"\"randomly mask num_drop rows in embeddings to be zero.\n Args:\n embeddings: np.ndarray (L, D)\n \"\"\"\n num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)\n if num_drop_rows > 0:\n row_indices = np.random.choice(\n len(embeddings), size=num_drop_rows, replace=False)\n embeddings[row_indices] = 0\n return embeddings\n\n def _get_video_feat_by_vid(self, vid):\n if self.dset_name == 'tvsum':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n _feat_path = join(_feat_dir, f\"{vid}_rgb.npy\")\n _feat_rgb = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n\n _feat_path = join(_feat_dir, f\"{vid}_opt.npy\")\n _feat_opt = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n _feat = np.concatenate([_feat_rgb, _feat_opt], axis=-1)\n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n elif self.dset_name == 'youtube_uni':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n # Only single npz files per directory\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n 
v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list] # TODO do we need to cut the length over the min_len?\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n else:\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n return torch.from_numpy(v_feat) # (Lv, D)" }, { "identifier": "start_end_collate", "path": "cg_detr/start_end_dataset.py", "snippet": "def start_end_collate(batch):\n batch_meta = [e[\"meta\"] for e in batch] # seems no need to collate ?\n\n model_inputs_keys = batch[0][\"model_inputs\"].keys()\n batched_data = dict()\n for k in model_inputs_keys:\n if k == \"span_labels\":\n batched_data[k] = [dict(spans=e[\"model_inputs\"][\"span_labels\"]) for e in batch]\n continue\n if k in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n batched_data[k] = torch.LongTensor([e[\"model_inputs\"][k] for e in batch])\n continue\n if k == \"saliency_all_labels\":\n pad_data, mask_data = pad_sequences_1d([e[\"model_inputs\"][k] for e in batch], dtype=np.float32, fixed_length=None)\n batched_data[k] = torch.tensor(pad_data, dtype=torch.float32)\n continue\n if k == 'qid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n if k == 'vid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n batched_data[k] = pad_sequences_1d(\n [e[\"model_inputs\"][k] for e in batch], dtype=torch.float32, fixed_length=None)\n return batch_meta, batched_data" }, { "identifier": "prepare_batch_inputs", "path": "cg_detr/start_end_dataset.py", "snippet": "def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):\n model_inputs = dict(\n src_txt=batched_model_inputs[\"query_feat\"][0].to(device, non_blocking=non_blocking),\n src_txt_mask=batched_model_inputs[\"query_feat\"][1].to(device, non_blocking=non_blocking),\n src_vid=batched_model_inputs[\"video_feat\"][0].to(device, non_blocking=non_blocking),\n src_vid_mask=batched_model_inputs[\"video_feat\"][1].to(device, non_blocking=non_blocking),\n vid=batched_model_inputs[\"vid\"],\n qid=batched_model_inputs[\"qid\"],\n )\n targets = {}\n\n if \"span_labels\" in batched_model_inputs:\n targets[\"span_labels\"] = [\n dict(spans=e[\"spans\"].to(device, non_blocking=non_blocking))\n for e in batched_model_inputs[\"span_labels\"]\n ]\n if \"saliency_pos_labels\" in batched_model_inputs:\n for name in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)\n\n if \"saliency_all_labels\" in batched_model_inputs:\n targets[\"saliency_all_labels\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets[\"relevant_clips\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets = None if len(targets) == 0 else targets\n return model_inputs, targets" }, { "identifier": "PostProcessorDETR", "path": 
"cg_detr/postprocessing_cg_detr.py", "snippet": "class PostProcessorDETR:\n def __init__(self, clip_length=2, min_ts_val=0, max_ts_val=150,\n min_w_l=2, max_w_l=70, move_window_method=\"center\",\n process_func_names=(\"clip_window_l\", \"clip_ts\", \"round_multiple\")):\n self.clip_length = clip_length\n self.min_ts_val = min_ts_val\n self.max_ts_val = max_ts_val\n self.min_w_l = min_w_l\n self.max_w_l = max_w_l\n self.move_window_method = move_window_method\n self.process_func_names = process_func_names\n self.name2func = dict(\n clip_ts=self.clip_min_max_timestamps,\n round_multiple=self.round_to_multiple_clip_lengths,\n clip_window_l=self.clip_window_lengths\n )\n\n def __call__(self, lines):\n processed_lines = []\n for line in tqdm(lines, desc=f\"convert to multiples of clip_length={self.clip_length}\"):\n windows_and_scores = torch.tensor(line[\"pred_relevant_windows\"])\n windows = windows_and_scores[:, :2]\n for func_name in self.process_func_names:\n windows = self.name2func[func_name](windows)\n line[\"pred_relevant_windows\"] = torch.cat(\n [windows, windows_and_scores[:, 2:3]], dim=1).tolist()\n line[\"pred_relevant_windows\"] = [e[:2] + [float(f\"{e[2]:.4f}\")] for e in line[\"pred_relevant_windows\"]]\n processed_lines.append(line)\n return processed_lines\n\n def clip_min_max_timestamps(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure timestamps for all windows is within [min_val, max_val], clip is out of boundaries.\n \"\"\"\n return torch.clamp(windows, min=self.min_ts_val, max=self.max_ts_val)\n\n def round_to_multiple_clip_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure the final window timestamps are multiples of `clip_length`\n \"\"\"\n return torch.round(windows / self.clip_length) * self.clip_length\n\n def clip_window_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) np.ndarray\n ensure the final window duration are within [self.min_w_l, self.max_w_l]\n \"\"\"\n window_lengths = windows[:, 1] - windows[:, 0]\n small_rows = window_lengths < self.min_w_l\n if torch.sum(small_rows) > 0:\n windows = self.move_windows(\n windows, small_rows, self.min_w_l, move_method=self.move_window_method)\n large_rows = window_lengths > self.max_w_l\n if torch.sum(large_rows) > 0:\n windows = self.move_windows(\n windows, large_rows, self.max_w_l, move_method=self.move_window_method)\n return windows\n\n @classmethod\n def move_windows(cls, windows, row_selector, new_length, move_method=\"left\"):\n \"\"\"\n Args:\n windows:\n row_selector:\n new_length:\n move_method: str,\n left: keep left unchanged\n center: keep center unchanged\n right: keep right unchanged\n\n Returns:\n\n \"\"\"\n # import ipdb;\n # ipdb.set_trace()\n if move_method == \"left\":\n windows[row_selector, 1] = windows[row_selector, 0] + new_length\n elif move_method == \"right\":\n windows[row_selector, 0] = windows[row_selector, 1] - new_length\n elif move_method == \"center\":\n center = (windows[row_selector, 1] + windows[row_selector, 0]) / 2.\n windows[row_selector, 0] = center - new_length / 2.\n windows[row_selector, 1] = center + new_length / 2.\n return windows" }, { "identifier": "eval_submission", "path": "standalone_eval/eval.py", "snippet": "def eval_submission(submission, ground_truth, verbose=True, match_number=True):\n \"\"\"\n Args:\n submission: list(dict), each dict is {\n qid: str,\n query: str,\n vid: str,\n pred_relevant_windows: list([st, ed]),\n pred_saliency_scores: list(float), len == #clips in video.\n i.e., each clip in the 
video will have a saliency score.\n }\n ground_truth: list(dict), each dict is {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17]\n \"saliency_scores\": [[4, 4, 2], [3, 4, 2], [2, 2, 3], [2, 2, 2], [0, 1, 3]]\n each sublist corresponds to one clip in relevant_clip_ids.\n The 3 elements in the sublist are scores from 3 different workers. The\n scores are in [0, 1, 2, 3, 4], meaning [Very Bad, ..., Good, Very Good]\n }\n verbose:\n match_number:\n\n Returns:\n\n \"\"\"\n pred_qids = set([e[\"qid\"] for e in submission])\n gt_qids = set([e[\"qid\"] for e in ground_truth])\n if match_number:\n assert pred_qids == gt_qids, \\\n f\"qids in ground_truth and submission must match. \" \\\n f\"use `match_number=False` if you wish to disable this check\"\n else: # only leave the items that exists in both submission and ground_truth\n shared_qids = pred_qids.intersection(gt_qids)\n submission = [e for e in submission if e[\"qid\"] in shared_qids]\n ground_truth = [e for e in ground_truth if e[\"qid\"] in shared_qids]\n\n eval_metrics = {}\n eval_metrics_brief = OrderedDict()\n if \"pred_relevant_windows\" in submission[0]:\n moment_ret_scores = eval_moment_retrieval(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(moment_ret_scores)\n moment_ret_scores_brief = {\n \"MR-full-mAP\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"average\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.75\"],\n \"MR-short-mAP\": moment_ret_scores[\"short\"][\"MR-mAP\"][\"average\"],\n \"MR-middle-mAP\": moment_ret_scores[\"middle\"][\"MR-mAP\"][\"average\"],\n \"MR-long-mAP\": moment_ret_scores[\"long\"][\"MR-mAP\"][\"average\"],\n \"MR-full-mIoU\": moment_ret_scores[\"full\"][\"MR-mIoU\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.3\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.7\"],\n }\n eval_metrics_brief.update(\n sorted([(k, v) for k, v in moment_ret_scores_brief.items()], key=lambda x: x[0]))\n\n if \"pred_saliency_scores\" in submission[0]:\n highlight_det_scores = eval_highlight(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(highlight_det_scores)\n highlight_det_scores_brief = dict([\n (f\"{k}-{sub_k.split('-')[1]}\", v[sub_k])\n for k, v in highlight_det_scores.items() for sub_k in v])\n eval_metrics_brief.update(highlight_det_scores_brief)\n\n # sort by keys\n final_eval_metrics = OrderedDict()\n final_eval_metrics[\"brief\"] = eval_metrics_brief\n final_eval_metrics.update(sorted([(k, v) for k, v in eval_metrics.items()], key=lambda x: x[0]))\n return final_eval_metrics" }, { "identifier": "save_jsonl", "path": "utils/basic_utils.py", "snippet": "def save_jsonl(data, filename):\n \"\"\"data is a list\"\"\"\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join([json.dumps(e) for e in data]))" }, { "identifier": "save_json", "path": "utils/basic_utils.py", "snippet": "def save_json(data, filename, save_pretty=False, sort_keys=False):\n with open(filename, \"w\") as f:\n if save_pretty:\n f.write(json.dumps(data, indent=4, sort_keys=sort_keys))\n else:\n json.dump(data, f)" }, { "identifier": "temporal_nms", "path": "utils/temporal_nms.py", "snippet": "def temporal_nms(predictions, nms_thd, max_after_nms=100):\n 
\"\"\"\n Args:\n predictions: list(sublist), each sublist is [st (float), ed(float), score (float)],\n note larger scores are better and are preserved. For metrics that are better when smaller,\n please convert to its negative, e.g., convert distance to negative distance.\n nms_thd: float in [0, 1]\n max_after_nms:\n Returns:\n predictions_after_nms: list(sublist), each sublist is [st (float), ed(float), score (float)]\n References:\n https://github.com/wzmsltw/BSN-boundary-sensitive-network/blob/7b101fc5978802aa3c95ba5779eb54151c6173c6/Post_processing.py#L42\n \"\"\"\n if len(predictions) == 1: # only has one prediction, no need for nms\n return predictions\n\n predictions = sorted(predictions, key=lambda x: x[2], reverse=True) # descending order\n\n tstart = [e[0] for e in predictions]\n tend = [e[1] for e in predictions]\n tscore = [e[2] for e in predictions]\n rstart = []\n rend = []\n rscore = []\n while len(tstart) > 1 and len(rscore) < max_after_nms: # max 100 after nms\n idx = 1\n while idx < len(tstart): # compare with every prediction in the list.\n if compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]) > nms_thd:\n # rm highly overlapped lower score entries.\n tstart.pop(idx)\n tend.pop(idx)\n tscore.pop(idx)\n # print(\"--------------------------------\")\n # print(compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]))\n # print([tstart[0], tend[0]], [tstart[idx], tend[idx]])\n # print(tstart.pop(idx), tend.pop(idx), tscore.pop(idx))\n else:\n # move to next\n idx += 1\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n if len(rscore) < max_after_nms and len(tstart) >= 1: # add the last, possibly empty.\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n predictions_after_nms = [[st, ed, s] for s, st, ed in zip(rscore, rstart, rend)]\n return predictions_after_nms" } ]
import pprint import numpy as np import os import torch import torch.nn.functional as F import torch.backends.cudnn as cudnn import logging from tqdm import tqdm, trange from collections import OrderedDict, defaultdict from utils.basic_utils import AverageMeter from torch.utils.data import DataLoader from cg_detr.config import TestOptions from cg_detr.model import build_model from cg_detr.span_utils import span_cxw_to_xx from cg_detr.start_end_dataset import StartEndDataset, start_end_collate, prepare_batch_inputs from cg_detr.postprocessing_cg_detr import PostProcessorDETR from standalone_eval.eval import eval_submission from utils.basic_utils import save_jsonl, save_json from utils.temporal_nms import temporal_nms from collections import OrderedDict from sys import argv
10,919
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): mr_res_after_nms = [] for e in mr_res: e["pred_relevant_windows"] = temporal_nms( e["pred_relevant_windows"][:max_before_nms], nms_thd=nms_thd, max_after_nms=max_after_nms ) mr_res_after_nms.append(e) return mr_res_after_nms def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): # IOU_THDS = (0.5, 0.7) logger.info("Saving/Evaluating before nms results") submission_path = os.path.join(opt.results_dir, save_submission_filename) save_jsonl(submission, submission_path) if opt.eval_split_name in ["val"]: # since test_public has no GT metrics = eval_submission( submission, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_path = submission_path.replace(".jsonl", "_metrics.json")
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): mr_res_after_nms = [] for e in mr_res: e["pred_relevant_windows"] = temporal_nms( e["pred_relevant_windows"][:max_before_nms], nms_thd=nms_thd, max_after_nms=max_after_nms ) mr_res_after_nms.append(e) return mr_res_after_nms def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): # IOU_THDS = (0.5, 0.7) logger.info("Saving/Evaluating before nms results") submission_path = os.path.join(opt.results_dir, save_submission_filename) save_jsonl(submission, submission_path) if opt.eval_split_name in ["val"]: # since test_public has no GT metrics = eval_submission( submission, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_path = submission_path.replace(".jsonl", "_metrics.json")
save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False)
10
2023-11-10 12:45:25+00:00
16k
dazhangyu123/ACMIL
Step1_create_patches_fp.py
[ { "identifier": "WholeSlideImage", "path": "wsi_core/WholeSlideImage.py", "snippet": "class WholeSlideImage(object):\n def __init__(self, path):\n\n \"\"\"\n Args:\n path (str): fullpath to WSI file\n \"\"\"\n\n# self.name = \".\".join(path.split(\"/\")[-1].split('.')[:-1])\n self.name = os.path.splitext(os.path.basename(path))[0]\n # pdb.set_trace()\n try:\n self.wsi = openslide.open_slide(path)\n except:\n self.wsi = kfbslide.open_kfbslide(path)\n # self.wsi = openSlide(path)\n # pdb.set_trace()\n self.level_downsamples = self._assertLevelDownsamples()\n self.level_dim = self.wsi.level_dimensions\n\n self.contours_tissue = None\n self.contours_tumor = None\n self.hdf5_file = None\n\n def getOpenSlide(self):\n return self.wsi\n\n def initXML(self, xml_path):\n def _createContour(coord_list):\n return np.array([[[int(float(coord.attributes['X'].value)), \n int(float(coord.attributes['Y'].value))]] for coord in coord_list], dtype = 'int32')\n\n xmldoc = minidom.parse(xml_path)\n annotations = [anno.getElementsByTagName('Coordinate') for anno in xmldoc.getElementsByTagName('Annotation')]\n self.contours_tumor = [_createContour(coord_list) for coord_list in annotations]\n self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)\n\n def initTxt(self,annot_path):\n def _create_contours_from_dict(annot):\n all_cnts = []\n for idx, annot_group in enumerate(annot):\n contour_group = annot_group['coordinates']\n if annot_group['type'] == 'Polygon':\n for idx, contour in enumerate(contour_group):\n contour = np.array(contour).astype(np.int32).reshape(-1,1,2)\n all_cnts.append(contour) \n\n else:\n for idx, sgmt_group in enumerate(contour_group):\n contour = []\n for sgmt in sgmt_group:\n contour.extend(sgmt)\n contour = np.array(contour).astype(np.int32).reshape(-1,1,2) \n all_cnts.append(contour) \n\n return all_cnts\n \n with open(annot_path, \"r\") as f:\n annot = f.read()\n annot = eval(annot)\n self.contours_tumor = _create_contours_from_dict(annot)\n self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True)\n\n def initSegmentation(self, mask_file):\n # load segmentation results from pickle file\n import pickle\n asset_dict = load_pkl(mask_file)\n self.holes_tissue = asset_dict['holes']\n self.contours_tissue = asset_dict['tissue']\n\n def saveSegmentation(self, mask_file):\n # save segmentation results using pickle\n asset_dict = {'holes': self.holes_tissue, 'tissue': self.contours_tissue}\n save_pkl(mask_file, asset_dict)\n\n def segmentTissue(self, seg_level=0, sthresh=20, sthresh_up = 255, mthresh=7, close = 0, use_otsu=False, \n filter_params={'a_t':100}, ref_patch_size=512, exclude_ids=[], keep_ids=[]):\n \"\"\"\n Segment the tissue via HSV -> Median thresholding -> Binary threshold\n \"\"\"\n \n def _filter_contours(contours, hierarchy, filter_params):\n \"\"\"\n Filter contours by: area.\n \"\"\"\n filtered = []\n\n # find indices of foreground contours (parent == -1)\n hierarchy_1 = np.flatnonzero(hierarchy[:,1] == -1)\n all_holes = []\n \n # loop through foreground contour indices\n for cont_idx in hierarchy_1:\n # actual contour\n # pdb.set_trace()\n\n cont = contours[cont_idx]\n # indices of holes contained in this contour (children of parent contour)\n holes = np.flatnonzero(hierarchy[:, 1] == cont_idx)\n # take contour area (includes holes)\n a = cv2.contourArea(cont)\n # calculate the contour area of each hole\n hole_areas = [cv2.contourArea(contours[hole_idx]) for hole_idx in holes]\n # actual area of foreground contour 
region\n a = a - np.array(hole_areas).sum()\n if a == 0: continue\n # print(tuple((filter_params['a_t'],)),tuple((a,)))\n if tuple((filter_params['a_t'],)) < tuple((a,)): \n filtered.append(cont_idx)\n all_holes.append(holes)\n\n\n foreground_contours = [contours[cont_idx] for cont_idx in filtered]\n \n hole_contours = []\n\n for hole_ids in all_holes:\n unfiltered_holes = [contours[idx] for idx in hole_ids ]\n unfilered_holes = sorted(unfiltered_holes, key=cv2.contourArea, reverse=True)\n # take max_n_holes largest holes by area\n unfilered_holes = unfilered_holes[:filter_params['max_n_holes']]\n filtered_holes = []\n \n # filter these holes\n for hole in unfilered_holes:\n if cv2.contourArea(hole) > filter_params['a_h']:\n filtered_holes.append(hole)\n\n hole_contours.append(filtered_holes)\n\n return foreground_contours, hole_contours\n # pdb.set_trace()\n try:\n img = np.array(self.wsi.read_region((0,0), seg_level, self.level_dim[seg_level]))\n except:\n print('failed read region')\n img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) # Convert to HSV space\n img_med = cv2.medianBlur(img_hsv[:,:,1], mthresh) # Apply median blurring\n \n \n # Thresholding\n # if use_otsu:\n if False:\n otsu_thresh, img_otsu = cv2.threshold(img_med, 0, sthresh_up, cv2.THRESH_OTSU+cv2.THRESH_BINARY)\n # adjust_thresh = max(sthresh,otsu_thresh-20)\n adjust_thresh = otsu_thresh\n _, img_otsu = cv2.threshold(img_med, adjust_thresh, sthresh_up, cv2.THRESH_BINARY)\n print('otsu_threshold:',otsu_thresh,'adjust_thresh:',adjust_thresh)\n else:\n print('not otsu')\n _, img_otsu = cv2.threshold(img_med, sthresh, sthresh_up, cv2.THRESH_BINARY)\n # pdb.set_trace()\n ## hed operas\n # img_hed = rgb2hed(cv2.cvtColor(img, cv2.COLOR_RGBA2RGB))\n # # img_e = hed2rgb(np.stack((img_hed[:, :, 1], img_hed[:, :, 1], img_hed[:, :, 1]), axis=-1))\n # img_h = hed2rgb(np.stack((img_hed[:, :, 0], np.zeros_like(img_hed[:, :, 0]), np.zeros_like(img_hed[:, :, 0])), axis=-1))\n # img_h = (img_h*255).astype(np.uint8)\n # img_h_gray = 255-cv2.medianBlur(cv2.cvtColor(img_h, cv2.COLOR_BGR2GRAY),mthresh)\n # # _, img_otsu = cv2.threshold(img_h_gray, sthresh, sthresh_up, cv2.THRESH_BINARY)\n # otsu_thresh, img_otsu = cv2.threshold(img_h_gray, 0, sthresh_up, cv2.THRESH_OTSU + cv2.THRESH_BINARY)\n # adjust_thresh = max(sthresh,otsu_thresh-20)\n # _, img_otsu = cv2.threshold(img_h_gray, adjust_thresh, sthresh_up, cv2.THRESH_BINARY)\n\n # img_d = hed2rgb(np.stack((img_hed[:, :, 2], img_hed[:, :, 2], img_hed[:, :, 2]), axis=-1))\n # filter this?\n # Morphological closing\n if close > 0:\n kernel = np.ones((close, close), np.uint8)\n img_otsu = cv2.morphologyEx(img_otsu, cv2.MORPH_CLOSE, kernel) \n\n scale = self.level_downsamples[seg_level]\n scaled_ref_patch_area = int(ref_patch_size**2 / (scale[0] * scale[1]))\n print('scaled_ref_patch_area',scaled_ref_patch_area)\n print('ref_patch_size',ref_patch_size)\n print('scale',scale,'seg_level',seg_level)\n\n filter_params = filter_params.copy()\n filter_params['a_t'] = filter_params['a_t'] * scaled_ref_patch_area\n filter_params['a_h'] = filter_params['a_h'] * scaled_ref_patch_area\n \n # Find and filter contours\n contours, hierarchy = cv2.findContours(img_otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # Find contours \n hierarchy = np.squeeze(hierarchy, axis=(0,))[:, 2:]\n # pdb.set_trace()\n if filter_params: foreground_contours, hole_contours = _filter_contours(contours, hierarchy, filter_params) # Necessary for filtering out artifacts\n\n self.contours_tissue = self.scaleContourDim(foreground_contours, 
scale)\n self.holes_tissue = self.scaleHolesDim(hole_contours, scale)\n\n #exclude_ids = [0,7,9]\n if len(keep_ids) > 0:\n contour_ids = set(keep_ids) - set(exclude_ids)\n else:\n contour_ids = set(np.arange(len(self.contours_tissue))) - set(exclude_ids)\n\n self.contours_tissue = [self.contours_tissue[i] for i in contour_ids]\n self.holes_tissue = [self.holes_tissue[i] for i in contour_ids]\n\n def visWSI(self, vis_level=0, color = (0,255,0), hole_color = (0,0,255), annot_color=(255,0,0), \n line_thickness=250, max_size=None, top_left=None, bot_right=None, custom_downsample=1, view_slide_only=False,\n number_contours=False, seg_display=True, annot_display=True):\n \n downsample = self.level_downsamples[vis_level]\n scale = [1/downsample[0], 1/downsample[1]]\n # pdb.set_trace()\n if top_left is not None and bot_right is not None:\n top_left = tuple(top_left)\n bot_right = tuple(bot_right)\n w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int))\n region_size = (w, h)\n else:\n top_left = (0,0)\n region_size = self.level_dim[vis_level]\n img = self.wsi.read_region(top_left, vis_level, region_size)\n try:\n img = np.array(img.convert(\"RGB\"))\n except:\n pass\n\n # view_slide_only= True\n if not view_slide_only:\n offset = tuple(-(np.array(top_left) * scale).astype(int))\n line_thickness = int(line_thickness * math.sqrt(scale[0] * scale[1]))\n if self.contours_tissue is not None and seg_display:\n if not number_contours:\n cv2.drawContours(img, self.scaleContourDim(self.contours_tissue, scale), \n -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset)\n\n else: # add numbering to each contour\n for idx, cont in enumerate(self.contours_tissue):\n contour = np.array(self.scaleContourDim(cont, scale))\n M = cv2.moments(contour)\n cX = int(M[\"m10\"] / (M[\"m00\"] + 1e-9))\n cY = int(M[\"m01\"] / (M[\"m00\"] + 1e-9))\n # draw the contour and put text next to center\n cv2.drawContours(img, [contour], -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset)\n cv2.putText(img, \"{}\".format(idx), (cX, cY),\n cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 10)\n\n for holes in self.holes_tissue:\n cv2.drawContours(img, self.scaleContourDim(holes, scale), \n -1, hole_color, line_thickness, lineType=cv2.LINE_8)\n \n if self.contours_tumor is not None and annot_display:\n cv2.drawContours(img, self.scaleContourDim(self.contours_tumor, scale), \n -1, annot_color, line_thickness, lineType=cv2.LINE_8, offset=offset)\n \n img = Image.fromarray(img)\n \n w, h = img.size\n if custom_downsample > 1:\n img = img.resize((int(w/custom_downsample), int(h/custom_downsample)))\n\n if max_size is not None and (w > max_size or h > max_size):\n resizeFactor = max_size/w if w > h else max_size/h\n img = img.resize((int(w*resizeFactor), int(h*resizeFactor)))\n \n return img\n\n\n def createPatches_bag_hdf5(self, save_path, patch_level=0, patch_size=256, step_size=256, save_coord=True, **kwargs):\n contours = self.contours_tissue\n contour_holes = self.holes_tissue\n\n print(\"Creating patches for: \", self.name, \"...\",)\n elapsed = time.time()\n for idx, cont in enumerate(contours):\n patch_gen = self._getPatchGenerator(cont, idx, patch_level, save_path, patch_size, step_size, **kwargs)\n \n if self.hdf5_file is None:\n try:\n first_patch = next(patch_gen)\n\n # empty contour, continue\n except StopIteration:\n continue\n\n file_path = initialize_hdf5_bag(first_patch, save_coord=save_coord)\n self.hdf5_file = file_path\n\n for patch in patch_gen:\n 
savePatchIter_bag_hdf5(patch)\n\n return self.hdf5_file\n\n def createTopkPatches_bag_hdf5(self, save_path, target_coords, patch_level=1, patch_size=256, step_size=256, save_coord=True,\n **kwargs):\n print(\"Creating patches for: \", self.name, \"...\", )\n topk_list = []\n for idx, coord in enumerate(target_coords):\n x, y = coord\n patch_PIL = self.wsi.read_region((x, y), patch_level, (patch_size, patch_size)).convert('RGB')\n topk_list.append(np.array(patch_PIL))\n\n # save_dict = {'patches':np.asarray(topk_list),'coords':target_coords}\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n saved_path = os.path.join(save_path, self.name + '.h5')\n if not os.path.exists(saved_path):\n f=h5py.File(saved_path,'w')\n f.create_dataset('patches',data=np.asarray(topk_list))\n f.create_dataset('coords',data=target_coords)\n f.close()\n\n return\n\n def _getPatchGenerator(self, cont, cont_idx, patch_level, save_path, patch_size=256, step_size=256, custom_downsample=1,\n white_black=True, white_thresh=15, black_thresh=50, contour_fn='four_pt', use_padding=True):\n start_x, start_y, w, h = cv2.boundingRect(cont) if cont is not None else (0, 0, self.level_dim[patch_level][0], self.level_dim[patch_level][1])\n print(\"Bounding Box:\", start_x, start_y, w, h)\n print(\"Contour Area:\", cv2.contourArea(cont))\n \n if custom_downsample > 1:\n assert custom_downsample == 2 \n target_patch_size = patch_size\n patch_size = target_patch_size * 2\n step_size = step_size * 2\n print(\"Custom Downsample: {}, Patching at {} x {}, But Final Patch Size is {} x {}\".format(custom_downsample, patch_size, patch_size, \n target_patch_size, target_patch_size))\n\n patch_downsample = (int(self.level_downsamples[patch_level][0]), int(self.level_downsamples[patch_level][1]))\n ref_patch_size = (patch_size*patch_downsample[0], patch_size*patch_downsample[1])\n \n step_size_x = step_size * patch_downsample[0]\n step_size_y = step_size * patch_downsample[1]\n \n if isinstance(contour_fn, str):\n if contour_fn == 'four_pt':\n cont_check_fn = isInContourV3_Easy(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'four_pt_hard':\n cont_check_fn = isInContourV3_Hard(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'center':\n cont_check_fn = isInContourV2(contour=cont, patch_size=ref_patch_size[0])\n elif contour_fn == 'basic':\n cont_check_fn = isInContourV1(contour=cont)\n else:\n raise NotImplementedError\n else:\n assert isinstance(contour_fn, Contour_Checking_fn)\n cont_check_fn = contour_fn\n\n img_w, img_h = self.level_dim[0]\n if use_padding:\n stop_y = start_y+h\n stop_x = start_x+w\n else:\n stop_y = min(start_y+h, img_h-ref_patch_size[1])\n stop_x = min(start_x+w, img_w-ref_patch_size[0])\n\n count = 0\n for y in range(start_y, stop_y, step_size_y):\n for x in range(start_x, stop_x, step_size_x):\n\n if not self.isInContours(cont_check_fn, (x,y), self.holes_tissue[cont_idx], ref_patch_size[0]): #point not inside contour and its associated holes\n continue \n \n count+=1\n patch_PIL = self.wsi.read_region((x,y), patch_level, (patch_size, patch_size)).convert('RGB')\n if custom_downsample > 1:\n patch_PIL = patch_PIL.resize((target_patch_size, target_patch_size))\n \n if white_black:\n if isBlackPatch(np.array(patch_PIL), rgbThresh=black_thresh) or isWhitePatch(np.array(patch_PIL), satThresh=white_thresh): \n continue\n\n patch_info = {'x':x // (patch_downsample[0] * custom_downsample), 'y':y // (patch_downsample[1] * custom_downsample), 
'cont_idx':cont_idx, 'patch_level':patch_level, \n 'downsample': self.level_downsamples[patch_level], 'downsampled_level_dim': tuple(np.array(self.level_dim[patch_level])//custom_downsample), 'level_dim': self.level_dim[patch_level],\n 'patch_PIL':patch_PIL, 'name':self.name, 'save_path':save_path}\n\n yield patch_info\n\n \n print(\"patches extracted: {}\".format(count))\n\n @staticmethod\n def isInHoles(holes, pt, patch_size):\n for hole in holes:\n print((pt[0]+patch_size/2, pt[1]+patch_size/2))\n # pdb.set_trace()\n if cv2.pointPolygonTest(hole, (pt[0]+patch_size/2, pt[1]+patch_size/2), False) > 0:\n return 1\n \n return 0\n\n @staticmethod\n def isInContours(cont_check_fn, pt, holes=None, patch_size=256):\n if cont_check_fn(pt):\n if holes is not None:\n return not WholeSlideImage.isInHoles(holes, pt, patch_size)\n else:\n return 1\n return 0\n \n @staticmethod\n def scaleContourDim(contours, scale):\n return [np.array(cont * scale, dtype='int32') for cont in contours]\n\n @staticmethod\n def scaleHolesDim(contours, scale):\n return [[np.array(hole * scale, dtype = 'int32') for hole in holes] for holes in contours]\n\n def _assertLevelDownsamples(self):\n level_downsamples = []\n dim_0 = self.wsi.level_dimensions[0]\n \n for downsample, dim in zip(self.wsi.level_downsamples, self.wsi.level_dimensions):\n try:\n estimated_downsample = (dim_0[0]/float(dim[0]), dim_0[1]/float(dim[1]))\n except:\n continue\n level_downsamples.append(estimated_downsample) if estimated_downsample != (downsample, downsample) else level_downsamples.append((downsample, downsample))\n \n return level_downsamples\n\n def process_contours(self, save_path, patch_level=0, patch_size=256, step_size=256, **kwargs):\n save_path_hdf5 = os.path.join(save_path, str(self.name) + '.h5')\n print(\"Creating patches for: \", self.name, \"...\",)\n elapsed = time.time()\n n_contours = len(self.contours_tissue)\n print(\"Total number of contours to process: \", n_contours)\n fp_chunk_size = math.ceil(n_contours * 0.05)\n init = True\n for idx, cont in enumerate(self.contours_tissue):\n if (idx + 1) % fp_chunk_size == fp_chunk_size:\n print('Processing contour {}/{}'.format(idx, n_contours))\n # pdb.set_trace()\n asset_dict, attr_dict = self.process_contour(cont, self.holes_tissue[idx], patch_level, save_path, patch_size, step_size, **kwargs)\n if len(asset_dict) > 0:\n if init:\n save_hdf5(save_path_hdf5, asset_dict, attr_dict, mode='w')\n init = False\n else:\n save_hdf5(save_path_hdf5, asset_dict, mode='a')\n\n return self.hdf5_file\n\n\n def process_contour(self, cont, contour_holes, patch_level, save_path, patch_size = 256, step_size = 256,\n contour_fn='four_pt', use_padding=True, top_left=None, bot_right=None):\n # pdb.set_trace()\n start_x, start_y, w, h = cv2.boundingRect(cont) if cont is not None else (0, 0, self.level_dim[patch_level][0], self.level_dim[patch_level][1])\n\n patch_downsample = (int(self.level_downsamples[patch_level][0]), int(self.level_downsamples[patch_level][1]))\n ref_patch_size = (patch_size*patch_downsample[0], patch_size*patch_downsample[1])\n \n img_w, img_h = self.level_dim[0]\n if use_padding:\n stop_y = start_y+h\n stop_x = start_x+w\n else:\n stop_y = min(start_y+h, img_h-ref_patch_size[1]+1)\n stop_x = min(start_x+w, img_w-ref_patch_size[0]+1)\n \n print(\"Bounding Box:\", start_x, start_y, w, h)\n print(\"Contour Area:\", cv2.contourArea(cont))\n\n if bot_right is not None:\n stop_y = min(bot_right[1], stop_y)\n stop_x = min(bot_right[0], stop_x)\n if top_left is not None:\n start_y = 
max(top_left[1], start_y)\n start_x = max(top_left[0], start_x)\n\n if bot_right is not None or top_left is not None:\n w, h = stop_x - start_x, stop_y - start_y\n if w <= 0 or h <= 0:\n print(\"Contour is not in specified ROI, skip\")\n return {}, {}\n else:\n print(\"Adjusted Bounding Box:\", start_x, start_y, w, h)\n \n if isinstance(contour_fn, str):\n if contour_fn == 'four_pt':\n cont_check_fn = isInContourV3_Easy(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'four_pt_hard':\n cont_check_fn = isInContourV3_Hard(contour=cont, patch_size=ref_patch_size[0], center_shift=0.5)\n elif contour_fn == 'center':\n cont_check_fn = isInContourV2(contour=cont, patch_size=ref_patch_size[0])\n elif contour_fn == 'basic':\n cont_check_fn = isInContourV1(contour=cont)\n else:\n raise NotImplementedError\n else:\n assert isinstance(contour_fn, Contour_Checking_fn)\n cont_check_fn = contour_fn\n\n \n step_size_x = step_size * patch_downsample[0]\n step_size_y = step_size * patch_downsample[1]\n\n x_range = np.arange(start_x, stop_x, step=step_size_x)\n y_range = np.arange(start_y, stop_y, step=step_size_y)\n x_coords, y_coords = np.meshgrid(x_range, y_range, indexing='ij')\n coord_candidates = np.array([x_coords.flatten(), y_coords.flatten()]).transpose()\n\n num_workers = mp.cpu_count()\n if num_workers > 4:\n num_workers = 4\n pool = mp.Pool(num_workers)\n # pdb.set_trace()\n # iterable = [(coord, contour_holes, ref_patch_size[0], cont_check_fn) for coord in coord_candidates]\n # iter_patch_label = [(coord, self.contours_tumor, ref_patch_size[0], cont_check_fn) for coord in coord_candidates]\n iterable = [[coord, contour_holes, ref_patch_size[0], cont_check_fn] for coord in coord_candidates]\n iter_patch_label = [[coord, self.contours_tumor, ref_patch_size[0], cont_check_fn] for coord in coord_candidates]\n\n results = pool.starmap(WholeSlideImage.process_coord_candidate, iterable)\n labels = pool.starmap(WholeSlideImage.process_coord_candidate, iter_patch_label)\n pool.close()\n\n final_results = []\n final_labels = []\n for res_index in range(len(results)):\n if results[res_index] is not None:\n final_results.append(results[res_index])\n if labels[res_index] is None:\n final_labels.append(1)\n else:\n final_labels.append(0)\n # pdb.set_trace()\n\n # results = np.array([result for result in results if result is not None])\n results = np.asarray(final_results)\n labels = np.asarray(final_labels)\n # print('Extracted {} coordinates'.format(len(results)))\n\n if len(results)>1:\n asset_dict = {'coords' : results,\n 'labels': labels}\n # pdb.set_trace()\n print('patch_shape',results.shape)\n attr = {'patch_size' : patch_size, # To be considered...\n 'patch_level' : patch_level,\n 'downsample': self.level_downsamples[patch_level],\n 'downsampled_level_dim' : tuple(np.array(self.level_dim[patch_level])),\n 'level_dim': self.level_dim[patch_level],\n 'name': self.name,\n 'save_path': save_path}\n\n attr_dict = { 'coords' : attr}\n return asset_dict, attr_dict\n\n else:\n return {}, {}\n\n @staticmethod\n def process_coord_candidate(coord, contour_holes, ref_patch_size, cont_check_fn):\n if WholeSlideImage.isInContours(cont_check_fn, coord, contour_holes, ref_patch_size):\n return coord\n else:\n return None\n\n def visHeatmap(self, scores, coords, vis_level=-1, \n top_left=None, bot_right=None,\n patch_size=(256, 256), \n blank_canvas=False, canvas_color=(220, 20, 50), alpha=0.4, \n blur=False, overlap=0.0, \n segment=True, use_holes=True,\n 
convert_to_percentiles=False, \n binarize=False, thresh=0.5,\n max_size=None,\n custom_downsample = 1,\n cmap='coolwarm'):\n\n \"\"\"\n Args:\n scores (numpy array of float): Attention scores \n coords (numpy array of int, n_patches x 2): Corresponding coordinates (relative to lvl 0)\n vis_level (int): WSI pyramid level to visualize\n patch_size (tuple of int): Patch dimensions (relative to lvl 0)\n blank_canvas (bool): Whether to use a blank canvas to draw the heatmap (vs. using the original slide)\n canvas_color (tuple of uint8): Canvas color\n alpha (float [0, 1]): blending coefficient for overlaying heatmap onto original slide\n blur (bool): apply gaussian blurring\n overlap (float [0 1]): percentage of overlap between neighboring patches (only affect radius of blurring)\n segment (bool): whether to use tissue segmentation contour (must have already called self.segmentTissue such that \n self.contours_tissue and self.holes_tissue are not None\n use_holes (bool): whether to also clip out detected tissue cavities (only in effect when segment == True)\n convert_to_percentiles (bool): whether to convert attention scores to percentiles\n binarize (bool): only display patches > threshold\n threshold (float): binarization threshold\n max_size (int): Maximum canvas size (clip if goes over)\n custom_downsample (int): additionally downscale the heatmap by specified factor\n cmap (str): name of matplotlib colormap to use\n \"\"\"\n\n if vis_level < 0:\n vis_level = self.wsi.get_best_level_for_downsample(32)\n # pdb.set_trace()\n downsample = self.level_downsamples[vis_level]\n scale = [1/downsample[0], 1/downsample[1]] # Scaling from 0 to desired level\n if len(scores.shape) == 2:\n scores = scores.flatten()\n\n if binarize:\n if thresh < 0:\n threshold = 1.0/len(scores)\n \n else:\n threshold = thresh\n \n else:\n threshold = 0.0\n\n ##### calculate size of heatmap and filter coordinates/scores outside specified bbox region #####\n if top_left is not None and bot_right is not None:\n scores, coords = screen_coords(scores, coords, top_left, bot_right)\n coords = coords - top_left\n top_left = tuple(top_left)\n bot_right = tuple(bot_right)\n w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int))\n region_size = (w, h)\n\n else:\n region_size = self.level_dim[vis_level]\n top_left = (0,0)\n bot_right = self.level_dim[0]\n w, h = region_size\n\n patch_size = np.ceil(np.array(patch_size) * np.array(scale)).astype(int)\n coords = np.ceil(coords * np.array(scale)).astype(int)\n \n print('\\ncreating heatmap for: ')\n print('top_left: ', top_left, 'bot_right: ', bot_right)\n print('w: {}, h: {}'.format(w, h))\n print('scaled patch size: ', patch_size)\n\n ###### normalize filtered scores ######\n if convert_to_percentiles:\n scores = to_percentiles(scores) \n\n scores /= 100\n \n ######## calculate the heatmap of raw attention scores (before colormap) \n # by accumulating scores over overlapped regions ######\n \n # heatmap overlay: tracks attention score over each pixel of heatmap\n # overlay counter: tracks how many times attention score is accumulated over each pixel of heatmap\n overlay = np.full(np.flip(region_size), 0).astype(float)\n counter = np.full(np.flip(region_size), 0).astype(np.uint16) \n count = 0\n for idx in range(len(coords)):\n score = scores[idx]\n coord = coords[idx]\n if score > threshold:\n if binarize:\n score=1.0\n count+=1\n else:\n score=0.0\n # accumulate attention\n overlay[coord[1]:coord[1]+patch_size[1], 
coord[0]:coord[0]+patch_size[0]] += score\n # accumulate counter\n counter[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] += 1\n\n if binarize:\n print('\\nbinarized tiles based on cutoff of {}'.format(threshold))\n print('identified {}/{} patches as positive'.format(count, len(coords)))\n \n # fetch attended region and average accumulated attention\n zero_mask = counter == 0\n\n if binarize:\n overlay[~zero_mask] = np.around(overlay[~zero_mask] / counter[~zero_mask])\n else:\n overlay[~zero_mask] = overlay[~zero_mask] / counter[~zero_mask]\n del counter \n if blur:\n overlay = cv2.GaussianBlur(overlay,tuple((patch_size * (1-overlap)).astype(int) * 2 +1),0) \n\n if segment:\n tissue_mask = self.get_seg_mask(region_size, scale, use_holes=use_holes, offset=tuple(top_left))\n # return Image.fromarray(tissue_mask) # tissue mask\n # pdb.set_trace()\n\n if not blank_canvas:\n # downsample original image and use as canvas\n img = np.array(self.wsi.read_region(top_left, vis_level, region_size).convert(\"RGB\"))\n else:\n # use blank canvas\n img = np.array(Image.new(size=region_size, mode=\"RGB\", color=(255,255,255))) \n\n #return Image.fromarray(img) #raw image\n\n print('\\ncomputing heatmap image')\n print('total of {} patches'.format(len(coords)))\n twenty_percent_chunk = max(1, int(len(coords) * 0.2))\n\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n \n for idx in range(len(coords)):\n if (idx + 1) % twenty_percent_chunk == 0:\n print('progress: {}/{}'.format(idx, len(coords)))\n \n score = scores[idx]\n coord = coords[idx]\n if score >= threshold:\n\n # attention block\n raw_block = overlay[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]]\n \n # image block (either blank canvas or orig image)\n img_block = img[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]].copy()\n\n # color block (cmap applied to attention block)\n color_block = (cmap(raw_block) * 255)[:,:,:3].astype(np.uint8)\n\n if segment:\n # tissue mask block\n mask_block = tissue_mask[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] \n # copy over only tissue masked portion of color block\n img_block[mask_block] = color_block[mask_block]\n else:\n # copy over entire color block\n img_block = color_block\n\n # rewrite image block\n img[coord[1]:coord[1]+patch_size[1], coord[0]:coord[0]+patch_size[0]] = img_block.copy()\n \n #return Image.fromarray(img) #overlay\n print('Done')\n del overlay\n\n if blur:\n img = cv2.GaussianBlur(img,tuple((patch_size * (1-overlap)).astype(int) * 2 +1),0) \n\n if alpha < 1.0:\n img = self.block_blending(img, vis_level, top_left, bot_right, alpha=alpha, blank_canvas=blank_canvas, block_size=1024)\n\n if self.contours_tumor is not None:\n cv2.drawContours(img, self.scaleContourDim(self.contours_tumor, scale),\n -1, color=(0,71,171), thickness=4, lineType=cv2.LINE_8, offset=tuple(-(np.array(top_left) * scale).astype(int)))\n\n img = Image.fromarray(img)\n w, h = img.size\n\n if custom_downsample > 1:\n img = img.resize((int(w/custom_downsample), int(h/custom_downsample)))\n\n if max_size is not None and (w > max_size or h > max_size):\n resizeFactor = max_size/w if w > h else max_size/h\n img = img.resize((int(w*resizeFactor), int(h*resizeFactor)))\n \n return img\n\n \n def block_blending(self, img, vis_level, top_left, bot_right, alpha=0.5, blank_canvas=False, block_size=1024):\n print('\\ncomputing blend')\n downsample = self.level_downsamples[vis_level]\n w = img.shape[1]\n h = img.shape[0]\n block_size_x = 
min(block_size, w)\n block_size_y = min(block_size, h)\n print('using block size: {} x {}'.format(block_size_x, block_size_y))\n\n shift = top_left # amount shifted w.r.t. (0,0)\n for x_start in range(top_left[0], bot_right[0], block_size_x * int(downsample[0])):\n for y_start in range(top_left[1], bot_right[1], block_size_y * int(downsample[1])):\n #print(x_start, y_start)\n\n # 1. convert wsi coordinates to image coordinates via shift and scale\n x_start_img = int((x_start - shift[0]) / int(downsample[0]))\n y_start_img = int((y_start - shift[1]) / int(downsample[1]))\n \n # 2. compute end points of blend tile, careful not to go over the edge of the image\n y_end_img = min(h, y_start_img+block_size_y)\n x_end_img = min(w, x_start_img+block_size_x)\n\n if y_end_img == y_start_img or x_end_img == x_start_img:\n continue\n #print('start_coord: {} end_coord: {}'.format((x_start_img, y_start_img), (x_end_img, y_end_img)))\n \n # 3. fetch blend block and size\n blend_block = img[y_start_img:y_end_img, x_start_img:x_end_img] \n blend_block_size = (x_end_img-x_start_img, y_end_img-y_start_img)\n \n if not blank_canvas:\n # 4. read actual wsi block as canvas block\n pt = (x_start, y_start)\n canvas = np.array(self.wsi.read_region(pt, vis_level, blend_block_size).convert(\"RGB\")) \n else:\n # 4. OR create blank canvas block\n canvas = np.array(Image.new(size=blend_block_size, mode=\"RGB\", color=(255,255,255)))\n\n # 5. blend color block and canvas block\n img[y_start_img:y_end_img, x_start_img:x_end_img] = cv2.addWeighted(blend_block, alpha, canvas, 1 - alpha, 0, canvas)\n return img\n\n def get_seg_mask(self, region_size, scale, use_holes=False, offset=(0,0)):\n print('\\ncomputing foreground tissue mask')\n tissue_mask = np.full(np.flip(region_size), 0).astype(np.uint8)\n contours_tissue = self.scaleContourDim(self.contours_tissue, scale)\n offset = tuple((np.array(offset) * np.array(scale) * -1).astype(np.int32))\n\n contours_holes = self.scaleHolesDim(self.holes_tissue, scale)\n contours_tissue, contours_holes = zip(*sorted(zip(contours_tissue, contours_holes), key=lambda x: cv2.contourArea(x[0]), reverse=True))\n for idx in range(len(contours_tissue)):\n cv2.drawContours(image=tissue_mask, contours=contours_tissue, contourIdx=idx, color=(1), offset=offset, thickness=-1)\n\n if use_holes:\n cv2.drawContours(image=tissue_mask, contours=contours_holes[idx], contourIdx=-1, color=(0), offset=offset, thickness=-1)\n # contours_holes = self._scaleContourDim(self.holes_tissue, scale, holes=True, area_thresh=area_thresh)\n \n tissue_mask = tissue_mask.astype(bool)\n print('detected {}/{} of region as tissue'.format(tissue_mask.sum(), tissue_mask.size))\n return tissue_mask" }, { "identifier": "StitchCoords", "path": "wsi_core/wsi_utils.py", "snippet": "def StitchCoords(hdf5_file_path, wsi_object, downscale=16, draw_grid=False, bg_color=(0,0,0), alpha=-1):\n wsi = wsi_object.getOpenSlide()\n vis_level = wsi.get_best_level_for_downsample(downscale)\n file = h5py.File(hdf5_file_path, 'r')\n dset = file['coords']\n coords = dset[:]\n w, h = wsi.level_dimensions[0]\n\n print('start stitching {}'.format(dset.attrs['name']))\n print('original size: {} x {}'.format(w, h))\n\n w, h = wsi.level_dimensions[vis_level]\n\n print('downscaled size for stiching: {} x {}'.format(w, h))\n print('number of patches: {}'.format(len(coords)))\n \n patch_size = dset.attrs['patch_size']\n patch_level = dset.attrs['patch_level']\n print('patch size: {}x{} patch level: {}'.format(patch_size, patch_size, patch_level))\n 
patch_size = tuple((np.array((patch_size, patch_size)) * wsi.level_downsamples[patch_level]).astype(np.int32))\n print('ref patch size: {}x{}'.format(patch_size, patch_size))\n\n if w*h > Image.MAX_IMAGE_PIXELS: \n raise Image.DecompressionBombError(\"Visualization Downscale %d is too large\" % downscale)\n \n if alpha < 0 or alpha == -1:\n heatmap = Image.new(size=(w,h), mode=\"RGB\", color=bg_color)\n else:\n heatmap = Image.new(size=(w,h), mode=\"RGBA\", color=bg_color + (int(255 * alpha),))\n \n heatmap = np.array(heatmap)\n heatmap = DrawMapFromCoords(heatmap, wsi_object, coords, patch_size, vis_level, indices=None, draw_grid=draw_grid)\n \n file.close()\n return heatmap" }, { "identifier": "initialize_df", "path": "wsi_core/batch_process_utils.py", "snippet": "def initialize_df(slides, seg_params, filter_params, vis_params, patch_params, \n\tuse_heatmap_args=False, save_patches=False):\n\n\ttotal = len(slides)\n\tif isinstance(slides, pd.DataFrame):\n\t\tslide_ids = slides.slide_id.values\n\telse:\n\t\tslide_ids = slides\n\tdefault_df_dict = {'slide_id': slide_ids, 'process': np.full((total), 1, dtype=np.uint8)}\n\n\t# initiate empty labels in case not provided\n\tif use_heatmap_args:\n\t\tdefault_df_dict.update({'label': np.full((total), -1)})\n\t\n\tdefault_df_dict.update({\n\t\t'status': np.full((total), 'tbp'),\n\t\t# seg params\n\t\t'seg_level': np.full((total), int(seg_params['seg_level']), dtype=np.int8),\n\t\t'sthresh': np.full((total), int(seg_params['sthresh']), dtype=np.uint8),\n\t\t'mthresh': np.full((total), int(seg_params['mthresh']), dtype=np.uint8),\n\t\t'close': np.full((total), int(seg_params['close']), dtype=np.uint32),\n\t\t'use_otsu': np.full((total), bool(seg_params['use_otsu']), dtype=bool),\n\t\t'keep_ids': np.full((total), seg_params['keep_ids']),\n\t\t'exclude_ids': np.full((total), seg_params['exclude_ids']),\n\t\t\n\t\t# filter params\n\t\t'a_t': np.full((total), int(filter_params['a_t']), dtype=np.float32),\n\t\t'a_h': np.full((total), int(filter_params['a_h']), dtype=np.float32),\n\t\t'max_n_holes': np.full((total), int(filter_params['max_n_holes']), dtype=np.uint32),\n\n\t\t# vis params\n\t\t'vis_level': np.full((total), int(vis_params['vis_level']), dtype=np.int8),\n\t\t'line_thickness': np.full((total), int(vis_params['line_thickness']), dtype=np.uint32),\n\n\t\t# patching params\n\t\t'use_padding': np.full((total), bool(patch_params['use_padding']), dtype=bool),\n\t\t'contour_fn': np.full((total), patch_params['contour_fn'])\n\t\t})\n\n\tif save_patches:\n\t\tdefault_df_dict.update({\n\t\t\t'white_thresh': np.full((total), int(patch_params['white_thresh']), dtype=np.uint8),\n\t\t\t'black_thresh': np.full((total), int(patch_params['black_thresh']), dtype=np.uint8)})\n\n\tif use_heatmap_args:\n\t\t# initiate empty x,y coordinates in case not provided\n\t\tdefault_df_dict.update({'x1': np.empty((total)).fill(np.NaN), \n\t\t\t'x2': np.empty((total)).fill(np.NaN), \n\t\t\t'y1': np.empty((total)).fill(np.NaN), \n\t\t\t'y2': np.empty((total)).fill(np.NaN)})\n\n\n\tif isinstance(slides, pd.DataFrame):\n\t\ttemp_copy = pd.DataFrame(default_df_dict) # temporary dataframe w/ default params\n\t\t# find key in provided df\n\t\t# if exist, fill empty fields w/ default values, else, insert the default values as a new column\n\t\tfor key in default_df_dict.keys(): \n\t\t\tif key in slides.columns:\n\t\t\t\tmask = slides[key].isna()\n\t\t\t\tslides.loc[mask, key] = temp_copy.loc[mask, key]\n\t\t\telse:\n\t\t\t\tslides.insert(len(slides.columns), key, 
default_df_dict[key])\n\telse:\n\t\tslides = pd.DataFrame(default_df_dict)\n\t\n\treturn slides" } ]
from wsi_core.WholeSlideImage import WholeSlideImage from wsi_core.wsi_utils import StitchCoords from wsi_core.batch_process_utils import initialize_df from glob import glob import os import numpy as np import time import argparse import pdb import pandas as pd
11,839
# internal imports # other imports def stitching(file_path, wsi_object, downscale=64): start = time.time() heatmap = StitchCoords(file_path, wsi_object, downscale=downscale, bg_color=(0, 0, 0), alpha=-1, draw_grid=False) total_time = time.time() - start return heatmap, total_time def segment(WSI_object, seg_params, filter_params): ### Start Seg Timer start_time = time.time() # Segment WSI_object.segmentTissue(**seg_params, filter_params=filter_params) ### Stop Seg Timers seg_time_elapsed = time.time() - start_time return WSI_object, seg_time_elapsed def patching(WSI_object, **kwargs): ### Start Patch Timer start_time = time.time() # Patch file_path = WSI_object.process_contours(**kwargs) ### Stop Patch Timer patch_time_elapsed = time.time() - start_time return file_path, patch_time_elapsed def walk_dir(data_dir, file_types=['.kfb', '.tif', '.svs', '.ndpi', '.mrxs', '.hdx', '.sdpc', '.mdsx', '.tiff', '.tmap']): path_list = [] for dirpath, dirnames, files in os.walk(data_dir): for f in files: for this_type in file_types: if f.lower().endswith(this_type): path_list.append(os.path.join(dirpath, f)) break return path_list def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_dir, patch_size=256, step_size=256, seg_params={'seg_level': -1, 'sthresh': 8, 'mthresh': 7, 'close': 4, 'use_otsu': False, 'keep_ids': 'none', 'exclude_ids': 'none'}, filter_params={'a_t': 100, 'a_h': 16, 'max_n_holes': 8}, vis_params={'vis_level': -1, 'line_thickness': 500}, patch_params={'use_padding': True, 'contour_fn': 'four_pt'}, patch_level=1, use_default_params=False, seg=False, save_mask=True, stitch=False, patch=False, auto_skip=True, process_list=None): slides = glob(source + '/*/*/*/*.svs') # slides = sorted(os.listdir(source), reverse=True) # slides = # pdb.set_trace() # slides = slides[-10:] slides = [slide for slide in slides if os.path.isfile(os.path.join(source, slide))] if process_list is None:
# internal imports # other imports def stitching(file_path, wsi_object, downscale=64): start = time.time() heatmap = StitchCoords(file_path, wsi_object, downscale=downscale, bg_color=(0, 0, 0), alpha=-1, draw_grid=False) total_time = time.time() - start return heatmap, total_time def segment(WSI_object, seg_params, filter_params): ### Start Seg Timer start_time = time.time() # Segment WSI_object.segmentTissue(**seg_params, filter_params=filter_params) ### Stop Seg Timers seg_time_elapsed = time.time() - start_time return WSI_object, seg_time_elapsed def patching(WSI_object, **kwargs): ### Start Patch Timer start_time = time.time() # Patch file_path = WSI_object.process_contours(**kwargs) ### Stop Patch Timer patch_time_elapsed = time.time() - start_time return file_path, patch_time_elapsed def walk_dir(data_dir, file_types=['.kfb', '.tif', '.svs', '.ndpi', '.mrxs', '.hdx', '.sdpc', '.mdsx', '.tiff', '.tmap']): path_list = [] for dirpath, dirnames, files in os.walk(data_dir): for f in files: for this_type in file_types: if f.lower().endswith(this_type): path_list.append(os.path.join(dirpath, f)) break return path_list def seg_and_patch(source, save_dir, patch_save_dir, mask_save_dir, stitch_save_dir, patch_size=256, step_size=256, seg_params={'seg_level': -1, 'sthresh': 8, 'mthresh': 7, 'close': 4, 'use_otsu': False, 'keep_ids': 'none', 'exclude_ids': 'none'}, filter_params={'a_t': 100, 'a_h': 16, 'max_n_holes': 8}, vis_params={'vis_level': -1, 'line_thickness': 500}, patch_params={'use_padding': True, 'contour_fn': 'four_pt'}, patch_level=1, use_default_params=False, seg=False, save_mask=True, stitch=False, patch=False, auto_skip=True, process_list=None): slides = glob(source + '/*/*/*/*.svs') # slides = sorted(os.listdir(source), reverse=True) # slides = # pdb.set_trace() # slides = slides[-10:] slides = [slide for slide in slides if os.path.isfile(os.path.join(source, slide))] if process_list is None:
df = initialize_df(slides, seg_params, filter_params, vis_params, patch_params)
2
2023-11-12 14:07:34+00:00
16k
zhang-tao-whu/DVIS_Plus
dvis_Plus/meta_architecture.py
[ { "identifier": "VideoSetCriterion", "path": "mask2former_video/modeling/criterion.py", "snippet": "class VideoSetCriterion(nn.Module):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,\n num_points, oversample_ratio, importance_sample_ratio, frames=2):\n \"\"\"Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer(\"empty_weight\", empty_weight)\n\n # pointwise mask loss parameters\n self.num_points = num_points\n self.oversample_ratio = oversample_ratio\n self.importance_sample_ratio = importance_sample_ratio\n self.frames = frames\n\n def loss_labels(self, outputs, targets, indices, num_masks):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"].float()\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device\n )\n target_classes[idx] = target_classes_o.to(target_classes)\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)\n losses = {\"loss_ce\": loss_ce}\n return losses\n \n def loss_masks(self, outputs, targets, indices, num_masks):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n src_masks = outputs[\"pred_masks\"]\n src_masks = src_masks[src_idx]\n # Modified to handle video\n target_masks = torch.cat([t['masks'][i] for t, (_, i) in zip(targets, indices)]).to(src_masks)\n\n # No need to upsample predictions as we are using normalized coordinates :)\n # NT x 1 x H x W\n src_masks = src_masks.flatten(0, 1)[:, None]\n target_masks = target_masks.flatten(0, 1)[:, None]\n\n with torch.no_grad():\n # sample point_coords\n point_coords = get_uncertain_point_coords_with_randomness(\n src_masks.to(torch.float32),\n lambda logits: calculate_uncertainty(logits),\n self.num_points,\n self.oversample_ratio,\n self.importance_sample_ratio,\n )\n # get gt labels\n point_labels = point_sample(\n target_masks,\n point_coords.to(target_masks),\n align_corners=False,\n ).squeeze(1)\n\n point_logits = point_sample(\n src_masks,\n point_coords.to(src_masks),\n align_corners=False,\n ).squeeze(1)\n\n losses = {\n 
\"loss_mask\": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),\n \"loss_dice\": dice_loss_jit(point_logits, point_labels, num_masks),\n }\n\n del src_masks\n del target_masks\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_masks):\n loss_map = {\n 'labels': self.loss_labels,\n 'masks': self.loss_masks,\n }\n assert loss in loss_map, f\"do you really want to compute {loss} loss?\"\n return loss_map[loss](outputs, targets, indices, num_masks)\n\n def forward(self, outputs, targets, matcher_outputs=None, ret_match_result=False):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n if matcher_outputs is None:\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n else:\n outputs_without_aux = {k: v for k, v in matcher_outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n # [per image indicates], per image indicates -> (pred inds, gt inds)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_masks = sum(len(t[\"labels\"]) for t in targets)\n num_masks = torch.as_tensor(\n [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_masks)\n num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n if matcher_outputs is None:\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n if ret_match_result:\n return losses, indices\n return losses\n\n def __repr__(self):\n head = \"Criterion \" + self.__class__.__name__\n body = [\n \"matcher: {}\".format(self.matcher.__repr__(_repr_indent=8)),\n \"losses: {}\".format(self.losses),\n \"weight_dict: {}\".format(self.weight_dict),\n \"num_classes: {}\".format(self.num_classes),\n \"eos_coef: {}\".format(self.eos_coef),\n \"num_points: {}\".format(self.num_points),\n \"oversample_ratio: {}\".format(self.oversample_ratio),\n \"importance_sample_ratio: {}\".format(self.importance_sample_ratio),\n ]\n _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": 
"VideoHungarianMatcher", "path": "mask2former_video/modeling/matcher.py", "snippet": "class VideoHungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost\n cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_mask = cost_mask\n self.cost_dice = cost_dice\n\n assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, \"all costs cant be 0\"\n\n self.num_points = num_points\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n indices = []\n\n # Iterate through batch size\n for b in range(bs):\n\n out_prob = outputs[\"pred_logits\"][b].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[b][\"labels\"].to(torch.int64)\n\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n try:\n cost_class = -out_prob[:, tgt_ids]\n except:\n cost_class = 0.0\n print(tgt_ids)\n\n out_mask = outputs[\"pred_masks\"][b] # [num_queries, T, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[b][\"masks\"].to(out_mask) # [num_gts, T, H_pred, W_pred]\n\n # out_mask = out_mask[:, None]\n # tgt_mask = tgt_mask[:, None]\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1).to(tgt_mask),\n align_corners=False,\n ).flatten(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1).to(out_mask),\n align_corners=False,\n ).flatten(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n\n indices.append(linear_sum_assignment(C))\n\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n # [per image indicates], per image indicates -> (pred inds, gt inds)\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n 
\"pred_masks\": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"masks\": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n return self.memory_efficient_forward(outputs, targets)\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_mask: {}\".format(self.cost_mask),\n \"cost_dice: {}\".format(self.cost_dice),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "VideoHungarianMatcher_Consistent", "path": "mask2former_video/modeling/matcher.py", "snippet": "class VideoHungarianMatcher_Consistent(VideoHungarianMatcher):\n \"\"\"\n Only match in the first frame where the object appears in the GT.\n \"\"\"\n def __init__(self, cost_class: float = 1, cost_mask: float = 1,\n cost_dice: float = 1, num_points: int = 0,\n frames: int = 5):\n super().__init__(\n cost_class=cost_class, cost_mask=cost_mask,\n cost_dice=cost_dice, num_points=num_points,\n )\n self.frames = frames\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n indices = []\n\n # Iterate through batch size\n for b in range(bs // self.frames):\n # find the fist frame where the object appears\n id_apper_frame = {}\n for f in range(self.frames):\n overall_bs = b * self.frames + f\n instance_ids = targets[overall_bs][\"ids\"]\n valid = torch.nonzero(instance_ids.squeeze(1) != -1)\n for v in valid:\n v = v.item()\n if v not in id_apper_frame.keys():\n id_apper_frame[v] = f\n\n # obtain the object ID that first appears in each frame\n apper_frame_id = {}\n for id in id_apper_frame.keys():\n f = id_apper_frame[id]\n if f in apper_frame_id:\n apper_frame_id[f].append(id)\n else:\n apper_frame_id[f] = [id]\n need_match_frames = list(apper_frame_id.keys())\n need_match_frames.sort()\n\n # per frame match\n used_query_idx = []\n matched_indices = [[], []]\n for f in need_match_frames:\n overall_bs = b * self.frames + f\n used_tgt = apper_frame_id[f]\n out_prob = outputs[\"pred_logits\"][overall_bs].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[overall_bs][\"labels\"][used_tgt]\n\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n\n out_mask = outputs[\"pred_masks\"][overall_bs] # [num_queries, T, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[overall_bs][\"masks\"][used_tgt].to(out_mask) # [num_gts, T, H_pred, W_pred]\n\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1).to(tgt_mask),\n align_corners=False,\n ).flatten(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1).to(out_mask),\n align_corners=False,\n ).flatten(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n if len(used_query_idx) != 0:\n C[used_query_idx, :] = 1e6\n indice1, indice2 = linear_sum_assignment(C)\n\n used_query_idx += list(indice1)\n\n indice2 = np.array(used_tgt)[indice2]\n matched_indices[0] += list(indice1)\n matched_indices[1] += list(indice2)\n indices += [matched_indices] * self.frames\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]" }, { "identifier": "retry_if_cuda_oom", "path": "mask2former_video/utils/memory.py", "snippet": "def retry_if_cuda_oom(func):\n \"\"\"\n Makes a function retry itself after encountering\n pytorch's CUDA OOM error.\n It will first retry after calling `torch.cuda.empty_cache()`.\n If that still fails, it will then retry by trying to convert inputs to CPUs.\n In this case, it expects the function to dispatch to CPU implementation.\n The return values may become CPU tensors as well and it's user's\n responsibility to convert it back to CUDA tensor if needed.\n Args:\n func: a stateless callable that takes tensor-like objects as arguments\n Returns:\n a callable which retries `func` if OOM is encountered.\n Examples:\n ::\n output = retry_if_cuda_oom(some_torch_function)(input1, input2)\n # output may be on CPU even if inputs are on GPU\n Note:\n 1. When converting inputs to CPU, it will only look at each argument and check\n if it has `.device` and `.to` for conversion. Nested structures of tensors\n are not supported.\n 2. Since the function might be called more than once, it has to be\n stateless.\n \"\"\"\n\n def maybe_to_cpu(x):\n try:\n like_gpu_tensor = x.device.type == \"cuda\" and hasattr(x, \"to\")\n except AttributeError:\n like_gpu_tensor = False\n if like_gpu_tensor:\n return x.to(device=\"cpu\").to(torch.float32)\n else:\n return x\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Clear cache and retry\n torch.cuda.empty_cache()\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Try on CPU. 
This slows down the code significantly, therefore print a notice.\n logger = logging.getLogger(__name__)\n logger.info(\"Attempting to copy inputs to CPU due to CUDA OOM\")\n new_args = (maybe_to_cpu(x) for x in args)\n new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}\n with autocast(enabled=False):\n return func(*new_args, **new_kwargs)\n\n return wrapped" }, { "identifier": "ReferringTracker_noiser", "path": "dvis_Plus/tracker.py", "snippet": "class ReferringTracker_noiser(torch.nn.Module):\n def __init__(\n self,\n hidden_channel=256,\n feedforward_channel=2048,\n num_head=8,\n decoder_layer_num=6,\n mask_dim=256,\n class_num=25,\n noise_mode='hard',\n noise_ratio=0.5,\n ):\n super(ReferringTracker_noiser, self).__init__()\n\n # init transformer layers\n self.num_heads = num_head\n self.num_layers = decoder_layer_num\n self.transformer_self_attention_layers = nn.ModuleList()\n self.transformer_cross_attention_layers = nn.ModuleList()\n self.transformer_ffn_layers = nn.ModuleList()\n\n for _ in range(self.num_layers):\n\n self.transformer_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_cross_attention_layers.append(\n ReferringCrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_ffn_layers.append(\n FFNLayer(\n d_model=hidden_channel,\n dim_feedforward=feedforward_channel,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.use_memory = False\n if self.use_memory:\n self.memory_cross_attn = CrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,)\n self.references_memory = None\n\n self.decoder_norm = nn.LayerNorm(hidden_channel)\n\n # init heads\n self.class_embed = nn.Linear(2 * hidden_channel, class_num + 1)\n self.mask_embed = MLP(hidden_channel, hidden_channel, mask_dim, 3)\n\n # for cl learning\n self.ref_proj = MLP(hidden_channel, hidden_channel, hidden_channel, 3)\n\n for layer in self.ref_proj.layers:\n weight_init.c2_xavier_fill(layer)\n\n # mask features projection\n self.mask_feature_proj = nn.Conv2d(\n mask_dim,\n mask_dim,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n\n # record previous frame information\n self.last_outputs = None\n self.last_frame_embeds = None\n self.last_reference = None\n\n self.noiser = Noiser(noise_ratio=noise_ratio, mode=noise_mode)\n\n def _clear_memory(self):\n del self.last_outputs\n self.last_outputs = None\n self.last_reference = None\n return\n\n def forward(self, frame_embeds, mask_features, resume=False,\n return_indices=False, frame_classes=None,\n frame_embeds_no_norm=None):\n \"\"\"\n :param frame_embeds: the instance queries output by the segmenter\n :param mask_features: the mask features output by the segmenter\n :param resume: whether the first frame is the start of the video\n :param return_indices: whether return the match indices\n :return: output dict, including masks, classes, embeds.\n \"\"\"\n # mask feature projection\n mask_features_shape = mask_features.shape\n mask_features = self.mask_feature_proj(mask_features.flatten(0, 1)).reshape(*mask_features_shape) # (b, t, c, h, w)\n\n frame_embeds = frame_embeds.permute(2, 3, 0, 1) # t, q, b, c\n if frame_embeds_no_norm is not None:\n frame_embeds_no_norm = frame_embeds_no_norm.permute(2, 3, 0, 1) # t, q, b, c\n n_frame, n_q, bs, _ = frame_embeds.size()\n outputs = []\n ret_indices = []\n\n all_frames_references = 
[]\n\n for i in range(n_frame):\n ms_output = []\n single_frame_embeds = frame_embeds[i] # q b c\n if frame_embeds_no_norm is not None:\n single_frame_embeds_no_norm = frame_embeds_no_norm[i]\n else:\n single_frame_embeds_no_norm = single_frame_embeds\n if frame_classes is None:\n single_frame_classes = None\n else:\n single_frame_classes = frame_classes[i]\n\n frame_key = single_frame_embeds_no_norm\n\n # the first frame of a video\n if i == 0 and resume is False:\n self._clear_memory()\n for j in range(self.num_layers):\n if j == 0:\n indices, noised_init = self.noiser(\n single_frame_embeds,\n single_frame_embeds,\n cur_embeds_no_norm=single_frame_embeds_no_norm,\n activate=False,\n cur_classes=single_frame_classes,\n )\n ms_output.append(single_frame_embeds_no_norm[indices])\n self.last_frame_embeds = single_frame_embeds[indices]\n ret_indices.append(indices)\n output = self.transformer_cross_attention_layers[j](\n noised_init, self.ref_proj(frame_key),\n frame_key, single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n else:\n output = self.transformer_cross_attention_layers[j](\n ms_output[-1], self.ref_proj(ms_output[-1]),\n frame_key, single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n self.last_reference = self.ref_proj(frame_key)\n else:\n reference = self.ref_proj(self.last_outputs[-1])\n self.last_reference = reference\n\n for j in range(self.num_layers):\n if j == 0:\n indices, noised_init = self.noiser(\n self.last_frame_embeds,\n single_frame_embeds,\n cur_embeds_no_norm=single_frame_embeds_no_norm,\n activate=self.training,\n cur_classes=single_frame_classes,\n )\n ms_output.append(single_frame_embeds_no_norm[indices])\n self.last_frame_embeds = single_frame_embeds[indices]\n ret_indices.append(indices)\n output = self.transformer_cross_attention_layers[j](\n noised_init, reference, frame_key,\n single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n else:\n output = self.transformer_cross_attention_layers[j](\n ms_output[-1], reference, frame_key,\n single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n\n all_frames_references.append(self.last_reference)\n\n ms_output = torch.stack(ms_output, dim=0) # (1 + layers, q, b, c)\n self.last_outputs = ms_output\n outputs.append(ms_output[1:])\n outputs = torch.stack(outputs, dim=0) # (t, l, q, b, c)\n\n all_frames_references = torch.stack(all_frames_references, dim=0) # (t, q, b, c)\n\n mask_features_ = mask_features\n if not 
self.training:\n outputs = outputs[:, -1:]\n del mask_features\n outputs_class, outputs_masks = self.prediction(outputs, mask_features_, all_frames_references)\n out = {\n 'pred_logits': outputs_class[-1].transpose(1, 2), # (b, t, q, c)\n 'pred_masks': outputs_masks[-1], # (b, q, t, h, w)\n 'aux_outputs': self._set_aux_loss(\n outputs_class, outputs_masks\n ),\n 'pred_embds': outputs[:, -1].permute(2, 3, 0, 1), # (b, c, t, q),\n 'pred_references': all_frames_references.permute(2, 3, 0, 1), # (b, c, t, q),\n }\n if return_indices:\n return out, ret_indices\n else:\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_seg_masks):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{\"pred_logits\": a.transpose(1, 2), \"pred_masks\": b}\n for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])\n ]\n\n def prediction(self, outputs, mask_features, references):\n # outputs (t, l, q, b, c)\n # mask_features (b, t, c, h, w)\n # references (t, q, b, c)\n decoder_output = self.decoder_norm(outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n\n references = references.unsqueeze(1).repeat(1, decoder_output.size(0), 1, 1, 1).permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n decoder_output_cls = torch.cat([references, decoder_output], dim=-1)\n outputs_class = self.class_embed(decoder_output_cls).transpose(2, 3) # (l, b, q, t, cls+1)\n mask_embed = self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\"lbtqc,btchw->lbqthw\", mask_embed, mask_features)\n return outputs_class, outputs_mask" }, { "identifier": "TemporalRefiner", "path": "dvis_Plus/refiner.py", "snippet": "class TemporalRefiner(torch.nn.Module):\n def __init__(\n self,\n hidden_channel=256,\n feedforward_channel=2048,\n num_head=8,\n decoder_layer_num=6,\n mask_dim=256,\n class_num=25,\n windows=5,\n ):\n super(TemporalRefiner, self).__init__()\n\n self.windows = windows\n\n # init transformer layers\n self.num_heads = num_head\n self.num_layers = decoder_layer_num\n self.transformer_obj_self_attention_layers = nn.ModuleList()\n self.transformer_time_self_attention_layers = nn.ModuleList()\n self.transformer_cross_attention_layers = nn.ModuleList()\n self.transformer_ffn_layers = nn.ModuleList()\n\n self.conv_short_aggregate_layers = nn.ModuleList()\n self.conv_norms = nn.ModuleList()\n\n for _ in range(self.num_layers):\n self.transformer_time_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.conv_short_aggregate_layers.append(\n nn.Sequential(\n nn.Conv1d(hidden_channel, hidden_channel,\n kernel_size=5, stride=1,\n padding='same', padding_mode='replicate'),\n nn.ReLU(inplace=True),\n nn.Conv1d(hidden_channel, hidden_channel,\n kernel_size=3, stride=1,\n padding='same', padding_mode='replicate'),\n )\n )\n\n self.conv_norms.append(nn.LayerNorm(hidden_channel))\n\n self.transformer_obj_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_cross_attention_layers.append(\n CrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_ffn_layers.append(\n FFNLayer(\n d_model=hidden_channel,\n dim_feedforward=feedforward_channel,\n dropout=0.0,\n 
normalize_before=False,\n )\n )\n\n self.decoder_norm = nn.LayerNorm(hidden_channel)\n\n # init heads\n self.class_embed = nn.Linear(hidden_channel, class_num + 1)\n self.mask_embed = MLP(hidden_channel, hidden_channel, mask_dim, 3)\n\n self.activation_proj = nn.Linear(hidden_channel, 1)\n\n def forward(self, instance_embeds, frame_embeds, mask_features):\n \"\"\"\n :param instance_embeds: the aligned instance queries output by the tracker, shape is (b, c, t, q)\n :param frame_embeds: the instance queries processed by the tracker.frame_forward function, shape is (b, c, t, q)\n :param mask_features: the mask features output by the segmenter, shape is (b, t, c, h, w)\n :return: output dict, including masks, classes, embeds.\n \"\"\"\n n_batch, n_channel, n_frames, n_instance = instance_embeds.size()\n\n outputs = []\n output = instance_embeds\n frame_embeds = frame_embeds.permute(3, 0, 2, 1).flatten(1, 2)\n\n for i in range(self.num_layers):\n output = output.permute(2, 0, 3, 1) # (t, b, q, c)\n output = output.flatten(1, 2) # (t, bq, c)\n\n # do long temporal attention\n output = self.transformer_time_self_attention_layers[i](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n\n # do short temporal conv\n output = output.permute(1, 2, 0) # (bq, c, t)\n output = self.conv_norms[i](\n (self.conv_short_aggregate_layers[i](output) + output).transpose(1, 2)\n ).transpose(1, 2)\n output = output.reshape(\n n_batch, n_instance, n_channel, n_frames\n ).permute(1, 0, 3, 2).flatten(1, 2) # (q, bt, c)\n\n # do objects self attention\n output = self.transformer_obj_self_attention_layers[i](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n\n # do cross attention\n output = self.transformer_cross_attention_layers[i](\n output, frame_embeds,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n # FFN\n output = self.transformer_ffn_layers[i](\n output\n )\n\n output = output.reshape(n_instance, n_batch, n_frames, n_channel).permute(1, 3, 2, 0) # (b, c, t, q)\n outputs.append(output)\n\n outputs = torch.stack(outputs, dim=0).permute(3, 0, 4, 1, 2) # (l, b, c, t, q) -> (t, l, q, b, c)\n outputs_class, outputs_masks = self.prediction(outputs, mask_features)\n outputs = self.decoder_norm(outputs)\n out = {\n 'pred_logits': outputs_class[-1].transpose(1, 2), # (b, t, q, c)\n 'pred_masks': outputs_masks[-1], # (b, q, t, h, w)\n 'aux_outputs': self._set_aux_loss(\n outputs_class, outputs_masks\n ),\n 'pred_embds': outputs[:, -1].permute(2, 3, 0, 1) # (b, c, t, q)\n }\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_seg_masks):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{\"pred_logits\": a.transpose(1, 2), \"pred_masks\": b}\n for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])\n ]\n\n def windows_prediction(self, outputs, mask_features, windows=5):\n \"\"\"\n for windows prediction, because mask features consumed too much GPU memory\n \"\"\"\n iters = outputs.size(0) // windows\n if outputs.size(0) % windows != 0:\n iters += 1\n outputs_classes = []\n outputs_masks = []\n for i in range(iters):\n start_idx = i * windows\n end_idx = (i + 1) * windows\n clip_outputs = outputs[start_idx:end_idx]\n decoder_output = self.decoder_norm(clip_outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n mask_embed = 
self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\n \"lbtqc,btchw->lbqthw\",\n mask_embed,\n mask_features[:, start_idx:end_idx].to(mask_embed.device)\n )\n outputs_classes.append(decoder_output)\n outputs_masks.append(outputs_mask.cpu().to(torch.float32))\n outputs_classes = torch.cat(outputs_classes, dim=2)\n outputs_classes = self.pred_class(outputs_classes)\n return outputs_classes.cpu().to(torch.float32), torch.cat(outputs_masks, dim=3)\n\n def pred_class(self, decoder_output):\n \"\"\"\n fuse the objects queries of all frames and predict an overall score based on the fused objects queries\n :param decoder_output: instance queries, shape is (l, b, t, q, c)\n \"\"\"\n T = decoder_output.size(2)\n\n # compute the weighted average of the decoder_output\n activation = self.activation_proj(decoder_output).softmax(dim=2) # (l, b, t, q, 1)\n class_output = (decoder_output * activation).sum(dim=2, keepdim=True) # (l, b, 1, q, c)\n\n # to unify the output format, duplicate the fused features T times\n class_output = class_output.repeat(1, 1, T, 1, 1)\n outputs_class = self.class_embed(class_output).transpose(2, 3)\n return outputs_class\n\n def prediction(self, outputs, mask_features):\n \"\"\"\n :param outputs: instance queries, shape is (t, l, q, b, c)\n :param mask_features: mask features, shape is (b, t, c, h, w)\n :return: pred class and pred masks\n \"\"\"\n if self.training:\n decoder_output = self.decoder_norm(outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n outputs_class = self.pred_class(decoder_output)\n mask_embed = self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\"lbtqc,btchw->lbqthw\", mask_embed, mask_features)\n else:\n outputs = outputs[:, -1:]\n outputs_class, outputs_mask = self.windows_prediction(outputs, mask_features, windows=self.windows)\n return outputs_class, outputs_mask" }, { "identifier": "loss_reid", "path": "dvis_Plus/utils.py", "snippet": "def loss_reid(qd_items, outputs):\n # outputs only using when have not contrastive items\n # compute two loss, contrastive loss & similarity loss\n contras_loss = 0\n aux_loss = 0\n num_qd_items = len(qd_items) # n_instances * frames\n\n # if none items, return 0 loss\n if len(qd_items) == 0:\n if 'pred_references' in outputs.keys():\n losses = {'loss_reid': outputs['pred_references'].sum() * 0,\n 'loss_aux_reid': outputs['pred_references'].sum() * 0}\n else:\n losses = {'loss_reid': outputs['pred_embds'].sum() * 0,\n 'loss_aux_reid': outputs['pred_embds'].sum() * 0}\n return losses\n\n for qd_item in qd_items:\n # (n_pos, n_anchor) -> (n_anchor, n_pos)\n pred = qd_item['dot_product'].permute(1, 0)\n label = qd_item['label'].unsqueeze(0)\n # contrastive loss\n pos_inds = (label == 1)\n neg_inds = (label == 0)\n pred_pos = pred * pos_inds.float()\n pred_neg = pred * neg_inds.float()\n # use -inf to mask out unwanted elements.\n pred_pos[neg_inds] = pred_pos[neg_inds] + float('inf')\n pred_neg[pos_inds] = pred_neg[pos_inds] + float('-inf')\n\n _pos_expand = torch.repeat_interleave(pred_pos, pred.shape[1], dim=1)\n _neg_expand = pred_neg.repeat(1, pred.shape[1])\n # [bz,N], N is all pos and negative samples on reference frame, label indicate it's pos or negative\n x = torch.nn.functional.pad(\n (_neg_expand - _pos_expand), (0, 1), \"constant\", 0)\n contras_loss += torch.logsumexp(x, dim=1)\n\n aux_pred = qd_item['cosine_similarity'].permute(1, 0)\n aux_label = qd_item['label'].unsqueeze(0)\n aux_loss += (torch.abs(aux_pred - aux_label) ** 2).mean()\n\n losses = 
{'loss_reid': contras_loss.sum() / num_qd_items,\n 'loss_aux_reid': aux_loss / num_qd_items}\n return losses" }, { "identifier": "Outputs_Memory_PerClasses", "path": "dvis_Plus/utils.py", "snippet": "class Outputs_Memory_PerClasses:\n def __init__(self, max_len=100,):\n self.class_references = {}\n self.max_len = max_len\n\n def push(self, references, targets, referecne_match_result):\n # for tracker\n references = references.detach()\n for i in range(len(targets)):\n classes = targets[i]['labels'] # (N, )\n frame_match_result = referecne_match_result[i]\n frame_reference = references[i]\n for i_ref, i_gt in zip(frame_match_result[0], frame_match_result[1]):\n cls = classes[i_gt].item()\n if cls in self.class_references.keys():\n self.class_references[cls].append(frame_reference[i_ref])\n else:\n self.class_references[cls] = [frame_reference[i_ref]]\n for cls in self.class_references.keys():\n if len(self.class_references[cls]) > self.max_len:\n self.class_references[cls] = self.class_references[cls][-self.max_len:]\n return\n\n def push_refiner(self, references, targets, referecne_match_result):\n # for refiner\n references = references.clone().detach()\n classes = targets['labels'] # (N, )\n for i_ref, i_gt in zip(referecne_match_result[0], referecne_match_result[1]):\n cls = classes[i_gt].item()\n if cls in self.class_references.keys():\n self.class_references[cls].extend(list(torch.unbind(references[:, i_ref], dim=0)))\n else:\n self.class_references[cls] = list(torch.unbind(references[:, i_ref], dim=0))\n\n for cls in self.class_references.keys():\n if len(self.class_references[cls]) > self.max_len:\n random.shuffle(self.class_references[cls])\n self.class_references[cls] = self.class_references[cls][-self.max_len:]\n return\n\n def get_items(self, cls):\n if cls not in self.class_references.keys():\n return []\n else:\n cls_ref = torch.stack(self.class_references[cls], dim=0)\n return cls_ref" } ]
from typing import Tuple
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from mask2former_video.modeling.criterion import VideoSetCriterion
from mask2former_video.modeling.matcher import VideoHungarianMatcher, VideoHungarianMatcher_Consistent
from mask2former_video.utils.memory import retry_if_cuda_oom
from scipy.optimize import linear_sum_assignment
from .tracker import ReferringTracker_noiser
from .refiner import TemporalRefiner
from .utils import loss_reid, Outputs_Memory_PerClasses
import einops
import torch
11986
@META_ARCH_REGISTRY.register() class MinVIS(nn.Module): """ Copied from "https://github.com/NVlabs/MinVIS". """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, metadata, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video num_frames, window_inference, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image semantic_on: bool, whether to output semantic segmentation prediction instance_on: bool, whether to output instance segmentation prediction panoptic_on: bool, whether to output panoptic segmentation prediction test_topk_per_image: int, instance segmentation parameter, keep topk instances per image """ super().__init__() self.backbone = backbone self.sem_seg_head = sem_seg_head self.criterion = criterion self.num_queries = num_queries self.overlap_threshold = overlap_threshold self.object_mask_threshold = object_mask_threshold self.metadata = metadata if size_divisibility < 0: # use backbone size_divisibility if not set size_divisibility = self.backbone.size_divisibility self.size_divisibility = size_divisibility self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) self.num_frames = num_frames self.window_inference = window_inference @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
@META_ARCH_REGISTRY.register() class MinVIS(nn.Module): """ Copied from "https://github.com/NVlabs/MinVIS". """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, metadata, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video num_frames, window_inference, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image semantic_on: bool, whether to output semantic segmentation prediction instance_on: bool, whether to output instance segmentation prediction panoptic_on: bool, whether to output panoptic segmentation prediction test_topk_per_image: int, instance segmentation parameter, keep topk instances per image """ super().__init__() self.backbone = backbone self.sem_seg_head = sem_seg_head self.criterion = criterion self.num_queries = num_queries self.overlap_threshold = overlap_threshold self.object_mask_threshold = object_mask_threshold self.metadata = metadata if size_divisibility < 0: # use backbone size_divisibility if not set size_divisibility = self.backbone.size_divisibility self.size_divisibility = size_divisibility self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) self.num_frames = num_frames self.window_inference = window_inference @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
matcher = VideoHungarianMatcher(
1
2023-11-14 10:55:11+00:00
16k
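Illustrative usage (an assumption, not part of the dataset): a row like the one above reads naturally as a next-line completion example, where the retrieved snippet selected by gold_snippet_index, the import_statement, and the cropped_code prefix are concatenated into a prompt and a model's completion is compared against next_line. The sketch below shows one minimal way such a row could be consumed; the file name records.jsonl and the helpers build_prompt / exact_match are hypothetical names introduced here, and the 0-based use of gold_snippet_index is also an assumption.

import json

def build_prompt(record: dict) -> str:
    # Assumption: concatenate the retrieved snippet pointed to by gold_snippet_index,
    # the file's import statement, and the cropped file prefix into a single prompt.
    gold = record["context"][record["gold_snippet_index"]]["snippet"]
    return "\n\n".join([gold, record["import_statement"], record["cropped_code"]])

def exact_match(prediction: str, record: dict) -> bool:
    # One simple way to score a completion: strip whitespace and compare to next_line.
    return prediction.strip() == record["next_line"].strip()

if __name__ == "__main__":
    # "records.jsonl" is a hypothetical local dump of rows like the ones shown here.
    with open("records.jsonl") as f:
        record = json.loads(f.readline())
    print(build_prompt(record)[-200:])                        # tail of the assembled prompt
    print(exact_match("matcher = VideoHungarianMatcher(", record))

For the row above, the reference completion is the single line "matcher = VideoHungarianMatcher(", so a model is expected to continue the cropped from_config body by instantiating the matcher.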
ej0cl6/TextEE
TextEE/models/Degree/EAEtrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, config):\n for instance in instances:\n instance[\"extra_info\"] = None\n return instances\n \n def load_model(self, checkpoint=None):\n pass\n \n def train(self, train_data, dev_data, **kwargs):\n pass\n \n def predict(self, data, **kwargs):\n pass" }, { "identifier": "DegreeEAEModel", "path": "TextEE/models/Degree/EAEmodel.py", "snippet": "class DegreeEAEModel(nn.Module):\n def __init__(self, config, tokenizer, type_set):\n super().__init__()\n self.config = config\n self.tokenizer = tokenizer\n self.type_set = type_set\n \n if self.config.pretrained_model_name.startswith('facebook/bart'):\n self.model_config = AutoConfig.from_pretrained(self.config.pretrained_model_name,\n cache_dir=self.config.cache_dir)\n self.model = AutoModelForPreTraining.from_pretrained(self.config.pretrained_model_name,\n cache_dir=self.config.cache_dir, config=self.model_config)\n else:\n raise ValueError(\"Not implemented.\")\n \n self.model.resize_token_embeddings(len(self.tokenizer))\n \n def process_data(self, batch):\n # encoder inputs\n inputs = self.tokenizer(batch.batch_input, return_tensors='pt', padding=True)\n enc_idxs = inputs['input_ids']\n enc_attn = inputs['attention_mask']\n\n # decoder inputs\n targets = self.tokenizer(batch.batch_target, return_tensors='pt', padding=True)\n batch_size = enc_idxs.size(0)\n \n if self.config.pretrained_model_name.startswith('facebook/bart'):\n padding = torch.ones((batch_size, 1), dtype=torch.long)\n padding[:] = self.tokenizer.eos_token_id\n # for BART, the decoder input should be:\n # PAD => BOS\n # BOS => A\n # A => B \n else:\n # t5 case\n padding = torch.ones((batch_size, 1), dtype=torch.long)\n padding[:] = self.tokenizer.pad_token_id\n # for t5, the decoder input should be:\n # PAD => A\n # A => B\n \n dec_idxs = torch.cat((padding, targets['input_ids']), dim=1)\n dec_attn = torch.cat((torch.ones((batch_size, 1), dtype=torch.long), targets['attention_mask']), dim=1)\n # dec_idxs = targets['input_ids']\n # dec_idxs[:, 0] = self.tokenizer.eos_token_id\n # dec_attn = targets['attention_mask']\n \n # labels\n padding = torch.ones((batch_size, 1), dtype=torch.long)\n padding[:] = self.tokenizer.pad_token_id\n raw_lbl_idxs = torch.cat((dec_idxs[:, 1:], padding), dim=1)\n lbl_attn = torch.cat((dec_attn[:, 1:], torch.zeros((batch_size, 1), dtype=torch.long)), dim=1)\n lbl_idxs = raw_lbl_idxs.masked_fill(lbl_attn==0, -100) # ignore padding\n \n enc_idxs = enc_idxs.cuda()\n enc_attn = enc_attn.cuda()\n dec_idxs = dec_idxs.cuda()\n dec_attn = dec_attn.cuda()\n raw_lbl_idxs = raw_lbl_idxs.cuda()\n lbl_idxs = lbl_idxs.cuda()\n \n return enc_idxs, enc_attn, dec_idxs, dec_attn, raw_lbl_idxs, lbl_idxs\n\n def forward(self, batch):\n enc_idxs, enc_attn, dec_idxs, dec_attn, raw_lbl_idxs, lbl_idxs = self.process_data(batch)\n outputs = self.model(input_ids=enc_idxs, \n attention_mask=enc_attn, \n decoder_input_ids=dec_idxs, \n decoder_attention_mask=dec_attn, \n labels=lbl_idxs, \n return_dict=True)\n \n loss = outputs['loss']\n \n return loss\n \n def predict(self, batch, num_beams=4, max_length=50):\n enc_idxs, enc_attn, dec_idxs, dec_attn, raw_lbl_idxs, lbl_idxs = self.process_data(batch)\n return self.generate(enc_idxs, enc_attn, num_beams, max_length)\n \n def generate(self, input_ids, attention_mask, 
num_beams=4, max_length=50, **kwargs):\n self.eval()\n with torch.no_grad():\n outputs = self.model.generate(input_ids=input_ids, \n attention_mask=attention_mask, \n num_beams=num_beams, \n max_length=max_length)\n final_output = []\n for bid in range(len(input_ids)):\n output_sentence = self.tokenizer.decode(outputs[bid], skip_special_tokens=True, clean_up_tokenization_spaces=True)\n final_output.append(output_sentence)\n self.train()\n return final_output\n \n def save_model(self, save_path):\n self.model.save_pretrained(save_path)\n\n def load_model(self, load_path):\n self.model.from_pretrained(load_path)" }, { "identifier": "event_template", "path": "TextEE/models/Degree/template_generate.py", "snippet": "class event_template():\n def __init__(self, event_type, info_dict, input_style, output_style, passage, ROLE_PH_MAP, gold_event=None):\n self.ROLE_PH_MAP = ROLE_PH_MAP\n self.info_dict = info_dict\n self.event_type = event_type\n self.input_style = input_style\n self.output_style = output_style\n self.output_template = self.get_output_template()\n self.passage = ' '.join(passage) # Assume this is English\n self.tokens = passage\n \n if gold_event is not None:\n self.gold_event = gold_event\n if isinstance(gold_event, list):\n # instance base\n self.trigger_text = f\" {AND} \".join([x['trigger text'] for x in gold_event if x['event type']==event_type])\n self.trigger_span = [x['trigger span'] for x in gold_event if x['event type']==event_type]\n self.arguments = [x['arguments'] for x in gold_event if x['event type']==event_type]\n else:\n # trigger base\n self.trigger_text = gold_event['trigger text']\n self.trigger_span = [gold_event['trigger span']]\n self.arguments = [gold_event['arguments']] \n else:\n self.gold_event = None\n \n def get_keywords(self):\n return self.info_dict['keywords']\n\n def get_output_template(self):\n output_template = ''\n for o_style in OUTPUT_STYLE_SET:\n if o_style in self.output_style:\n if o_style == 'trigger:sentence':\n output_template += ' {} {}'.format(SEP, format_template(self.info_dict['ED template'], self.ROLE_PH_MAP))\n if o_style == 'argument:sentence':\n output_template += ' {} {}'.format(SEP, format_template(self.info_dict['EAE template'], self.ROLE_PH_MAP))\n return (f'{SEP}'.join(output_template.split(f'{SEP}')[1:])).strip()\n\n def generate_pair(self, query_trigger):\n \"\"\"\n Generate model input sentence and output sentence pair\n \"\"\"\n input_str, supplements = self.generate_input_str_detail(query_trigger)\n output_str, gold_sample = self.generate_output_str(query_trigger)\n return (input_str, output_str, self.gold_event, gold_sample, self.event_type, self.tokens, supplements)\n\n def generate_input_str_detail(self, query_trigger):\n input_str = ''\n for i_style in INPUT_STYLE_SET:\n if i_style in self.input_style:\n if i_style == 'event_type':\n input_str += ' {} {}'.format(SEP, self.info_dict['event type'])\n if i_style == 'event_type_sent':\n input_str += ' {} {}'.format(SEP, self.info_dict['event description'])\n if i_style == 'keywords':\n input_str += ' {} Similar triggers such as {}'.format(SEP, ', '.join(self.get_keywords()))\n if i_style == 'triggers':\n input_str += ' {} The event trigger word is {}'.format(SEP, query_trigger)\n if i_style == 'template':\n input_str += ' {} {}'.format(SEP, self.output_template)\n return self.passage+input_str, input_str\n\n def generate_input_str(self, query_trigger):\n input_str = self.passage\n for i_style in INPUT_STYLE_SET:\n if i_style in self.input_style:\n if i_style == 
'event_type':\n input_str += ' {} {}'.format(SEP, self.info_dict['event type'])\n if i_style == 'event_type_sent':\n input_str += ' {} {}'.format(SEP, self.info_dict['event description'])\n if i_style == 'keywords':\n input_str += ' {} Similar triggers such as {}'.format(SEP, ', '.join(self.get_keywords()))\n if i_style == 'triggers':\n input_str += ' {} The event trigger word is {}'.format(SEP, query_trigger)\n if i_style == 'template':\n input_str += ' {} {}'.format(SEP, self.output_template)\n return input_str\n\n def generate_output_str(self, query_trigger):\n assert self.gold_event is not None\n output_str = ''\n gold_sample = False\n for o_style in OUTPUT_STYLE_SET:\n if o_style in self.output_style:\n if o_style == 'trigger:sentence':\n filler = dict()\n if self.trigger_text != '':\n filler[\"Trigger\"] = self.trigger_text\n gold_sample = True\n else:\n filler[\"Trigger\"] = TRIGGER_PH_MAP['Trigger']\n output_str += ' {} {}'.format(SEP, self.info_dict['ED template'].format(**filler))\n\n if o_style == 'argument:sentence':\n output_texts = []\n for argu in self.arguments:\n filler = dict()\n roles = re.findall(r\"{[^/}][^}]*}\", self.info_dict['EAE template'])\n roles = [role[1:-1].split(ROLE_TEMPLATE_PREFIX, 1)[1] for role in roles]\n for role_type in roles:\n filler['{}{}'.format(ROLE_TEMPLATE_PREFIX, role_type)] = f\" {AND} \".join([ a['argument text'] for a in argu[role_type]]) if role_type in argu.keys() else self.ROLE_PH_MAP['ROLE_{}'.format(role_type)]\n output_texts.append(self.info_dict['EAE template'].format(**filler))\n gold_sample = True\n output_str += ' {} {}'.format(SEP, ' <sep> '.join(output_texts))\n\n output_str = (f'{SEP}'.join(output_str.split(f'{SEP}')[1:])).strip()\n return (output_str, gold_sample)\n\n def decode(self, preds):\n output = []\n for cnt, pred in enumerate(preds.split(f'{SEP}')):\n used_o_cnt = 0\n full_pred = pred.strip()\n for o_style in OUTPUT_STYLE_SET:\n if o_style in self.output_style:\n if o_style == 'trigger:sentence':\n if used_o_cnt == cnt:\n # try:\n # contexts = re.split(r\"{[^/}][^}]*}\", self.info_dict['ED template'])\n # triggers = []\n # for idx in range(len(contexts)-1):\n # trigger = full_pred.split(contexts[idx], 1)[1]\n # trigger = trigger.split(contexts[idx+1], 1)[0]\n # triggers.append(trigger.strip())\n # triggers = [tri for trigger in triggers for tri in trigger.split(' and ') ]\n # for t_cnt, t in enumerate(triggers):\n # if t != TRIGGER_PH_MAP['Trigger'] and t != '':\n # output.append((t, self.event_type, {'tri counter': t_cnt})) # (text, type, kwargs)\n # except:\n # pass\n contexts = re.split(r\"{[^/}][^}]*}\", self.info_dict['ED template'])\n triggers = []\n for idx in range(len(contexts)-1):\n try:\n trigger = full_pred.split(contexts[idx], 1)[1]\n trigger = trigger.split(contexts[idx+1], 1)[0]\n triggers.append(trigger.strip())\n except:\n pass\n triggers = [tri for trigger in triggers for tri in trigger.split(f' {AND} ')]\n for t_cnt, t in enumerate(triggers):\n if t != TRIGGER_PH_MAP['Trigger'] and t != '':\n output.append((t, self.event_type, {'tri counter': t_cnt})) # (text, type, kwargs)\n used_o_cnt += 1\n if o_style == 'argument:sentence':\n if used_o_cnt == cnt:\n for a_cnt, prediction in enumerate(full_pred.split(' <sep> ')):\n contexts = re.split(r\"{[^/}][^}]*}\", self.info_dict['EAE template'])\n roles = re.findall(r\"{[^/}][^}]*}\", self.info_dict['EAE template'])\n roles = [role[1:-1].split(ROLE_TEMPLATE_PREFIX, 1)[1] for role in roles]\n assert len(contexts) == len(roles)+1\n\n for idx in 
range(len(contexts)-1):\n try:\n if contexts[idx] != '':\n pred_argu = prediction.split(contexts[idx], 1)[1]\n else:\n pred_argu = prediction\n if contexts[idx+1] != '':\n pred_argu = pred_argu.split(contexts[idx+1], 1)[0]\n pred_argu = pred_argu.split(f' {AND} ')\n for argu in pred_argu:\n if argu != self.ROLE_PH_MAP[\"{}{}\".format(ROLE_TEMPLATE_PREFIX, roles[idx])]:\n if argu != '':\n output.append((argu, roles[idx], {'cor tri cnt': a_cnt}))\n except:\n pass\n used_o_cnt += 1\n \n return output\n\n def evaluate(self, predict_output):\n assert self.gold_event is not None\n # categorize prediction\n pred_trigger = []\n pred_argument = []\n for pred in predict_output:\n if pred[1] == self.event_type:\n pred_trigger.append(pred)\n else:\n pred_argument.append(pred)\n \n # get trigger id map\n pred_trigger_map = {}\n for p_tri in pred_trigger:\n # assert p_tri[2]['tri counter'] not in pred_trigger_map.keys()\n pred_trigger_map[p_tri[2]['tri counter']] = p_tri\n\n # trigger score\n gold_tri_num = len(self.trigger_span)\n pred_tris = []\n for pred in pred_trigger:\n pred_span = self.predstr2span(pred[0])\n if pred_span[0] > -1:\n pred_tris.append((pred_span[0], pred_span[1], pred[1]))\n pred_tri_num = len(pred_tris)\n match_tri = 0\n for pred in pred_tris:\n id_flag = False\n for gold_span in self.trigger_span:\n if gold_span[0] == pred[0] and gold_span[1] == pred[1]:\n id_flag = True\n match_tri += int(id_flag)\n\n # argument score\n converted_gold = self.get_converted_gold()\n gold_arg_num = len(converted_gold)\n pred_arg = []\n for pred in pred_argument:\n # find corresponding trigger\n pred_span = None\n if isinstance(self.gold_event, list):\n # end2end case\n try:\n # we need this ``try'' because we cannot gurantee the model will be bug-free on the matching\n cor_tri = pred_trigger_map[pred[2]['cor tri cnt']]\n cor_tri_span_head = self.predstr2span(cor_tri[0])[0]\n if cor_tri_span_head > -1:\n pred_span = self.predstr2span(pred[0], cor_tri_span_head)\n else:\n continue\n except Exception as e:\n print('unmatch exception')\n print(e)\n else:\n # argument only case\n pred_span = self.predstr2span(pred[0], self.trigger_span[0][0])\n if (pred_span is not None) and (pred_span[0] > -1):\n pred_arg.append((pred_span[0], pred_span[1], pred[1]))\n pred_arg = list(set(pred_arg))\n pred_arg_num = len(pred_arg)\n \n target = converted_gold\n match_id = 0\n match_type = 0\n for pred in pred_arg:\n id_flag = False\n id_type = False\n for gold in target:\n if gold[0]==pred[0] and gold[1]==pred[1]:\n id_flag = True\n if gold[2] == pred[2]:\n id_type = True\n break\n match_id += int(id_flag)\n match_type += int(id_type)\n return {\n 'gold_tri_num': gold_tri_num, \n 'pred_tri_num': pred_tri_num,\n 'match_tri_num': match_tri,\n 'gold_arg_num': gold_arg_num,\n 'pred_arg_num': pred_arg_num,\n 'match_arg_id': match_id,\n 'match_arg_cls': match_type\n }\n \n def get_converted_gold(self):\n converted_gold = []\n for argu in self.arguments:\n for arg_type, arg_list in argu.items():\n for arg in arg_list:\n converted_gold.append((arg['argument span'][0], arg['argument span'][1], arg_type))\n return list(set(converted_gold))\n \n def predstr2span(self, pred_str, trigger_idx=None):\n sub_words = [_.strip() for _ in pred_str.strip().lower().split()]\n candidates=[]\n for i in range(len(self.tokens)):\n j = 0\n while j < len(sub_words) and i+j < len(self.tokens):\n if self.tokens[i+j].lower() == sub_words[j]:\n j += 1\n else:\n break\n if j == len(sub_words):\n candidates.append((i, i+len(sub_words)))\n if 
len(candidates) < 1:\n return -1, -1\n else:\n if trigger_idx is not None:\n return sorted(candidates, key=lambda x: abs(trigger_idx-x[0]))[0]\n else:\n return candidates[0]" }, { "identifier": "eve_template_generator", "path": "TextEE/models/Degree/template_generate.py", "snippet": "class eve_template_generator():\n def __init__(self, dataset, passage, triggers, roles, input_style, output_style, vocab, instance_base=False):\n \"\"\"\n generate strctured information for events\n \n args:\n dataset(str): which dataset is used\n passage(List): a list of tokens\n triggers(List): a list of triggers\n roles(List): a list of Roles\n input_style(List): List of elements; elements belongs to INPUT_STYLE_SET\n input_style(List): List of elements; elements belongs to OUTPUT_STYLE_SET\n instance_base(Bool): if instance_base, we generate only one pair (use for trigger generation), else, we generate trigger_base (use for argument generation)\n \"\"\"\n self.raw_passage = passage\n self.triggers = triggers\n self.roles = roles\n self.events = self.process_events(passage, triggers, roles)\n self.input_style = input_style\n self.output_style = output_style\n self.vocab = vocab\n self.event_templates = []\n if instance_base:\n for e_type in self.vocab['event_type_itos']:\n self.event_templates.append(\n event_template(e_type, patterns[dataset][e_type], \n self.input_style, self.output_style, passage, ROLE_PH_MAP[dataset], self.events)\n )\n else:\n for event in self.events:\n self.event_templates.append(\n event_template(event['event type'], patterns[dataset][event['event type']], \n self.input_style, self.output_style, event['tokens'], ROLE_PH_MAP[dataset], event)\n )\n self.data = [x.generate_pair(x.trigger_text) for x in self.event_templates]\n self.data = [x for x in self.data if x]\n\n def get_training_data(self):\n return self.data\n\n def process_events(self, passage, triggers, roles):\n \"\"\"\n Given a list of token and event annotation, return a list of structured event\n\n structured_event:\n {\n 'trigger text': str,\n 'trigger span': (start, end),\n 'event type': EVENT_TYPE(str),\n 'arguments':{\n ROLE_TYPE(str):[{\n 'argument text': str,\n 'argument span': (start, end)\n }],\n ROLE_TYPE(str):...,\n ROLE_TYPE(str):....\n }\n 'passage': PASSAGE\n }\n \"\"\"\n \n events = {trigger: [] for trigger in triggers}\n\n for argument in roles:\n trigger = argument[0]\n events[trigger].append(argument)\n \n event_structures = []\n for trigger, arguments in events.items():\n eve_type = trigger[2]\n eve_text = ' '.join(passage[trigger[0]:trigger[1]])\n eve_span = (trigger[0], trigger[1])\n argus = {}\n for argument in arguments:\n role_type = argument[1][2]\n if role_type not in argus.keys():\n argus[role_type] = []\n argus[role_type].append({\n 'argument text': ' '.join(passage[argument[1][0]:argument[1][1]]),\n 'argument span': (argument[1][0], argument[1][1]),\n })\n event_structures.append({\n 'trigger text': eve_text,\n 'trigger span': eve_span,\n 'event type': eve_type,\n 'arguments': argus,\n 'passage': ' '.join(passage),\n 'tokens': passage\n })\n return event_structures" }, { "identifier": "patterns", "path": "TextEE/models/Degree/pattern.py", "snippet": "ROLE_PH_MAP = {\n \"ace05-en\": {\n 'ROLE_Person': 'somebody',\n 'ROLE_Entity': 'some people or some organization',\n 'ROLE_Defendant': 'somebody',\n 'ROLE_Prosecutor': 'some other',\n 'ROLE_Plaintiff': 'some other',\n 'ROLE_Buyer': 'someone',\n 'ROLE_Artifact': 'something',\n 'ROLE_Seller': 'some seller',\n 'ROLE_Destination': 'somewhere',\n 
'ROLE_Origin': 'some place',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Agent': 'somebody or some organization',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_Target': 'some facility, someone, or some organization',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Instrument': 'some way',\n 'ROLE_Giver': 'someone',\n 'ROLE_Recipient': 'some other',\n 'ROLE_Org': 'some organization',\n 'ROLE_Place': 'somewhere',\n 'ROLE_Adjudicator': 'some adjudicator',\n },\n \"richere-en\": {\n 'ROLE_Person': 'somebody',\n 'ROLE_Entity': 'some people or some organization',\n 'ROLE_Defendant': 'somebody',\n 'ROLE_Prosecutor': 'some other',\n 'ROLE_Plaintiff': 'some other',\n 'ROLE_Buyer': 'someone',\n 'ROLE_Artifact': 'something',\n 'ROLE_Seller': 'some seller',\n 'ROLE_Destination': 'somewhere',\n 'ROLE_Origin': 'some place',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Agent': 'somebody or some organization',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_Target': 'some facility, someone, or some organization',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Instrument': 'some way',\n 'ROLE_Giver': 'someone',\n 'ROLE_Recipient': 'some other',\n 'ROLE_Org': 'some organization',\n 'ROLE_Place': 'somewhere',\n 'ROLE_Adjudicator': 'some adjudicator',\n 'ROLE_Thing': 'something',\n 'ROLE_Audience': 'some publicity',\n },\n \"m2e2\": {\n 'ROLE_Person': 'somebody',\n 'ROLE_Entity': 'some people or some organization',\n 'ROLE_Artifact': 'something',\n 'ROLE_Destination': 'somewhere',\n 'ROLE_Origin': 'some place',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Agent': 'somebody or some organization',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_Target': 'some facility, someone, or some organization',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Instrument': 'some way',\n 'ROLE_Giver': 'someone',\n 'ROLE_Recipient': 'some other',\n 'ROLE_Place': 'somewhere',\n 'ROLE_Police': 'some police',\n },\n \"geneva\": {\n \"ROLE_Act\": \"some act\",\n \"ROLE_Action\": \"some action\",\n \"ROLE_Activity\": \"some activity\",\n \"ROLE_Actor\": \"some actor\",\n \"ROLE_Addressee\": \"some addressee\",\n \"ROLE_Affected\": \"some affected\",\n \"ROLE_Affliction\": \"some affliction\",\n \"ROLE_Agent\": \"some agent\",\n \"ROLE_Agreement\": \"some agreement\",\n \"ROLE_Area\": \"some area\",\n \"ROLE_Arguer\": \"some arguer\",\n \"ROLE_Arguer2\": \"some arguer2\",\n \"ROLE_Arguers\": \"some arguers\",\n \"ROLE_Assailant\": \"some assailant\",\n \"ROLE_Asset\": \"some asset\",\n \"ROLE_Attendees\": \"some attendees\",\n \"ROLE_Attribute\": \"some attribute\",\n \"ROLE_Author\": \"some author\",\n \"ROLE_Authorities\": \"some authorities\",\n \"ROLE_Avenger\": \"some avenger\",\n \"ROLE_Barrier\": \"some barrier\",\n \"ROLE_Behavior\": \"some behavior\",\n \"ROLE_Beneficiary\": \"some beneficiary\",\n \"ROLE_Benefited_party\": \"some benefited party\",\n \"ROLE_Body\": \"some body\",\n \"ROLE_Body_location\": \"some body location\",\n \"ROLE_Body_part\": \"some body part\",\n \"ROLE_Buyer\": \"some buyer\",\n \"ROLE_Carrier\": \"some carrier\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_Charges\": \"some charges\",\n \"ROLE_Chosen\": \"some chosen\",\n \"ROLE_Circumstances\": \"some circumstances\",\n \"ROLE_Clothing\": \"some clothing\",\n \"ROLE_Cognizer\": \"some cognizer\",\n \"ROLE_Communicator\": \"some communicator\",\n \"ROLE_Competition\": \"some competition\",\n \"ROLE_Components\": \"some components\",\n \"ROLE_Configuration\": \"some configuration\",\n \"ROLE_Conqueror\": \"some conqueror\",\n \"ROLE_Container\": \"some container\",\n \"ROLE_Content\": 
\"some content\",\n \"ROLE_Contents\": \"some contents\",\n \"ROLE_Controlling_variable\": \"some controlling variable\",\n \"ROLE_Course\": \"some course\",\n \"ROLE_Created_entity\": \"some created entity\",\n \"ROLE_Creator\": \"some creator\",\n \"ROLE_Crime\": \"some crime\",\n \"ROLE_Culture\": \"some culture\",\n \"ROLE_Deceased\": \"some deceased\",\n \"ROLE_Decision\": \"some decision\",\n \"ROLE_Defender\": \"some defender\",\n \"ROLE_Dependent_variable\": \"some dependent variable\",\n \"ROLE_Destroyer\": \"some destroyer\",\n \"ROLE_Difference\": \"some difference\",\n \"ROLE_Dimension\": \"some dimension\",\n \"ROLE_Direction\": \"some direction\",\n \"ROLE_Distance\": \"some distance\",\n \"ROLE_Domain\": \"some domain\",\n \"ROLE_Donor\": \"some donor\",\n \"ROLE_Duration\": \"some duration\",\n \"ROLE_Earner\": \"some earner\",\n \"ROLE_Earnings\": \"some earnings\",\n \"ROLE_Effect\": \"some effect\",\n \"ROLE_Employee\": \"some employee\",\n \"ROLE_Employer\": \"some employer\",\n \"ROLE_Entity\": \"some entity\",\n \"ROLE_Evaluee\": \"some evaluee\",\n \"ROLE_Event\": \"some event\",\n \"ROLE_Evidence\": \"some evidence\",\n \"ROLE_Exchanger_1\": \"some exchanger 1\",\n \"ROLE_Exchanger_2\": \"some exchanger 2\",\n \"ROLE_Exchangers\": \"some exchangers\",\n \"ROLE_Experiencer\": \"some experiencer\",\n \"ROLE_Fact\": \"some fact\",\n \"ROLE_Factory\": \"some factory\",\n \"ROLE_Field\": \"some field\",\n \"ROLE_Figures\": \"some figures\",\n \"ROLE_Final_category\": \"some final category\",\n \"ROLE_Final_quality\": \"some final quality\",\n \"ROLE_Final_subevent\": \"some final subevent\",\n \"ROLE_Final_value\": \"some final value\",\n \"ROLE_Focal_entity\": \"some focal entity\",\n \"ROLE_Goal\": \"some goal\",\n \"ROLE_Goal_area\": \"some goal area\",\n \"ROLE_Goods\": \"some goods\",\n \"ROLE_Ground\": \"some ground\",\n \"ROLE_Group\": \"some group\",\n \"ROLE_Helper\": \"some helper\",\n \"ROLE_Hindrance\": \"some hindrance\",\n \"ROLE_Host\": \"some host\",\n \"ROLE_Imposed_purpose\": \"some imposed purpose\",\n \"ROLE_Incident\": \"some incident\",\n \"ROLE_Individuals\": \"some individuals\",\n \"ROLE_Information\": \"some information\",\n \"ROLE_Ingestibles\": \"some ingestibles\",\n \"ROLE_Ingestor\": \"some ingestor\",\n \"ROLE_Inherent_purpose\": \"some inherent purpose\",\n \"ROLE_Initial_category\": \"some initial category\",\n \"ROLE_Initial_size\": \"some initial size\",\n \"ROLE_Initial_subevent\": \"some initial subevent\",\n \"ROLE_Injured_party\": \"some injured party\",\n \"ROLE_Injury\": \"some injury\",\n \"ROLE_Inspector\": \"some inspector\",\n \"ROLE_Instrument\": \"some instrument\",\n \"ROLE_Intended_event\": \"some intended event\",\n \"ROLE_Interlocutors\": \"some interlocutors\",\n \"ROLE_Investigator\": \"some investigator\",\n \"ROLE_Issue\": \"some issue\",\n \"ROLE_Item\": \"some item\",\n \"ROLE_Killer\": \"some killer\",\n \"ROLE_Label\": \"some label\",\n \"ROLE_Location\": \"some location\",\n \"ROLE_Manipulator\": \"some manipulator\",\n \"ROLE_Manner\": \"some manner\",\n \"ROLE_Means\": \"some means\",\n \"ROLE_Medication\": \"some medication\",\n \"ROLE_Medium\": \"some medium\",\n \"ROLE_Member\": \"some member\",\n \"ROLE_Message\": \"some message\",\n \"ROLE_Money\": \"some money\",\n \"ROLE_New_leader\": \"some new leader\",\n \"ROLE_New_member\": \"some new member\",\n \"ROLE_Object\": \"some object\",\n \"ROLE_Occasion\": \"some occasion\",\n \"ROLE_Offender\": \"some offender\",\n \"ROLE_Offense\": \"some offense\",\n 
\"ROLE_Offerer\": \"some offerer\",\n \"ROLE_Old_leader\": \"some old leader\",\n \"ROLE_Old_order\": \"some old order\",\n \"ROLE_Part_1\": \"some part 1\",\n \"ROLE_Part_2\": \"some part 2\",\n \"ROLE_Participants\": \"some participants\",\n \"ROLE_Partners\": \"some partners\",\n \"ROLE_Parts\": \"some parts\",\n \"ROLE_Path\": \"some path\",\n \"ROLE_Patient\": \"some patient\",\n \"ROLE_Payer\": \"some payer\",\n \"ROLE_Perceiver_agentive\": \"some perceiver agentive\",\n \"ROLE_Perpetrator\": \"some perpetrator\",\n \"ROLE_Phenomenon\": \"some phenomenon\",\n \"ROLE_Place\": \"some place\",\n \"ROLE_Place_of_employment\": \"some place of employment\",\n \"ROLE_Position\": \"some position\",\n \"ROLE_Possibilities\": \"some possibilities\",\n \"ROLE_Potential_hindrance\": \"some potential hindrance\",\n \"ROLE_Problem\": \"some problem\",\n \"ROLE_Process\": \"some process\",\n \"ROLE_Producer\": \"some producer\",\n \"ROLE_Product\": \"some product\",\n \"ROLE_Project\": \"some project\",\n \"ROLE_Proposal\": \"some proposal\",\n \"ROLE_Proposed_action\": \"some proposed action\",\n \"ROLE_Protagonist\": \"some protagonist\",\n \"ROLE_Punishment\": \"some punishment\",\n \"ROLE_Purpose\": \"some purpose\",\n \"ROLE_Rate\": \"some rate\",\n \"ROLE_Ratifier\": \"some ratifier\",\n \"ROLE_Reason\": \"some reason\",\n \"ROLE_Recipient\": \"some recipient\",\n \"ROLE_Researcher\": \"some researcher\",\n \"ROLE_Resource\": \"some resource\",\n \"ROLE_Responding_entity\": \"some responding entity\",\n \"ROLE_Response\": \"some response\",\n \"ROLE_Result\": \"some result\",\n \"ROLE_Result_size\": \"some result size\",\n \"ROLE_Role\": \"some role\",\n \"ROLE_Selector\": \"some selector\",\n \"ROLE_Self_mover\": \"some self mover\",\n \"ROLE_Seller\": \"some seller\",\n \"ROLE_Sender\": \"some sender\",\n \"ROLE_Side_1\": \"some side 1\",\n \"ROLE_Side_2\": \"some side 2\",\n \"ROLE_Sides\": \"some sides\",\n \"ROLE_Signatory\": \"some signatory\",\n \"ROLE_Situation\": \"some situation\",\n \"ROLE_Skill\": \"some skill\",\n \"ROLE_Social_event\": \"some social event\",\n \"ROLE_Source\": \"some source\",\n \"ROLE_Speaker\": \"some speaker\",\n \"ROLE_Specified_entity\": \"some specified entity\",\n \"ROLE_Speed\": \"some speed\",\n \"ROLE_State\": \"some state\",\n \"ROLE_Student\": \"some student\",\n \"ROLE_Subject\": \"some subject\",\n \"ROLE_Supplier\": \"some supplier\",\n \"ROLE_Supported\": \"some supported\",\n \"ROLE_Supporter\": \"some supporter\",\n \"ROLE_Suspect\": \"some suspect\",\n \"ROLE_Task\": \"some task\",\n \"ROLE_Teacher\": \"some teacher\",\n \"ROLE_Terrorist\": \"some terrorist\",\n \"ROLE_Tested_property\": \"some tested property\",\n \"ROLE_Tester\": \"some tester\",\n \"ROLE_Text\": \"some text\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme_1\": \"some theme 1\",\n \"ROLE_Theme_2\": \"some theme 2\",\n \"ROLE_Themes\": \"some themes\",\n \"ROLE_Time\": \"some time\",\n \"ROLE_Topic\": \"some topic\",\n \"ROLE_Transferors\": \"some transferors\",\n \"ROLE_Traveler\": \"some traveler\",\n \"ROLE_Traveller\": \"some traveller\",\n \"ROLE_Treatment\": \"some treatment\",\n \"ROLE_Trigger\": \"some trigger\",\n \"ROLE_Type\": \"some type\",\n \"ROLE_Unconfirmed_content\": \"some unconfirmed content\",\n \"ROLE_Undertaking\": \"some undertaking\",\n \"ROLE_Undesirable_event\": \"some undesirable event\",\n \"ROLE_Unwanted_entity\": \"some unwanted entity\",\n \"ROLE_Useful_location\": \"some useful location\",\n \"ROLE_Value_1\": \"some value 1\",\n 
\"ROLE_Value_2\": \"some value 2\",\n \"ROLE_Vehicle\": \"some vehicle\",\n \"ROLE_Venue\": \"some venue\",\n \"ROLE_Victim\": \"some victim\",\n \"ROLE_Weapon\": \"some weapon\",\n \"ROLE_Wearer\": \"some wearer\",\n \"ROLE_Whole\": \"some whole\",\n },\n \"maven\": {\n },\n \"mee-en\": {\n },\n \"fewevent\": {\n },\n \"rams\": {\n \"ROLE_artifact\": \"some artifact\",\n \"ROLE_artifactmoney\": \"some artifact money\",\n \"ROLE_attacker\": \"some attacker\",\n \"ROLE_ballot\": \"some ballot\",\n \"ROLE_beneficiary\": \"some beneficiary\",\n \"ROLE_candidate\": \"some candidate\",\n \"ROLE_communicator\": \"some communicator\",\n \"ROLE_crashobject\": \"some crash object\",\n \"ROLE_crime\": \"some crime\",\n \"ROLE_damager\": \"some damager\",\n \"ROLE_damagerdestroyer\": \"some damager destroyer\",\n \"ROLE_deceased\": \"some deceased\",\n \"ROLE_defendant\": \"some defendant\",\n \"ROLE_demonstrator\": \"some demonstrator\",\n \"ROLE_destination\": \"some destination\",\n \"ROLE_destroyer\": \"some destroyer\",\n \"ROLE_detainee\": \"some detainee\",\n \"ROLE_driverpassenger\": \"some driver passenger\",\n \"ROLE_employee\": \"some employee\",\n \"ROLE_executioner\": \"some executioner\",\n \"ROLE_extraditer\": \"some extraditer\",\n \"ROLE_fireexplosionobject\": \"some fire explosion object\",\n \"ROLE_founder\": \"some founder\",\n \"ROLE_giver\": \"some giver\",\n \"ROLE_governmentbody\": \"some government body\",\n \"ROLE_gpe\": \"some gpe\",\n \"ROLE_granter\": \"some granter\",\n \"ROLE_hidingplace\": \"some hiding place\",\n \"ROLE_injurer\": \"some injurer\",\n \"ROLE_inspectedentity\": \"some inspected entity\",\n \"ROLE_inspector\": \"some inspector\",\n \"ROLE_instrument\": \"some instrument\",\n \"ROLE_investigator\": \"some investigator\",\n \"ROLE_jailer\": \"some jailer\",\n \"ROLE_judgecourt\": \"some judge court\",\n \"ROLE_killer\": \"some killer\",\n \"ROLE_law\": \"some law\",\n \"ROLE_manufacturer\": \"some manufacturer\",\n \"ROLE_money\": \"some money\",\n \"ROLE_monitor\": \"some monitor\",\n \"ROLE_monitoredentity\": \"some monitored entity\",\n \"ROLE_observedentity\": \"some observed entity\",\n \"ROLE_observer\": \"some observer\",\n \"ROLE_origin\": \"some origin\",\n \"ROLE_otherparticipant\": \"some other participant\",\n \"ROLE_participant\": \"some participant\",\n \"ROLE_passenger\": \"some passenger\",\n \"ROLE_place\": \"some place\",\n \"ROLE_placeofemployment\": \"some place of employment\",\n \"ROLE_preventer\": \"some preventer\",\n \"ROLE_prosecutor\": \"some prosecutor\",\n \"ROLE_recipient\": \"some recipient\",\n \"ROLE_rejecternullifier\": \"some rejecter nullifier\",\n \"ROLE_result\": \"some result\",\n \"ROLE_retreater\": \"some retreater\",\n \"ROLE_spy\": \"some spy\",\n \"ROLE_surrenderer\": \"some surrenderer\",\n \"ROLE_target\": \"some target\",\n \"ROLE_territoryorfacility\": \"some territoryor facility\",\n \"ROLE_transporter\": \"some transporter\",\n \"ROLE_vehicle\": \"some vehicle\",\n \"ROLE_victim\": \"some victim\",\n \"ROLE_violator\": \"some violator\",\n \"ROLE_voter\": \"some voter\",\n \"ROLE_yielder\": \"some yielder\", \n },\n \"wikievents\": {\n 'ROLE_AcquiredEntity': 'some acquired entity',\n 'ROLE_Artifact': 'some artifact',\n 'ROLE_ArtifactMoney': 'some artifact money',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_BodyPart': 'some body part',\n 'ROLE_Communicator': 'some communicator',\n 'ROLE_Components': 'some components',\n 'ROLE_CrashObject': 'some crash object',\n 'ROLE_Damager': 'some damager',\n 
'ROLE_DamagerDestroyer': 'some damager destroyer',\n 'ROLE_Defeated': 'some defeated',\n 'ROLE_Defendant': 'some defendant',\n 'ROLE_Demonstrator': 'some demonstrator',\n 'ROLE_Destination': 'some destination',\n 'ROLE_Destroyer': 'some destroyer',\n 'ROLE_Detainee': 'some detainee',\n 'ROLE_Disabler': 'some disabler',\n 'ROLE_Dismantler': 'some dismantler',\n 'ROLE_Employee': 'some employee',\n 'ROLE_ExplosiveDevice': 'some explosive device',\n 'ROLE_Giver': 'some giver',\n 'ROLE_IdentifiedObject': 'some identified object',\n 'ROLE_IdentifiedRole': 'some identified role',\n 'ROLE_Identifier': 'some identifier',\n 'ROLE_Impeder': 'some impeder',\n 'ROLE_Injurer': 'some injurer',\n 'ROLE_Instrument': 'some instrument',\n 'ROLE_Investigator': 'some investigator',\n 'ROLE_Jailer': 'some jailer',\n 'ROLE_JudgeCourt': 'some judge court',\n 'ROLE_Killer': 'some killer',\n 'ROLE_Learner': 'some learner',\n 'ROLE_ManufacturerAssembler': 'some manufacturer assembler',\n 'ROLE_ObservedEntity': 'some observed entity',\n 'ROLE_Observer': 'some observer',\n 'ROLE_Origin': 'some origin',\n 'ROLE_Participant': 'some participant',\n 'ROLE_PassengerArtifact': 'some passenger artifact',\n 'ROLE_Patient': 'some patient',\n 'ROLE_PaymentBarter': 'some payment barter',\n 'ROLE_Perpetrator': 'some perpetrator',\n 'ROLE_Place': 'some place',\n 'ROLE_PlaceOfEmployment': 'some place of employment',\n 'ROLE_Position': 'some position',\n 'ROLE_Preventer': 'some preventer',\n 'ROLE_Prosecutor': 'some prosecutor',\n 'ROLE_Recipient': 'some recipient',\n 'ROLE_Regulator': 'some regulator',\n 'ROLE_Researcher': 'some researcher',\n 'ROLE_Subject': 'some subject',\n 'ROLE_Target': 'some target',\n 'ROLE_TeacherTrainer': 'some teacher trainer',\n 'ROLE_Topic': 'some topic',\n 'ROLE_Transporter': 'some transporter',\n 'ROLE_Treater': 'some treater',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Victor': 'some victor',\n },\n \"phee\": {\n \"ROLE_Combination_Drug\": \"some combination drug\",\n \"ROLE_Effect\": \"some effect\",\n \"ROLE_Subject\": \"some subject\",\n \"ROLE_Subject_Age\": \"some subject age\",\n \"ROLE_Subject_Disorder\": \"some subject disorder\",\n \"ROLE_Subject_Gender\": \"some subject gender\",\n \"ROLE_Subject_Population\": \"some subject population\",\n \"ROLE_Subject_Race\": \"some subject race\",\n \"ROLE_Treatment\": \"some treatment\",\n \"ROLE_Treatment_Disorder\": \"some treatment disorder\",\n \"ROLE_Treatment_Dosage\": \"some treatment dosage\",\n \"ROLE_Treatment_Drug\": \"some treatment drug\",\n \"ROLE_Treatment_Duration\": \"some treatment duration\",\n \"ROLE_Treatment_Freq\": \"some treatment frequency\",\n \"ROLE_Treatment_Route\": \"some treatment route\",\n \"ROLE_Treatment_Time_elapsed\": \"some treatment time elapsed\",\n },\n \"casie\": {\n \"ROLE_Attack-Pattern\": \"some attack pattern\",\n \"ROLE_Attacker\": \"some attacker\",\n \"ROLE_CVE\": \"some cve\",\n \"ROLE_Capabilities\": \"some capabilities\",\n \"ROLE_Compromised-Data\": \"some compromised data\",\n \"ROLE_Damage-Amount\": \"some damage amount\",\n \"ROLE_Discoverer\": \"some discoverer\",\n \"ROLE_Issues-Addressed\": \"some issues addressed\",\n \"ROLE_Number-of-Data\": \"some number of data\",\n \"ROLE_Number-of-Victim\": \"some number of victim\",\n \"ROLE_Patch\": \"some patch\",\n \"ROLE_Patch-Number\": \"some patch number\",\n \"ROLE_Payment-Method\": \"some payment method\",\n \"ROLE_Place\": \"some place\",\n \"ROLE_Price\": \"some price\",\n \"ROLE_Purpose\": \"some purpose\",\n 
\"ROLE_Releaser\": \"some releaser\",\n \"ROLE_Supported_Platform\": \"some supported platform\",\n \"ROLE_Time\": \"some time\",\n \"ROLE_Tool\": \"some tool\",\n \"ROLE_Trusted-Entity\": \"some trusted entity\",\n \"ROLE_Victim\": \"some victim\",\n \"ROLE_Vulnerability\": \"some vulnerability\",\n \"ROLE_Vulnerable_System\": \"some vulnerable system\",\n \"ROLE_Vulnerable_System_Owner\": \"some vulnerable system owner\",\n \"ROLE_Vulnerable_System_Version\": \"some vulnerable system version\",\n },\n \"mlee\": {\n \"ROLE_AtLoc\": \"some at loc\",\n \"ROLE_CSite\": \"some csite\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_FromLoc\": \"some from loc\",\n \"ROLE_Instrument\": \"some instrument\",\n \"ROLE_Instrument2\": \"some instrument 2\",\n \"ROLE_Participant\": \"some participant\",\n \"ROLE_Participant2\": \"some participant 2\",\n \"ROLE_Participant3\": \"some participant 3\",\n \"ROLE_Participant4\": \"some participant 4\",\n \"ROLE_Site\": \"some site\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme2\": \"some theme 2\",\n \"ROLE_ToLoc\": \"some to loc\",\n },\n \"genia2011\": {\n \"ROLE_AtLoc\": \"some at loc\",\n \"ROLE_CSite\": \"some csite\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_Site\": \"some site\",\n \"ROLE_Site2\": \"some site 2\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme2\": \"some theme 2\",\n \"ROLE_Theme3\": \"some theme 3\",\n \"ROLE_Theme4\": \"some theme 4\",\n \"ROLE_ToLoc\": \"some to loc\",\n\n },\n \"genia2013\": {\n \"ROLE_CSite\": \"some csite\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_Site\": \"some site\",\n \"ROLE_Site2\": \"some site 2\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme2\": \"some theme 2\",\n \"ROLE_ToLoc\": \"some to loc\",\n },\n}" } ]
import os, sys, logging, tqdm, pprint import torch import numpy as np import ipdb from collections import namedtuple from transformers import BartTokenizer, AutoTokenizer, get_linear_schedule_with_warmup from torch.utils.data import DataLoader from torch.optim import AdamW from ..trainer import BasicTrainer from .EAEmodel import DegreeEAEModel from .template_generate import event_template, eve_template_generator from .pattern import patterns, ROLE_PH_MAP from scorer import compute_EAE_scores, print_scores
11,981
logger = logging.getLogger(__name__) EAEBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_text', 'batch_piece_idxs', 'batch_token_start_idxs', 'batch_trigger', 'batch_arguments', 'batch_input', 'batch_target'] EAEBatch = namedtuple('EAEBatch', field_names=EAEBatch_fields, defaults=[None] * len(EAEBatch_fields)) def EAE_collate_fn(batch): return EAEBatch( batch_doc_id=[instance["doc_id"] for instance in batch], batch_wnd_id=[instance["wnd_id"] for instance in batch], batch_tokens=[instance["tokens"] for instance in batch], batch_text=[instance["text"] for instance in batch], batch_piece_idxs=[instance["piece_idxs"] for instance in batch], batch_token_start_idxs=[instance["token_start_idxs"] for instance in batch], batch_trigger=[instance["trigger"] for instance in batch], batch_arguments=[instance["arguments"] for instance in batch], batch_input=[instance["input"] for instance in batch], batch_target=[instance["target"] for instance in batch], ) def get_span_idx(pieces, token_start_idxs, span, tokenizer, trigger_span=None): """ This function is how we map the generated prediction back to span prediction. Detailed Explanation: We will first split our prediction and use tokenizer to tokenize our predicted "span" into pieces. Then, we will find whether we can find a continuous span in the original "pieces" can match tokenized "span". If it is an argument/relation extraction task, we will return the one which is closest to the trigger_span. """ words = [] for s in span.split(' '): words.extend(tokenizer.encode(s, add_special_tokens=False)) candidates = [] for i in range(len(pieces)): j = 0 k = 0 while j < len(words) and i+k < len(pieces): if pieces[i+k] == words[j]: j += 1 k += 1 elif tokenizer.decode(words[j]) == "": j += 1 elif tokenizer.decode(pieces[i+k]) == "": k += 1 else: break if j == len(words): candidates.append((i, i+k)) candidates = [(token_start_idxs.index(c1), token_start_idxs.index(c2)) for c1, c2 in candidates if c1 in token_start_idxs and c2 in token_start_idxs] if len(candidates) < 1: return -1, -1 else: if trigger_span is None: return candidates[0] else: return sorted(candidates, key=lambda x: np.abs(trigger_span[0]-x[0]))[0] class DegreeEAETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None def load_model(self, checkpoint=None): if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.tokenizer = state["tokenizer"] self.type_set = state["type_set"]
logger = logging.getLogger(__name__) EAEBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_text', 'batch_piece_idxs', 'batch_token_start_idxs', 'batch_trigger', 'batch_arguments', 'batch_input', 'batch_target'] EAEBatch = namedtuple('EAEBatch', field_names=EAEBatch_fields, defaults=[None] * len(EAEBatch_fields)) def EAE_collate_fn(batch): return EAEBatch( batch_doc_id=[instance["doc_id"] for instance in batch], batch_wnd_id=[instance["wnd_id"] for instance in batch], batch_tokens=[instance["tokens"] for instance in batch], batch_text=[instance["text"] for instance in batch], batch_piece_idxs=[instance["piece_idxs"] for instance in batch], batch_token_start_idxs=[instance["token_start_idxs"] for instance in batch], batch_trigger=[instance["trigger"] for instance in batch], batch_arguments=[instance["arguments"] for instance in batch], batch_input=[instance["input"] for instance in batch], batch_target=[instance["target"] for instance in batch], ) def get_span_idx(pieces, token_start_idxs, span, tokenizer, trigger_span=None): """ This function is how we map the generated prediction back to span prediction. Detailed Explanation: We will first split our prediction and use tokenizer to tokenize our predicted "span" into pieces. Then, we will find whether we can find a continuous span in the original "pieces" can match tokenized "span". If it is an argument/relation extraction task, we will return the one which is closest to the trigger_span. """ words = [] for s in span.split(' '): words.extend(tokenizer.encode(s, add_special_tokens=False)) candidates = [] for i in range(len(pieces)): j = 0 k = 0 while j < len(words) and i+k < len(pieces): if pieces[i+k] == words[j]: j += 1 k += 1 elif tokenizer.decode(words[j]) == "": j += 1 elif tokenizer.decode(pieces[i+k]) == "": k += 1 else: break if j == len(words): candidates.append((i, i+k)) candidates = [(token_start_idxs.index(c1), token_start_idxs.index(c2)) for c1, c2 in candidates if c1 in token_start_idxs and c2 in token_start_idxs] if len(candidates) < 1: return -1, -1 else: if trigger_span is None: return candidates[0] else: return sorted(candidates, key=lambda x: np.abs(trigger_span[0]-x[0]))[0] class DegreeEAETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None def load_model(self, checkpoint=None): if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.tokenizer = state["tokenizer"] self.type_set = state["type_set"]
self.model = DegreeEAEModel(self.config, self.tokenizer, self.type_set)
1
2023-11-15 21:32:56+00:00
16k
ahayler/s4c
scripts/videos/gen_vid_transition.py
[ { "identifier": "BTSNet", "path": "models/bts/model/models_bts.py", "snippet": "class BTSNet(torch.nn.Module):\n def __init__(self, conf):\n super().__init__()\n\n self.d_min = conf.get(\"z_near\")\n self.d_max = conf.get(\"z_far\")\n\n self.learn_empty = conf.get(\"learn_empty\", True)\n self.empty_empty = conf.get(\"empty_empty\", False)\n self.inv_z = conf.get(\"inv_z\", True)\n\n self.color_interpolation = conf.get(\"color_interpolation\", \"bilinear\")\n self.code_mode = conf.get(\"code_mode\", \"z\")\n if self.code_mode not in [\"z\", \"distance\"]:\n raise NotImplementedError(f\"Unknown mode for positional encoding: {self.code_mode}\")\n\n self.encoder = make_backbone(conf[\"encoder\"])\n self.code_xyz = PositionalEncoding.from_conf(conf[\"code\"], d_in=3)\n\n self.flip_augmentation = conf.get(\"flip_augmentation\", False)\n\n self.return_sample_depth = conf.get(\"return_sample_depth\", False)\n\n self.sample_color = conf.get(\"sample_color\", True)\n\n d_in = self.encoder.latent_size + self.code_xyz.d_out\n d_out = 1 if self.sample_color else 4\n\n self._d_in = d_in\n self._d_out = d_out\n\n self.mlp_coarse = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=d_out)\n self.mlp_fine = make_mlp(conf[\"mlp_fine\"], d_in, d_out=d_out, allow_empty=True)\n\n # MLP for segmentation classes\n # TODO: Find the output dimensions automatically\n self.segmentation_mode = conf.get('segmentation_mode', None)\n if self.segmentation_mode == 'KITTI-360':\n self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=21)\n # self.mlp_segmentation = make_segnet(d_in=d_in, d_out=21, d_hidden_list=[64])\n elif self.segmentation_mode == 'panoptic_deeplab':\n # self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=19)\n self.mlp_segmentation = make_segnet(d_in=d_in, d_out=19, d_hidden_list=[64])\n # self.mlp_segmentation = make_intercept_model(d_in, d_out=21)\n\n if self.learn_empty:\n self.empty_feature = nn.Parameter(torch.randn((self.encoder.latent_size,), requires_grad=True))\n\n self._scale = 0\n\n def set_scale(self, scale):\n self._scale = scale\n\n def get_scale(self):\n return self._scale\n\n def compute_grid_transforms(self, *args, **kwargs):\n pass\n\n def encode(self, images, Ks, poses_c2w, ids_encoder=None, ids_render=None, images_alt=None, combine_ids=None):\n poses_w2c = torch.inverse(poses_c2w)\n\n if ids_encoder is None:\n images_encoder = images\n Ks_encoder = Ks\n poses_w2c_encoder = poses_w2c\n ids_encoder = list(range(len(images)))\n else:\n images_encoder = images[:, ids_encoder]\n Ks_encoder = Ks[:, ids_encoder]\n poses_w2c_encoder = poses_w2c[:, ids_encoder]\n\n if images_alt is not None:\n images = images_alt\n else:\n images = images * .5 + .5\n\n if ids_render is None:\n images_render = images\n Ks_render = Ks\n poses_w2c_render = poses_w2c\n ids_render = list(range(len(images)))\n else:\n images_render = images[:, ids_render]\n Ks_render = Ks[:, ids_render]\n poses_w2c_render = poses_w2c[:, ids_render]\n\n if combine_ids is not None:\n combine_ids = list(list(group) for group in combine_ids)\n get_combined = set(sum(combine_ids, []))\n for i in range(images.shape[1]):\n if i not in get_combined:\n combine_ids.append((i,))\n remap_encoder = {v: i for i, v in enumerate(ids_encoder)}\n remap_render = {v: i for i, v in enumerate(ids_render)}\n comb_encoder = [[remap_encoder[i] for i in group if i in ids_encoder] for group in combine_ids]\n comb_render = [[remap_render[i] for i in group if i in ids_render] for group in combine_ids]\n comb_encoder = [group for group in 
comb_encoder if len(group) > 0]\n comb_render = [group for group in comb_render if len(group) > 0]\n else:\n comb_encoder = None\n comb_render = None\n\n n, nv, c, h, w = images_encoder.shape\n c_l = self.encoder.latent_size\n\n if self.flip_augmentation and self.training:\n do_flip = (torch.rand(1) > .5).item()\n else:\n do_flip = False\n\n if do_flip:\n images_encoder = torch.flip(images_encoder, dims=(-1, ))\n\n image_latents_ms = self.encoder(images_encoder.view(n * nv, c, h, w))\n\n if do_flip:\n image_latents_ms = [torch.flip(il, dims=(-1, )) for il in image_latents_ms]\n\n _, _, h_, w_ = image_latents_ms[0].shape\n image_latents_ms = [F.interpolate(image_latents, (h_, w_)).view(n, nv, c_l, h_, w_) for image_latents in image_latents_ms]\n\n if torch.any(torch.isnan(torch.stack(image_latents_ms))):\n self.encoder(images_encoder.view(n * nv, c, h, w))\n # raise Exception(\"NaN in encoded features.\")\n\n self.grid_f_features = image_latents_ms\n self.grid_f_Ks = Ks_encoder\n self.grid_f_poses_w2c = poses_w2c_encoder\n self.grid_f_combine = comb_encoder\n\n self.grid_c_imgs = images_render\n self.grid_c_Ks = Ks_render\n self.grid_c_poses_w2c = poses_w2c_render\n self.grid_c_combine = comb_render\n\n def sample_features(self, xyz, use_single_featuremap=True):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_f_features[self._scale].shape\n\n # if use_single_featuremap:\n # nv = 1\n\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_f_poses_w2c[:, :nv, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_f_Ks[:, :nv] @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, :, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n if self.code_mode == \"z\":\n # Get z into [-1, 1] range\n if self.inv_z:\n z = (1 / z.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n z = (z - self.d_min) / (self.d_max - self.d_min)\n z = 2 * z - 1\n xyz_projected = torch.cat((xy, z), dim=-1)\n elif self.code_mode == \"distance\":\n if self.inv_z:\n distance = (1 / distance.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n distance = (distance - self.d_min) / (self.d_max - self.d_min)\n distance = 2 * distance - 1\n xyz_projected = torch.cat((xy, distance), dim=-1)\n xyz_code = self.code_xyz(xyz_projected.view(n * nv * n_pts, -1)).view(n, nv, n_pts, -1)\n\n feature_map = self.grid_f_features[self._scale][:, :nv]\n # These samples are from different scales\n if self.learn_empty:\n empty_feature_expanded = self.empty_feature.view(1, 1, 1, c).expand(n, nv, n_pts, c)\n\n sampled_features = F.grid_sample(feature_map.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=\"bilinear\", padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n\n if self.learn_empty:\n sampled_features[invalid.expand(-1, -1, -1, c)] = empty_feature_expanded[invalid.expand(-1, -1, -1, c)]\n\n sampled_features = torch.cat((sampled_features, xyz_code), dim=-1)\n\n # If there are multiple frames with predictions, reduce them.\n # TODO: Technically, this implementations should be improved if we use multiple frames.\n # The reduction should only happen after we perform the unprojection.\n\n if self.grid_f_combine is not None:\n 
invalid_groups = []\n sampled_features_groups = []\n\n for group in self.grid_f_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_features_groups.append(sampled_features[:, group])\n\n invalid_to_combine = invalid[:, group]\n features_to_combine = sampled_features[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n features_picked = torch.gather(features_to_combine, dim=1, index=indices.expand(-1, -1, -1, features_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_features_groups.append(features_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_features = torch.cat(sampled_features_groups, dim=1)\n\n if use_single_featuremap:\n sampled_features = sampled_features.mean(dim=1)\n invalid = torch.any(invalid, dim=1)\n\n return sampled_features, invalid\n\n def sample_colors(self, xyz):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_c_imgs.shape\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_c_poses_w2c[:, :, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_c_Ks @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n # This scales the x-axis into the right range.\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, :, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n sampled_colors = F.grid_sample(self.grid_c_imgs.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=self.color_interpolation, padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n assert not torch.any(torch.isnan(sampled_colors))\n\n if self.grid_c_combine is not None:\n invalid_groups = []\n sampled_colors_groups = []\n\n for group in self.grid_c_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_colors_groups.append(sampled_colors[:, group])\n continue\n\n invalid_to_combine = invalid[:, group]\n colors_to_combine = sampled_colors[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n colors_picked = torch.gather(colors_to_combine, dim=1, index=indices.expand(-1, -1, -1, colors_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_colors_groups.append(colors_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_colors = torch.cat(sampled_colors_groups, dim=1)\n\n if self.return_sample_depth:\n distance = distance.view(n, nv, n_pts, 1)\n sampled_colors = torch.cat((sampled_colors, distance), dim=-1)\n\n return sampled_colors, invalid\n\n def forward(self, xyz, coarse=True, viewdirs=None, far=False, only_density=False, predict_segmentation=False):\n \"\"\"\n Predict (r, g, b, sigma) at world space points xyz.\n Please call encode first!\n :param xyz (B, 3)\n B is batch of points (in rays)\n :param predict_segmentation, if true also return the segmentation distribution for all the points\n :return (B, 4) r g b sigma\n \"\"\"\n\n with profiler.record_function(\"model_inference\"):\n n, n_pts, _ = xyz.shape\n nv = self.grid_c_imgs.shape[1]\n\n if self.grid_c_combine is not None:\n nv = len(self.grid_c_combine)\n\n # Sampled features all has shape: scales [n, n_pts, c + 
xyz_code]\n sampled_features, invalid_features = self.sample_features(xyz, use_single_featuremap=not only_density) # invalid features (n, n_pts, 1)\n sampled_features = sampled_features.reshape(n * n_pts, -1)\n\n mlp_input = sampled_features.view(n, n_pts, -1)\n\n # Camera frustum culling stuff, currently disabled\n combine_index = None\n dim_size = None\n\n # Run main NeRF network\n if coarse or self.mlp_fine is None:\n mlp_output = self.mlp_coarse(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n else:\n mlp_output = self.mlp_fine(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n\n segs = None\n if predict_segmentation:\n segs = self.mlp_segmentation(mlp_input)\n # print(next(self.mlp_segmentation.parameters()))\n # softmax to get a class distribution\n segs = F.softmax(segs, dim=2)\n # (n, pts, c) -> (n, n_pts, c)\n mlp_output = mlp_output.reshape(n, n_pts, self._d_out)\n\n if self.sample_color:\n sigma = mlp_output[..., :1]\n sigma = F.softplus(sigma)\n rgb, invalid_colors = self.sample_colors(xyz) # (n, nv, pts, 3)\n else:\n sigma = mlp_output[..., :1]\n sigma = F.relu(sigma)\n rgb = mlp_output[..., 1:4].reshape(n, 1, n_pts, 3)\n rgb = F.sigmoid(rgb)\n invalid_colors = invalid_features.unsqueeze(-2)\n nv = 1\n\n if self.empty_empty:\n sigma[invalid_features[..., 0]] = 0\n # TODO: Think about this!\n # Since we don't train the colors directly, lets use softplus instead of relu\n\n if not only_density:\n _, _, _, c = rgb.shape\n rgb = rgb.permute(0, 2, 1, 3).reshape(n, n_pts, nv * c) # (n, pts, nv * 3)\n invalid_colors = invalid_colors.permute(0, 2, 1, 3).reshape(n, n_pts, nv)\n\n invalid = invalid_colors | invalid_features # Invalid features gets broadcasted to (n, n_pts, nv)\n invalid = invalid.to(rgb.dtype)\n else:\n rgb = torch.zeros((n, n_pts, nv * 3), device=sigma.device)\n invalid = invalid_features.to(sigma.dtype)\n\n if predict_segmentation:\n return rgb, invalid, sigma, segs\n else:\n return rgb, invalid, sigma" }, { "identifier": "ImageRaySampler", "path": "models/bts/model/ray_sampler.py", "snippet": "class ImageRaySampler(RaySampler):\n def __init__(self, z_near, z_far, height=None, width=None, channels=3, norm_dir=True):\n self.z_near = z_near\n self.z_far = z_far\n self.height = height\n self.width = width\n self.channels = channels\n self.norm_dir = norm_dir\n\n def sample(self, images, poses, projs, segs=None, sample_segs=False):\n n, v, _, _ = poses.shape\n\n if self.height is None:\n self.height, self.width = images.shape[-2:]\n\n all_rgb_gt = []\n all_rays = []\n all_segs_gt = []\n\n for n_ in range(n):\n focals = projs[n_, :, [0, 1], [0, 1]]\n centers = projs[n_, :, [0, 1], [2, 2]]\n\n rays = util.gen_rays(poses[n_].view(-1, 4, 4), self.width, self.height, focal=focals, c=centers, z_near=self.z_near, z_far=self.z_far, norm_dir=self.norm_dir).view(-1, 8)\n all_rays.append(rays)\n\n if images is not None:\n rgb_gt = images[n_].view(-1, self.channels, self.height, self.width)\n rgb_gt = (rgb_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, self.channels))\n all_rgb_gt.append(rgb_gt)\n\n if sample_segs:\n segs_gt = segs[n_].view(-1, 1, self.height, self.width)\n segs_gt = (segs_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, 1))\n all_segs_gt.append(segs_gt)\n\n all_rays = torch.stack(all_rays)\n if images is not None:\n all_rgb_gt = torch.stack(all_rgb_gt)\n else:\n all_rgb_gt = None\n\n if sample_segs:\n all_segs_gt = torch.stack(all_segs_gt)\n # the None accounts 
for the patch_to_image\n return all_rays, all_rgb_gt, all_segs_gt, None\n else:\n return all_rays, all_rgb_gt\n\n def reconstruct(self, render_dict, channels=None, reconstruct_segmentation=False):\n coarse = render_dict[\"coarse\"]\n fine = render_dict[\"fine\"]\n\n if channels is None:\n channels = self.channels\n\n if reconstruct_segmentation:\n c_segmentation = coarse[\"segs\"]\n # c_segmentation_raw = coarse[\"segs_raw\"]\n n_classes = c_segmentation.shape[-1]\n # n_samples = c_segmentation_raw.shape[-2]\n\n c_rgb = coarse[\"rgb\"] # n, n_pts, v * 3\n c_weights = coarse[\"weights\"]\n c_depth = coarse[\"depth\"]\n c_invalid = coarse[\"invalid\"]\n\n f_rgb = fine[\"rgb\"] # n, n_pts, v * 3\n f_weights = fine[\"weights\"]\n f_depth = fine[\"depth\"]\n f_invalid = fine[\"invalid\"]\n\n n, n_pts, v_c = c_rgb.shape\n v_in = n_pts // (self.height * self.width)\n v_render = v_c // channels\n c_n_smps = c_weights.shape[-1]\n f_n_smps = f_weights.shape[-1]\n # (This can be a different v from the sample method)\n\n if reconstruct_segmentation:\n coarse[\"segs\"] = c_segmentation.view(n, v_in, self.height, self.width, n_classes)\n # coarse[\"segs_raw\"] = c_segmentation_raw.view(n, v_in, self.height, self.width, n_samples, n_classes)\n\n coarse[\"rgb\"] = c_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n coarse[\"weights\"] = c_weights.view(n, v_in, self.height, self.width, c_n_smps)\n coarse[\"depth\"] = c_depth.view(n, v_in, self.height, self.width)\n coarse[\"invalid\"] = c_invalid.view(n, v_in, self.height, self.width, c_n_smps, v_render)\n\n fine[\"rgb\"] = f_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n fine[\"weights\"] = f_weights.view(n, v_in, self.height, self.width, f_n_smps)\n fine[\"depth\"] = f_depth.view(n, v_in, self.height, self.width)\n fine[\"invalid\"] = f_invalid.view(n, v_in, self.height, self.width, f_n_smps, v_render)\n\n if \"alphas\" in coarse:\n c_alphas = coarse[\"alphas\"]\n f_alphas = fine[\"alphas\"]\n coarse[\"alphas\"] = c_alphas.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"alphas\"] = f_alphas.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"z_samps\" in coarse:\n c_z_samps = coarse[\"z_samps\"]\n f_z_samps = fine[\"z_samps\"]\n coarse[\"z_samps\"] = c_z_samps.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"z_samps\"] = f_z_samps.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"rgb_samps\" in coarse:\n c_rgb_samps = coarse[\"rgb_samps\"]\n f_rgb_samps = fine[\"rgb_samps\"]\n coarse[\"rgb_samps\"] = c_rgb_samps.view(n, v_in, self.height, self.width, c_n_smps, v_render, channels)\n fine[\"rgb_samps\"] = f_rgb_samps.view(n, v_in, self.height, self.width, f_n_smps, v_render, channels)\n\n render_dict[\"coarse\"] = coarse\n render_dict[\"fine\"] = fine\n\n if \"rgb_gt\" in render_dict:\n rgb_gt = render_dict[\"rgb_gt\"]\n render_dict[\"rgb_gt\"] = rgb_gt.view(n, v_in, self.height, self.width, channels)\n\n return render_dict" }, { "identifier": "NeRFRenderer", "path": "models/common/render/nerf.py", "snippet": "class NeRFRenderer(torch.nn.Module):\n \"\"\"\n NeRF differentiable renderer\n :param n_coarse number of coarse (binned uniform) samples\n :param n_fine number of fine (importance) samples\n :param n_fine_depth number of expected depth samples\n :param noise_std noise to add to sigma. 
We do not use it\n :param depth_std noise for depth samples\n :param eval_batch_size ray batch size for evaluation\n :param white_bkgd if true, background color is white; else black\n :param lindisp if to use samples linear in disparity instead of distance\n :param sched ray sampling schedule. list containing 3 lists of equal length.\n sched[0] is list of iteration numbers,\n sched[1] is list of coarse sample numbers,\n sched[2] is list of fine sample numbers\n \"\"\"\n\n def __init__(\n self,\n n_coarse=128,\n n_fine=0,\n n_fine_depth=0,\n noise_std=0.0,\n depth_std=0.01,\n eval_batch_size=100000,\n white_bkgd=False,\n lindisp=False,\n sched=None, # ray sampling schedule for coarse and fine rays\n hard_alpha_cap=False\n ):\n super().__init__()\n self.n_coarse = n_coarse\n self.n_fine = n_fine\n self.n_fine_depth = n_fine_depth\n\n self.noise_std = noise_std\n self.depth_std = depth_std\n\n self.eval_batch_size = eval_batch_size\n self.white_bkgd = white_bkgd\n self.lindisp = lindisp\n if lindisp:\n print(\"Using linear displacement rays\")\n self.using_fine = n_fine > 0\n self.sched = sched\n if sched is not None and len(sched) == 0:\n self.sched = None\n self.register_buffer(\n \"iter_idx\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.register_buffer(\n \"last_sched\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.hard_alpha_cap = hard_alpha_cap\n\n def sample_coarse(self, rays):\n \"\"\"\n Stratified sampling. Note this is different from original NeRF slightly.\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :return (B, Kc)\n \"\"\"\n device = rays.device\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n\n step = 1.0 / self.n_coarse\n B = rays.shape[0]\n z_steps = torch.linspace(0, 1 - step, self.n_coarse, device=device) # (Kc)\n z_steps = z_steps.unsqueeze(0).repeat(B, 1) # (B, Kc)\n z_steps += torch.rand_like(z_steps) * step\n if not self.lindisp: # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n return 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kc)\n\n def sample_coarse_from_dist(self, rays, weights, z_samp):\n device = rays.device\n B = rays.shape[0]\n\n num_bins = weights.shape[-1]\n num_samples = self.n_coarse\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(B, num_samples, dtype=torch.float32, device=device) # (B, Kf)\n interval_ids = torch.searchsorted(cdf, u, right=True) - 1 # (B, Kf)\n interval_ids = torch.clamp(interval_ids, 0, num_samples-1)\n interval_interp = torch.rand_like(interval_ids, dtype=torch.float32)\n\n # z_samps describe the centers of the respective histogram bins. 
Therefore, we have to extend them to the left and right\n if self.lindisp:\n z_samp = 1 / z_samp\n\n centers = .5 * (z_samp[:, 1:] + z_samp[:, :-1])\n interval_borders = torch.cat((z_samp[:, :1], centers, z_samp[:, -1:]), dim=-1)\n\n left_border = torch.gather(interval_borders, dim=-1, index=interval_ids)\n right_border = torch.gather(interval_borders, dim=-1, index=interval_ids+1)\n\n z_samp_new = left_border * (1 - interval_interp) + right_border * interval_interp\n\n if self.lindisp:\n z_samp_new = 1 / z_samp_new\n\n assert not torch.any(torch.isnan(z_samp_new))\n\n return z_samp_new\n\n def sample_fine(self, rays, weights):\n \"\"\"min\n Weighted stratified (importance) sample\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param weights (B, Kc)\n :return (B, Kf-Kfd)\n \"\"\"\n device = rays.device\n B = rays.shape[0]\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(\n B, self.n_fine - self.n_fine_depth, dtype=torch.float32, device=device\n ) # (B, Kf)\n inds = torch.searchsorted(cdf, u, right=True).float() - 1.0 # (B, Kf)\n inds = torch.clamp_min(inds, 0.0)\n\n z_steps = (inds + torch.rand_like(inds)) / self.n_coarse # (B, Kf)\n\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n if not self.lindisp: # Use linear sampling in depth space\n z_samp = near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n z_samp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def sample_fine_depth(self, rays, depth):\n \"\"\"\n Sample around specified depth\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param depth (B)\n :return (B, Kfd)\n \"\"\"\n z_samp = depth.unsqueeze(1).repeat((1, self.n_fine_depth))\n z_samp += torch.randn_like(z_samp) * self.depth_std\n # Clamp does not support tensor bounds\n z_samp = torch.max(torch.min(z_samp, rays[:, -1:]), rays[:, -2:-1])\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def composite(self, model, rays, z_samp, coarse=True, sb=0, predict_segmentation=False):\n \"\"\"\n Render RGB and depth for each ray using NeRF alpha-compositing formula,\n given sampled positions along each ray (see sample_*)\n :param model should return (B, (r, g, b, sigma)) when called with (B, (x, y, z))\n should also support 'coarse' boolean argument\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param z_samp z positions sampled for each ray (B, K)\n :param coarse whether to evaluate using coarse NeRF\n :param predict_segmentation if true also predict the semantic distribution\n :param sb super-batch dimension; 0 = disable\n :return weights (B, K), rgb (B, 3), depth (B)\n \"\"\"\n with profiler.record_function(\"renderer_composite\"):\n B, K = z_samp.shape\n\n deltas = z_samp[:, 1:] - z_samp[:, :-1] # (B, K-1)\n delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # infty (B, 1)\n # delta_inf = rays[:, -1:] - z_samp[:, -1:]\n deltas = torch.cat([deltas, delta_inf], -1) # (B, K)\n\n # (B, K, 3)\n points = rays[:, None, :3] + z_samp.unsqueeze(2) * rays[:, None, 3:6]\n points = points.reshape(-1, 3) # (B*K, 3)\n\n use_viewdirs = hasattr(model, \"use_viewdirs\") and model.use_viewdirs\n\n rgbs_all, invalid_all, sigmas_all, segs_all = [], [], [], []\n if sb 
> 0:\n points = points.reshape(\n sb, -1, 3\n ) # (SB, B'*K, 3) B' is real ray batch size\n eval_batch_size = (self.eval_batch_size - 1) // sb + 1\n eval_batch_dim = 1\n else:\n eval_batch_size = self.eval_batch_size\n eval_batch_dim = 0\n\n split_points = torch.split(points, eval_batch_size, dim=eval_batch_dim)\n if use_viewdirs:\n dim1 = K\n viewdirs = rays[:, None, 3:6].expand(-1, dim1, -1) # (B, K, 3)\n if sb > 0:\n viewdirs = viewdirs.reshape(sb, -1, 3) # (SB, B'*K, 3)\n else:\n viewdirs = viewdirs.reshape(-1, 3) # (B*K, 3)\n split_viewdirs = torch.split(\n viewdirs, eval_batch_size, dim=eval_batch_dim\n )\n for pnts, dirs in zip(split_points, split_viewdirs):\n rgbs, invalid, sigmas = model(pnts, coarse=coarse, viewdirs=dirs)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n else:\n for pnts in split_points:\n if predict_segmentation:\n rgbs, invalid, sigmas, segs = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n segs_all.append(segs)\n else:\n rgbs, invalid, sigmas = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n points = None\n viewdirs = None\n # (B*K, 4) OR (SB, B'*K, 4)\n rgbs = torch.cat(rgbs_all, dim=eval_batch_dim)\n invalid = torch.cat(invalid_all, dim=eval_batch_dim)\n sigmas = torch.cat(sigmas_all, dim=eval_batch_dim)\n\n if predict_segmentation:\n segs = torch.cat(segs_all, dim=eval_batch_dim)\n segs = segs.reshape(B, K, -1) # (B, K, n_classes)\n\n rgbs = rgbs.reshape(B, K, -1) # (B, K, 4 or 5)\n invalid = invalid.reshape(B, K, -1)\n sigmas = sigmas.reshape(B, K)\n\n if self.training and self.noise_std > 0.0:\n sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std\n\n alphas = 1 - torch.exp(-deltas.abs() * torch.relu(sigmas)) # (B, K) (delta should be positive anyways)\n\n if self.hard_alpha_cap:\n alphas[:, -1] = 1\n\n deltas = None\n sigmas = None\n alphas_shifted = torch.cat(\n [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1\n ) # (B, K+1) = [1, a1, a2, ...]\n T = torch.cumprod(alphas_shifted, -1) # (B)\n weights = alphas * T[:, :-1] # (B, K)\n # alphas = None\n alphas_shifted = None\n\n rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2) # (B, 3)\n depth_final = torch.sum(weights * z_samp, -1) # (B)\n\n\n\n if self.white_bkgd:\n # White background\n pix_alpha = weights.sum(dim=1) # (B), pixel alpha\n rgb_final = rgb_final + 1 - pix_alpha.unsqueeze(-1) # (B, 3)\n\n if predict_segmentation:\n segs_final = torch.sum(weights.unsqueeze(-1) * segs, dim=-2) # (B, n_classes)\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs,\n # segs,\n segs_final\n )\n else:\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs\n )\n\n def forward(\n self, model, rays, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, predict_segmentation=False, sample_from_dist=None):\n \"\"\"\n :model nerf model, should return (SB, B, (r, g, b, sigma))\n when called with (SB, B, (x, y, z)), for multi-object:\n SB = 'super-batch' = size of object batch,\n B = size of per-object ray batch.\n Should also support 'coarse' boolean argument for coarse NeRF.\n :param rays ray spec [origins (3), directions (3), near (1), far (1)] (SB, B, 8)\n :param want_weights if true, returns compositing weights (SB, B, K)\n :param predict_segmentation if true, return the segmentation class distribution for each pixel\n :return render dict\n 
\"\"\"\n with profiler.record_function(\"renderer_forward\"):\n if self.sched is not None and self.last_sched.item() > 0:\n self.n_coarse = self.sched[1][self.last_sched.item() - 1]\n self.n_fine = self.sched[2][self.last_sched.item() - 1]\n\n assert len(rays.shape) == 3\n superbatch_size = rays.shape[0]\n rays = rays.reshape(-1, 8) # (SB * B, 8)\n\n if sample_from_dist is None:\n z_coarse = self.sample_coarse(rays) # (B, Kc)\n else:\n prop_weights, prop_z_samp = sample_from_dist\n n_samples = prop_weights.shape[-1]\n prop_weights = prop_weights.reshape(-1, n_samples)\n prop_z_samp = prop_z_samp.reshape(-1, n_samples)\n z_coarse = self.sample_coarse_from_dist(rays, prop_weights, prop_z_samp)\n z_coarse, _ = torch.sort(z_coarse, dim=-1)\n\n coarse_composite = self.composite(\n model, rays, z_coarse, coarse=True, sb=superbatch_size, predict_segmentation=predict_segmentation\n )\n\n outputs = DotMap(\n coarse=self._format_outputs(\n coarse_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas,\n want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps, want_segmentation=predict_segmentation\n ),\n )\n\n if self.using_fine:\n all_samps = [z_coarse]\n if self.n_fine - self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine(rays, coarse_composite[0].detach())\n ) # (B, Kf - Kfd)\n if self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine_depth(rays, coarse_composite[2])\n ) # (B, Kfd)\n z_combine = torch.cat(all_samps, dim=-1) # (B, Kc + Kf)\n z_combine_sorted, argsort = torch.sort(z_combine, dim=-1)\n fine_composite = self.composite(\n model, rays, z_combine_sorted, coarse=False, sb=superbatch_size,\n )\n outputs.fine = self._format_outputs(\n fine_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas, want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps\n )\n\n return outputs\n\n def _format_outputs(\n self, rendered_outputs, superbatch_size, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, want_segmentation=False\n ):\n if want_segmentation:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps, segs_final = rendered_outputs\n else:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps = rendered_outputs\n\n n_smps = weights.shape[-1]\n out_d_rgb = rgb_final.shape[-1]\n out_d_i = invalid.shape[-1]\n\n if superbatch_size > 0:\n rgb_final = rgb_final.reshape(superbatch_size, -1, out_d_rgb)\n depth = depth.reshape(superbatch_size, -1)\n weights = weights.reshape(superbatch_size, -1, n_smps)\n alphas = alphas.reshape(superbatch_size, -1, n_smps)\n invalid = invalid.reshape(superbatch_size, -1, n_smps, out_d_i)\n z_samps = z_samps.reshape(superbatch_size, -1, n_smps)\n rgb_samps = rgb_samps.reshape(superbatch_size, -1, n_smps, out_d_rgb)\n\n if want_segmentation:\n out_segs = segs_final.shape[-1]\n segs_final = segs_final.reshape(superbatch_size, -1, out_segs)\n\n ret_dict = DotMap(rgb=rgb_final, depth=depth, invalid=invalid)\n if want_weights:\n ret_dict.weights = weights\n if want_alphas:\n ret_dict.alphas = alphas\n if want_z_samps:\n ret_dict.z_samps = z_samps\n if want_rgb_samps:\n ret_dict.rgb_samps = rgb_samps\n if want_segmentation:\n ret_dict.segs = segs_final\n # ret_dict.segs_raw = segs_raw\n return ret_dict\n\n def sched_step(self, steps=1):\n \"\"\"\n Called each training iteration to update sample numbers\n according to schedule\n \"\"\"\n if self.sched is None:\n return\n self.iter_idx += steps\n while (\n self.last_sched.item() < len(self.sched[0])\n and 
self.iter_idx.item() >= self.sched[0][self.last_sched.item()]\n ):\n self.n_coarse = self.sched[1][self.last_sched.item()]\n self.n_fine = self.sched[2][self.last_sched.item()]\n print(\n \"INFO: NeRF sampling resolution changed on schedule ==> c\",\n self.n_coarse,\n \"f\",\n self.n_fine,\n )\n self.last_sched += 1\n\n @classmethod\n def from_conf(cls, conf, white_bkgd=False, eval_batch_size=100000):\n return cls(\n conf.get(\"n_coarse\", 128),\n conf.get(\"n_fine\", 0),\n n_fine_depth=conf.get(\"n_fine_depth\", 0),\n noise_std=conf.get(\"noise_std\", 0.0),\n depth_std=conf.get(\"depth_std\", 0.01),\n white_bkgd=conf.get(\"white_bkgd\", white_bkgd),\n lindisp=conf.get(\"lindisp\", True),\n eval_batch_size=conf.get(\"eval_batch_size\", eval_batch_size),\n sched=conf.get(\"sched\", None),\n hard_alpha_cap=conf.get(\"hard_alpha_cap\", False)\n )\n\n def bind_parallel(self, net, gpus=None, simple_output=False):\n \"\"\"\n Returns a wrapper module compatible with DataParallel.\n Specifically, it renders rays with this renderer\n but always using the given network instance.\n Specify a list of GPU ids in 'gpus' to apply DataParallel automatically.\n :param net A PixelNeRF network\n :param gpus list of GPU ids to parallize to. If length is 1,\n does not parallelize\n :param simple_output only returns rendered (rgb, depth) instead of the \n full render output map. Saves data tranfer cost.\n :return torch module\n \"\"\"\n wrapped = _RenderWrapper(net, self, simple_output=simple_output)\n if gpus is not None and len(gpus) > 1:\n print(\"Using multi-GPU\", gpus)\n wrapped = torch.nn.DataParallel(wrapped, gpus, dim=1)\n return wrapped" }, { "identifier": "map_fn", "path": "utils/array_operations.py", "snippet": "def map_fn(batch, fn):\ndef to(data, device, non_blocking=True):\ndef set_requires_grad(nets, requires_grad=False):\ndef mask_mean(t: torch.Tensor, m: torch.Tensor, dim=None, keepdim=False):\ndef apply_crop(array, crop):\ndef shrink_mask(mask, shrink=3):\ndef get_mask(size, border=5, device=None):\ndef get_grid(H, W, normalize=True):\ndef detach(t):" }, { "identifier": "color_tensor", "path": "utils/plotting.py", "snippet": "def color_tensor(tensor: torch.Tensor, cmap, norm=False):\n if norm:\n tensor = (tensor - tensor.min()) / (tensor.max() - tensor.min())\n map = plt.cm.get_cmap(cmap)\n tensor = torch.tensor(map(tensor.cpu().numpy()), device=tensor.device)[..., :3]\n return tensor" } ]
import numpy as np import sys import copy import hydra import torch from moviepy.video.io.ImageSequenceClip import ImageSequenceClip from scipy.spatial.transform import Rotation from tqdm import tqdm from scripts.inference_setup import * from models.bts.model import BTSNet, ImageRaySampler from models.common.render import NeRFRenderer from utils.array_operations import map_fn, unsqueezer from utils.plotting import color_tensor
12,693
if task == "KITTI-360": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kitti360("videos/transition", "test") z_top = 10 y_top = -6 t_near = 5 t_far = 7 target_angle = math.radians(85) elif task == "KITTI-Raw": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kittiraw("videos/transition", "test") z_top = 14 y_top = -8 t_near = 8 t_far = 10 target_angle = math.radians(90) else: raise ValueError(f"Invalid task: {task}") # Slightly hacky, but we need to load the config based on the task global config config = {} @hydra.main(version_base=None, config_path="../../configs", config_name=config_path) def main_dummy(cfg): global config config = copy.deepcopy(cfg) main_dummy() print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) net = BTSNet(config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() renderer.renderer.n_coarse = 64 renderer.renderer.lindisp = True class _Wrapper(nn.Module): def __init__(self): super().__init__() self.renderer = renderer _wrapper = _Wrapper() _wrapper.load_state_dict(cp["model"], strict=False) renderer.to(device) renderer.eval() ray_sampler = ImageRaySampler(config["model_conf"]["z_near"], config["model_conf"]["z_far"], *resolution, norm_dir=False) z_near = config["model_conf"]["z_near"] z_far = config["model_conf"]["z_far"] z_near = d_min z_far = d_max with torch.no_grad(): for idx in indices: data = dataset[idx] data_batch = map_fn(map_fn(data, torch.tensor), unsqueezer) images = torch.stack(data_batch["imgs"], dim=1).to(device) poses = torch.stack(data_batch["poses"], dim=1).to(device) projs = torch.stack(data_batch["projs"], dim=1).to(device) # Move coordinate system to input frame poses = torch.inverse(poses[:, :1, :, :]) @ poses net.encode(images, projs, poses, ids_encoder=[0], ids_render=[0]) net.set_scale(0) frames = [] for i in tqdm(range(length + 5)): prog = (i / (length - 1)) ** 2 prog = min(prog, 1) pose = torch.eye(4, device=device) angle = -target_angle * prog rotation = torch.tensor(Rotation.from_euler("x", angle, degrees=False).as_matrix(), device=device) pose[:3, :3] = rotation z = z_top - math.cos(-angle) * z_top y = math.sin(-angle) * y_top pose[1, 3] = y pose[2, 3] = z z_near_ = z_near * (1 - prog) + t_near * prog z_far_ = z_far * (1 - prog) + t_far * prog target_width = int(resolution[1] * (1 - prog) + resolution[0] * prog) pad_left = (resolution[1] - target_width) // 2 pad_right = (resolution[1] - target_width) - pad_left projs_ = projs[:, :1].clone() projs_[0, 0, 1, 1] = projs_[0, 0, 1, 1] * (target_width / resolution[1]) ray_sampler.width = target_width ray_sampler.z_near = z_near_ ray_sampler.z_far = z_far_ novel_view, depth = render_poses(renderer, ray_sampler, pose.view(1, 1, 4, 4), projs_[:, :1]) depth = 1 / depth.squeeze() depth = ((depth - 1 / z_far_) / (1 / z_near_ - 1 / z_far_)).clamp(0, 1) novel_view = novel_view.squeeze(-2).squeeze(0) if i > 0: depth_ = torch.zeros(*resolution, device=device) depth_[:, pad_left:-pad_right] = depth depth = depth_ novel_view_ = torch.zeros(*resolution, 3, device=device) novel_view_[:, pad_left:-pad_right, :] = novel_view novel_view = novel_view_ novel_view = novel_view[:, :].cpu().numpy()
sys.path.append(".") def main(): s_img = True s_depth = True dry_run = False indices = [0] task = "KITTI-360" assert task in ["KITTI-360", "KITTI-Raw"] length = 30 d_min = 3 d_max = 40 if task == "KITTI-360": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kitti360("videos/transition", "test") z_top = 10 y_top = -6 t_near = 5 t_far = 7 target_angle = math.radians(85) elif task == "KITTI-Raw": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kittiraw("videos/transition", "test") z_top = 14 y_top = -8 t_near = 8 t_far = 10 target_angle = math.radians(90) else: raise ValueError(f"Invalid task: {task}") # Slightly hacky, but we need to load the config based on the task global config config = {} @hydra.main(version_base=None, config_path="../../configs", config_name=config_path) def main_dummy(cfg): global config config = copy.deepcopy(cfg) main_dummy() print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) net = BTSNet(config["model_conf"]) renderer = NeRFRenderer.from_conf(config["renderer"]) renderer = renderer.bind_parallel(net, gpus=None).eval() renderer.renderer.n_coarse = 64 renderer.renderer.lindisp = True class _Wrapper(nn.Module): def __init__(self): super().__init__() self.renderer = renderer _wrapper = _Wrapper() _wrapper.load_state_dict(cp["model"], strict=False) renderer.to(device) renderer.eval() ray_sampler = ImageRaySampler(config["model_conf"]["z_near"], config["model_conf"]["z_far"], *resolution, norm_dir=False) z_near = config["model_conf"]["z_near"] z_far = config["model_conf"]["z_far"] z_near = d_min z_far = d_max with torch.no_grad(): for idx in indices: data = dataset[idx] data_batch = map_fn(map_fn(data, torch.tensor), unsqueezer) images = torch.stack(data_batch["imgs"], dim=1).to(device) poses = torch.stack(data_batch["poses"], dim=1).to(device) projs = torch.stack(data_batch["projs"], dim=1).to(device) # Move coordinate system to input frame poses = torch.inverse(poses[:, :1, :, :]) @ poses net.encode(images, projs, poses, ids_encoder=[0], ids_render=[0]) net.set_scale(0) frames = [] for i in tqdm(range(length + 5)): prog = (i / (length - 1)) ** 2 prog = min(prog, 1) pose = torch.eye(4, device=device) angle = -target_angle * prog rotation = torch.tensor(Rotation.from_euler("x", angle, degrees=False).as_matrix(), device=device) pose[:3, :3] = rotation z = z_top - math.cos(-angle) * z_top y = math.sin(-angle) * y_top pose[1, 3] = y pose[2, 3] = z z_near_ = z_near * (1 - prog) + t_near * prog z_far_ = z_far * (1 - prog) + t_far * prog target_width = int(resolution[1] * (1 - prog) + resolution[0] * prog) pad_left = (resolution[1] - target_width) // 2 pad_right = (resolution[1] - target_width) - pad_left projs_ = projs[:, :1].clone() projs_[0, 0, 1, 1] = projs_[0, 0, 1, 1] * (target_width / resolution[1]) ray_sampler.width = target_width ray_sampler.z_near = z_near_ ray_sampler.z_far = z_far_ novel_view, depth = render_poses(renderer, ray_sampler, pose.view(1, 1, 4, 4), projs_[:, :1]) depth = 1 / depth.squeeze() depth = ((depth - 1 / z_far_) / (1 / z_near_ - 1 / z_far_)).clamp(0, 1) novel_view = novel_view.squeeze(-2).squeeze(0) if i > 0: depth_ = torch.zeros(*resolution, device=device) depth_[:, pad_left:-pad_right] = depth depth = depth_ novel_view_ = torch.zeros(*resolution, 3, device=device) novel_view_[:, pad_left:-pad_right, :] = novel_view novel_view = novel_view_ novel_view = novel_view[:, :].cpu().numpy()
depth = color_tensor(depth.cpu(), "magma", norm=False).numpy()
4
2023-11-12 21:53:27+00:00
16k
TCLResearchEurope/torch-dag
torch_dag_algorithms/pruning/module_multipliers.py
[ { "identifier": "structured_modules", "path": "torch_dag/structured_modules.py", "snippet": "ACTIVATION_MODULES_T = Union[\n nn.ReLU,\n nn.ReLU6,\n nn.SiLU,\n nn.Softmax,\n nn.Sigmoid,\n nn.Hardswish,\n nn.Hardsigmoid,\n nn.GELU,\n nn.LeakyReLU,\n nn.ELU,\n nn.Tanh,\n nn.Identity,\n]\nACTIVATION_MODULES = get_args(ACTIVATION_MODULES_T) # -ish...\n B, C, H, W = x.shape\n B, C, H, W = x.shape\n B, N, C = x.shape\n H = W = math.isqrt(N)\n B, N, C = x.shape\n SPECS = (\n '(B,C,H,W)->(B,H*W,C)',\n '(B,N,C)->(B,N,target)',\n )\n PREDECESSOR_KEYWORD = 'predecessor'\n B, C, H, W = x.shape\n B, N, C = x.shape\n SPECS = (\n '(B,C,H,W)->(B,H*W,C)',\n '(B,N,C)->(B,N,target)',\n )\n PREDECESSOR_KEYWORD = 'predecessor'\n B, C, H, W = x.shape\n B, N, C = x.shape\n B, C, H, W = x.shape\n B, C, H, W = x.shape\n B, N, C = x.shape\n H = W = math.isqrt(N)\n B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)\n N = int(h * scale_factor)\n B, T, C, _ = q.size()\n PREDECESSOR_KEYWORD = 'predecessor'\n B, N, C = x.shape\n B, N, C = x.shape\ndef space_to_depth(x: torch.Tensor, block_size: int):\ndef depth_to_space(x: torch.Tensor, block_size: int):\n def build_activation_module(cls, activation_name):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(\n self,\n dim: int,\n ):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, perm: Tuple[int, ...]):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, dim0: int, dim1: int):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n dim,\n keepdim: bool,\n ):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n block_size: int,\n ):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, inputs: torch.Tensor, target_shape=None):\n def __init__(\n self,\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, inputs: torch.Tensor, target_shape=None):\n def forward(self, inputs: torch.Tensor):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(\n self,\n index: Union[int, Tuple[int, ...]],\n ):\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]):\n def calc_same_pad(self, i: int, k: int, s: int, d: int) -> int:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, slice_spec):\n def replace_ellipses_by_slices(slice_spec):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, transpose: bool, normalize: bool = True):\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def __init__(self, num_channels: int, use_bias: bool, weight_init_value: float = 1e-5):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: Union[torch.Tensor, List[torch.Tensor]]) -> torch.Tensor:\n def __init__(self, bn: torch.nn.BatchNorm1d):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, scalar):\n def forward(self, x: 
torch.Tensor) -> torch.Tensor:\n def __init__(self, param: torch.nn.Parameter):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, p: str = 'fro', dim=None, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, dim, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, dim, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, split_size_or_sections, dim=0):\n def forward(self, x) -> List[torch.Tensor]:\n def __init__(\n self,\n spec: Union[str, Dict],\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def clear_custom_buffers(self):\n def forward(self, x) -> torch.Tensor:\n def __init__(\n self,\n spec: Union[str, Dict],\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, dim: int):\n def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, chunks, dim: int):\n def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, dim: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self):\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def __init__(self, start_dim: int = 0, end_dim: int = - 1):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, block_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, block_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(\n self,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n recompute_scale_factor=None,\n antialias=False,\n ):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, p=2.0, dim=1, ):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, pad: tuple, mode: str = 'constant', value: int = None):\n def forward(self, input):\n def __init__(self, ndim, bias):\n def forward(self, input):\n def forward(self, x):\n def forward(self, idx):\n def __init__(self, config):\n def forward(self, x):\n def __init__(self, scale_factor=2.0):\n def forward(self, x):\n def __init__(self,\n dim: int,\n num_heads: int,\n use_bias: bool = True,\n dropout_rate: float = 0.0,\n output_dropout_rate: float = 0.0,\n include_reshapings: bool = False,\n ):\n def forward(self, x: List[torch.Tensor]):\n def __init__(self, in_features: int, out_features):\n def forward(self, x):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def entropy_loss(self, epsilon: float = 0.01):\n def trainable_params(self):\n def fuse(self):\n def __init__(self, in_features: int, out_features, hidden_dim: int):\n def forward(self, x):\n def trainable_params(self):\n def __init__(self, in_features: int, out_features):\n def forward(self, x):\n def __init__(self, scale_factor: int, align_corners: bool = False):\n def forward(self, x):\n def __init__(\n self,\n num_heads: int,\n ):\n def forward(self, input: torch.Tensor):\n def __init__(\n self,\n num_heads: int,\n ):\n def forward(self, input: torch.Tensor):\n def __init__(self, in_features: int, out_features: int):\n def trainable_params(self):\n def non_logits_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01):\n def 
fuse(self):\n def __init__(self, in_features: int, out_features: int, bias: bool = True):\n def forward(self, x):\n def __init__(self, in_features: int, out_features: int):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01):\n def __init__(\n self,\n dim,\n num_ss_tokens: int,\n s_ratio: int = 4,\n use_bias: bool = True,\n activation=nn.ReLU(),\n ):\n def forward(self, x):\n def __init__(self, in_features: int, out_features: int, ks: int, padding, stride, bias: bool):\n def non_logits_params(self):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self) -> torch.Tensor:\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01) -> torch.Tensor:\n def fuse(self) -> Tuple[nn.Conv2d, nn.Conv2d]:\n def build_from_conv(cls, module: nn.Conv2d) -> \"DecomposedConv\":\n def __init__(self, in_features: int, out_features: int, bias: bool):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self) -> torch.Tensor:\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01) -> torch.Tensor:\n def fuse(self) -> Tuple[nn.Linear, nn.Linear]:\n def build_from_linear(cls, module: nn.Linear) -> \"DecomposedLinear\":\n def __init__(self, pow: Union[float, int]):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, dim: int):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, sizes: Union[torch.Size, int]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, dropout_p=0.0, is_causal: bool = False):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, function: Callable, spec: Dict):\n def _build_inputs(self, spec: Dict, inputs=None, inputs_counter: int = 0):\n def forward(self, inputs):\n def __init__(self, dim):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, arg):\n def forward(self, x):\n def __init__(self, shifts: Union[int, Tuple[int, ...]], dims: Union[int, Tuple[int, ...]] = None):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n index: Union[int, Tuple[int, ...]],\n ):\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n width_multiplier: float = 0.25,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_norm=False,\n attn_drop=0.,\n proj_drop=0.,\n norm_layer=nn.LayerNorm,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_norm=False,\n attn_drop=0.,\n proj_drop=0.,\n norm_layer=nn.LayerNorm,\n ):\n def forward(self, x):\n def convert_from_timm(cls, module: Attention):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n width_multiplier: float = 0.25,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n ):\n def forward(self, x):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n use_bias: bool = False,\n ):\n def rank(self) -> 
int:\n def s(self):\n def build_from_linear(cls, module: nn.Linear):\n def forward(self, x):\nclass ActivationModuleBuilder:\nclass EmptyModule(torch.nn.Module):\nclass AddModule(torch.nn.Module):\nclass SubModule(torch.nn.Module):\nclass MulModule(torch.nn.Module):\nclass DivModule(torch.nn.Module):\nclass ConcatModule(torch.nn.Module):\nclass PermuteModule(torch.nn.Module):\nclass TransposeModule(torch.nn.Module):\nclass GlobalMeanPool2DModule(torch.nn.Module):\nclass SpaceToDepthModule(torch.nn.Module):\nclass ReshapeModule(torch.nn.Module):\nclass ReshapeModuleV2(torch.nn.Module):\nclass PatchifyModule(torch.nn.Module):\nclass DePatchifyModule(torch.nn.Module):\nclass TensorMergerModule(torch.nn.Module):\nclass TensorExtractorModule(torch.nn.Module):\nclass Conv2DSameModule(torch.nn.Conv2d):\nclass SliceModule(torch.nn.Module):\nclass GetShapeModule(torch.nn.Module):\nclass GetShapeModuleV2(torch.nn.Module):\nclass TfMatmulModule(torch.nn.Module):\nclass MatmulModule(torch.nn.Module):\nclass ChannelAffineModule(torch.nn.Module):\nclass TfTokenizeModule(torch.nn.Module):\nclass TfDetokenizeModule(torch.nn.Module):\nclass TfBatchNorm1d(torch.nn.Module):\nclass ScalarMul(torch.nn.Module):\nclass ParameterModule(torch.nn.Module):\nclass NormModule(torch.nn.Module):\nclass MeanModule(torch.nn.Module):\nclass SumModule(torch.nn.Module):\nclass SplitModule(torch.nn.Module):\nclass ReshapeWithSpecModule(torch.nn.Module):\nclass ReshapeWithSpecModuleV2(torch.nn.Module):\nclass TokenizeModule(torch.nn.Module):\nclass DetokenizeModule(torch.nn.Module):\nclass UnbindModule(torch.nn.Module):\nclass ChunkModule(torch.nn.Module):\nclass AuxiliaryTokenModule(torch.nn.Module):\nclass ExpandAsModule(torch.nn.Module):\nclass FlattenModule(torch.nn.Module):\nclass DepthToSpaceModule(torch.nn.Module):\nclass SpaceToDepthModule(torch.nn.Module):\nclass InterpolateModule(torch.nn.Module):\nclass NormalizeModule(torch.nn.Module):\nclass PadModule(torch.nn.Module):\nclass LayerNormWithOptionalBias(nn.Module):\nclass GeluGPT(nn.Module):\nclass PositionalEmbeddingGPT(nn.Module):\nclass CausalSelfAttention(nn.Module):\nclass BilinearUpsampling(nn.Module):\nclass EfficientAttention(nn.Module):\nclass AdjustableQueryKeyMatmul(nn.Module):\nclass PreFusedAdjustableQueryKeyMatmul(nn.Module):\nclass FusedAdjustableQueryKeyMatmul(nn.Module):\nclass HalfPixelCentersFalseBilinearUpsample(nn.Module):\nclass MakeHeadsModule(torch.nn.Module):\nclass UnmakeHeadsModule(torch.nn.Module):\nclass SparseAdjustableLinear(nn.Module):\nclass SparseLinear(nn.Linear):\nclass DecomposedSparseLinear(nn.Module):\nclass StateSpaceAttentionV2(torch.nn.Module):\nclass DecomposedConv(nn.Module):\nclass DecomposedLinear(nn.Module):\nclass PowerModule(torch.nn.Module):\nclass UnsqueezeModule(torch.nn.Module):\nclass ExpandTokenModule(torch.nn.Module):\nclass AddcmulModule(torch.nn.Module):\nclass ScaledDotProductAttentionModule(torch.nn.Module):\nclass AutoWrapFunctionModule(torch.nn.Module):\nclass StackModule(torch.nn.Module):\nclass ArgModule(torch.nn.Module):\nclass RollModule(torch.nn.Module):\nclass GetItemModule(torch.nn.Module):\nclass StageZeroSllrcAttention(torch.nn.Module):\nclass Attention(nn.Module):\nclass BatchedAttention(torch.nn.Module):\nclass SllrcAttention(torch.nn.Module):\nclass MultiQueryAttention(torch.nn.Module):\nclass SvdLinear(nn.Module):" }, { "identifier": "DagModule", "path": "torch_dag/core/dag_module.py", "snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n 
vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVertex] = None,\n ):\n super().__init__()\n self.name = name\n self.vertices = vertices if vertices is not None else []\n self.output_vertex = output_vertex\n self.forward_dict = None\n self.inputs_dict = None\n self.cache_forward_dict = False\n self._inner_modules = None\n self.forward_scaffold = {}\n self.output_index = None\n self.compiled = False\n self.update_inner_modules()\n\n def compile(self, inputs: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None):\n \"\"\"\n In general `forward` method of DagModule is not `torch.compile` friendly. To overcome that\n we need to use a modified implementation of the forward pass, with no cacheing of intermediate tensors.\n Additionally, some modules may require a compile-type step for `torch.compile` usage.\n :param inputs: optional input (a dummy tensor for a single forward pass)\n \"\"\"\n if inputs is not None:\n is_training = self.training\n if is_training:\n self.eval()\n _ = self(inputs)\n if is_training:\n self.train()\n\n self.forward_scaffold, self.output_index = self.get_forward_scaffold()\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n v.module.compile()\n self.compiled = True\n\n @property\n def inner_modules(self) -> torch.nn.ModuleList:\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n return self._inner_modules\n\n @property\n def input_vertices(self) -> List[InputVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InputVertex)]\n\n @property\n def inner_vertices(self) -> List[InnerVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InnerVertex)]\n\n def update_inner_modules(self):\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n for iv in self.inner_vertices:\n if isinstance(iv.module, DagModule):\n iv.module.update_inner_modules()\n\n def get_vertex_by_name(self, name: str) -> Union[InnerVertex, InputVertex]:\n result = [vertex for vertex in self.vertices if vertex.name == name]\n if len(result) == 1:\n return result[0]\n elif len(result) > 1:\n raise AssertionError(f'Multiple vertices found with name: {name} -> {result}')\n else:\n return\n\n def get_forward_scaffold(self):\n # a mapping between vertices index and its predecessors indices\n forward_scaffold = {}\n for k, vertex in enumerate(self.vertices):\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n predecessors_indices = [\n self.vertices.index(pd) for pd in predecessors\n ]\n forward_scaffold[k] = predecessors_indices\n\n output_index = self.vertices.index(self.output_vertex)\n\n return forward_scaffold, output_index\n\n def compiled_forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n\n assert self.compiled\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_list = [None for _ in range(len(self.vertices))]\n\n for k, input in enumerate(inputs):\n forward_list[k] = input\n\n num_inputs = len(inputs)\n\n for k in range(len(self.vertices)):\n if k < num_inputs:\n pass\n else:\n\n pd_indices = self.forward_scaffold[k]\n module_inputs = [forward_list[pd_index] for pd_index in pd_indices]\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n try:\n result = 
self.vertices[k].module(module_inputs)\n except (TypeError, AttributeError):\n result = self.vertices[k].module(*module_inputs)\n result = _postprocess_module_output(result)\n\n forward_list[k] = result\n\n return forward_list[self.output_index]\n\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n # this is for `torch.compile` usage\n if self.compiled:\n return self.compiled_forward(inputs)\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_dict = {}\n for k, v in enumerate(self.input_vertices):\n forward_dict[v] = inputs[k]\n\n # forward_dict = {vertex: tensor for vertex, tensor in zip(self.input_vertices, inputs)}\n inputs_dict = {}\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n module_inputs = [forward_dict[pd] for pd in predecessors]\n inputs_dict[vertex] = module_inputs\n\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n\n try:\n result = vertex.module(module_inputs)\n except (TypeError, AttributeError):\n result = vertex.module(*module_inputs)\n # if isinstance(result, Tuple):\n result = _postprocess_module_output(result)\n\n forward_dict[vertex] = result\n if self.cache_forward_dict:\n self.forward_dict = forward_dict\n self.inputs_dict = inputs_dict\n return forward_dict[self.output_vertex]\n\n def traverse(\n self,\n processor: VertexProcessor = None,\n ):\n if processor is None:\n inner_vertices = []\n for inner_vertex in self.inner_vertices:\n if isinstance(inner_vertex.module, DagModule):\n inner_vertices.extend(inner_vertex.module.traverse())\n inner_vertices.append(inner_vertex)\n return inner_vertices\n else:\n for inner_vertex in self.traverse():\n processor(inner_vertex)\n # TODO: Remove after validation\n # self._update_inner_modules()\n\n def _check_if_name_unique(self, name: str):\n if name in [v.name for v in self.vertices]:\n raise ValueError(\n f'{self.name} already has an Vertex with name {name}. 
Please use different name.'\n )\n\n def add_input_vertex(self, name: str) -> InputVertex:\n self._check_if_name_unique(name)\n input_vertex = InputVertex(name)\n self.vertices.append(input_vertex)\n return input_vertex\n\n def add_vertex(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ) -> InnerVertex:\n self._check_if_name_unique(name)\n assert isinstance(module, torch.nn.Module)\n\n inner_vertex = InnerVertex(\n name=name,\n module=module,\n predecessors=predecessors,\n )\n for predecessor in predecessors:\n if predecessor not in self.vertices:\n raise ValueError(f'The predecessor: {predecessor} of InnerVertex: {InnerVertex} is not in '\n f'the DagModule: {self.name}')\n self.vertices.append(inner_vertex)\n self.inner_modules.append(module)\n inner_vertex.dag_module = self\n return inner_vertex\n\n def __repr__(self):\n representation = f'{self.__class__.__name__}[{self.name}]'\n if len(self.vertices) == 0:\n return representation\n for inner_vertex in self.inner_vertices:\n inner_vertex.MAX_LEN_REPR = self.MAX_LEN_REPR\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n representation += f'\\n << {vertex.name} '\n else:\n index = self.inner_vertices.index(vertex)\n prefix = '>>' if vertex == self.output_vertex else '*'\n if isinstance(vertex.module, DagModule):\n prefix += '#'\n representation += f\"\\n{prefix} {index}: {vertex} \" \\\n f\"--> predecessors: {vertex.predecessors}, \" \\\n f\"successors: {vertex.successors}\"\n representation += f' {self.add_custom_module_info(vertex)}'\n for vertex in self.inner_vertices:\n vertex.MAX_LEN_REPR = None\n return representation\n\n def add_custom_module_info(self, vertex: InnerVertex):\n m = vertex.module\n if isinstance(m, torch.nn.Conv2d):\n return f'Conv2d(in={m.in_channels}, out={m.out_channels}, ks={m.kernel_size}, padding={m.padding})'\n if isinstance(m, torch.nn.Linear):\n return f'Linear(in={m.in_features}, out={m.out_features})'\n return ''\n\n def mark_current_top_vertex_as_output(self):\n if not self.inner_vertices:\n raise ValueError(f'One cannot mark top node in an empty {self}')\n if self.output_vertex is not None:\n logger.warning(f'{self} already has an output vertex. 
Replacing...')\n self.output_vertex = self.inner_vertices[-1]\n\n @property\n def module_classes(self) -> Set:\n return set([m.__class__ for m in self.inner_modules])\n\n def unroll_inner_modules(self) -> List[torch.nn.Module]:\n result = []\n for m in self.inner_modules:\n if not isinstance(m, DagModule):\n result.append(m)\n else:\n result.extend(m.unroll_inner_modules())\n return result\n\n def save(self, path: str):\n # TODO: Remove after validation\n # self._update_inner_modules()\n self.enforce_names_uniqueness()\n os.makedirs(path, exist_ok=True)\n atomic_modules = self.unroll_inner_modules()\n self.clear_custom_buffers()\n torch.save(torch.nn.ModuleList(atomic_modules), os.path.join(path, 'modules.pt'))\n with open(os.path.join(path, 'config.dict.json'), 'w') as f:\n json.dump(self.config_dict(), f)\n\n def clear_custom_buffers(self):\n for module in self.unroll_inner_modules():\n if hasattr(module, 'clear_custom_buffers'):\n module._buffers.clear()\n\n @classmethod\n def load(\n cls,\n path: str,\n map_location='cpu',\n custom_module_classes: Tuple[Type[torch.nn.Module]] = (),\n ) -> \"DagModule\":\n \"\"\"\n\n :param path: directory from which model should be loaded\n :param map_location: defaults to `cpu`\n :param custom_module_classes: custom torch module classes needed for loading a `DagModule` that was built\n using these modules\n \"\"\"\n with open(os.path.join(path, 'config.dict.json'), 'r') as f:\n config_dict = json.load(f)\n m = torch.load(os.path.join(path, 'modules.pt'), map_location=map_location)\n return cls.load_from_config_dict_and_atomic_modules(\n config_dict=config_dict,\n atomic_modules=m\n )\n\n @classmethod\n def load_from_config_dict_and_atomic_modules(\n cls,\n config_dict: Dict,\n atomic_modules: List[torch.nn.Module]\n ) -> \"DagModule\":\n output_index = config_dict.pop('output_index')\n name = config_dict.pop('name')\n if 'class' in config_dict:\n class_name = config_dict.pop('class')\n else:\n class_name = cls.__name__\n dag = None\n if class_name == cls.__name__:\n dag = cls(name)\n for subclass in cls.__subclasses__():\n if subclass.__name__ == class_name:\n dag = subclass(name)\n\n if dag is None:\n raise NotImplementedError(f'There is no subclass with name: {class_name}.')\n\n for k, (key, config) in enumerate(config_dict.items()):\n if config['type'] == 'input':\n dag.add_input_vertex(name=config['name'])\n else:\n predecessors = [dag.vertices[index] for index in config['predecessor_indices']]\n if config['is_atomic']:\n module = atomic_modules[config['module_index']]\n else:\n module = cls.load_from_config_dict_and_atomic_modules(\n config_dict=config['module_dict'],\n atomic_modules=atomic_modules,\n )\n vertex = dag.add_vertex(\n name=config['name'],\n module=module,\n predecessors=predecessors,\n )\n orbit = config.get('orbit')\n if orbit:\n vertex.orbit = orbit\n if k == output_index:\n dag.output_vertex = vertex\n\n return dag\n\n def config_dict(self, atomic_modules: List[torch.nn.Module] = None) -> Dict:\n if atomic_modules is None:\n atomic_modules = self.unroll_inner_modules()\n config_dict = {}\n for k, vertex in enumerate(self.vertices):\n _config = vertex.config_dict(atomic_modules)\n config_dict[k] = _config\n\n config_dict['name'] = self.name\n config_dict['class'] = self.__class__.__name__\n config_dict['output_index'] = self.vertices.index(self.output_vertex)\n return config_dict\n\n def _get_inner_vertex_predecessor_indices(self, inner_vertex: InnerVertex) -> List[int]:\n result = [\n self.vertices.index(predecessor)\n for 
predecessor in inner_vertex.predecessors\n ]\n return result\n\n @property\n def flat(self) -> bool:\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n return False\n return True\n\n def flatten(self, input_shape_for_verification: Optional[Tuple[int, ...]] = None) -> \"DagModule\":\n \"\"\"\n This method will switch the `dag` to `eval` mode if `input_shape_for_verification` is provided.\n :param input_shape_for_verification:\n :return:\n \"\"\"\n dag_copy = deepcopy(self)\n if self.flat:\n return dag_copy\n\n if input_shape_for_verification:\n dag_copy.eval()\n x = torch.normal(mean=torch.zeros(size=input_shape_for_verification))\n reference_output = dag_copy(x)\n\n # builds a new cell (not in place flatten)\n dag = self.__class__(name=dag_copy.name, vertices=dag_copy.input_vertices)\n for v in dag_copy.inner_vertices:\n if not isinstance(v.module, DagModule):\n dag.vertices.append(v)\n v.dag_module = dag\n if v == dag_copy.output_vertex:\n dag.output_vertex = v\n else:\n inner_dag_predecessors = v.predecessors\n inner_dag_successors = v.successors\n inner_dag = v.module.flatten()\n for iv in inner_dag.inner_vertices:\n for pd in iv.predecessors: # remap predecessors where needed\n if isinstance(pd, InputVertex):\n pd_index_in_inner_dag = inner_dag.input_vertices.index(pd)\n index = iv.predecessors.index(pd)\n iv.predecessors[index] = inner_dag_predecessors[pd_index_in_inner_dag]\n if inner_dag.output_vertex == iv: # remap output of inner dag\n for suc in inner_dag_successors:\n index = suc.predecessors.index(v)\n suc.predecessors[index] = iv\n iv.dag_module = dag\n dag.vertices.append(iv)\n if v == dag_copy.output_vertex:\n dag.output_vertex = iv\n assert all([e in dag.vertices for e in iv.predecessors])\n\n if input_shape_for_verification:\n dag.eval()\n new_output = dag(x)\n assert torch.abs(reference_output - new_output).sum() == 0.0\n\n # TODO: Remove after validation\n # self._update_inner_modules()\n dag.enforce_names_uniqueness()\n\n return dag\n\n def enforce_names_uniqueness(self):\n names = [v.name for v in self.vertices]\n while len(names) != len(set(names)):\n names_counter = Counter()\n for v in self.vertices:\n name = v.name\n names_counter[name] += 1\n if names_counter[name] > 1:\n new_name = f'{name}_{names_counter[name] - 1}'\n logger.debug(f'Renaming: {name} -> {new_name}')\n v.name = new_name\n names = [v.name for v in self.vertices]\n\n def clear_tensor_dicts(self):\n self.forward_dict = None\n self.inputs_dict = None\n\n @property\n def device(self):\n # https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180/10\n # useful, but may be dangerous\n self.update_inner_modules()\n device_ = next(iter(self.parameters())).device\n if not all([p.device == device_ for p in self.parameters()]):\n raise AssertionError(f'Not all parameters of {self.name} are on the same device')\n return device_" }, { "identifier": "InputVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InputVertex(Vertex):\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n return {\n 'name': self.name,\n 'type': 'input',\n }" }, { "identifier": "InnerVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InnerVertex(Vertex):\n def __init__(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ):\n super().__init__(name=name)\n self._module = module\n self._predecessors = list(predecessors)\n self.dag_module: \"DagModule\" = None\n self.orbit = None\n\n @property\n def successors(self) -> List['InnerVertex']:\n 
if self.dag_module is None:\n logger.error(f'Trying to get successors of an InnerVertex that has not been assigned to any DagModule.')\n return [vertex for vertex in self.dag_module.inner_vertices if self in vertex.predecessors]\n\n @property\n def predecessors(self) -> List[Vertex]:\n return self._predecessors\n\n @property\n def predecessor_indices(self) -> List[Vertex]:\n return [self.dag_module.vertices.index(pd) for pd in self.predecessors]\n\n @predecessors.setter\n def predecessors(self, new_predecessors: List[Vertex]):\n if not isinstance(new_predecessors, list):\n logger.error(f'Predecessors is expected to be a list. Got {type(new_predecessors)} except.')\n self._predecessors = new_predecessors\n\n @property\n def module(self) -> torch.nn.Module:\n return self._module\n\n @module.setter\n def module(self, module: torch.nn.Module):\n self._module = module\n # TODO: Remove after validation\n self.dag_module.update_inner_modules()\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n is_atomic = not isinstance(self.module, DagModule)\n result = {\n 'name': self.name,\n 'predecessor_indices': self.predecessor_indices,\n 'is_atomic': is_atomic,\n 'type': 'inner',\n 'orbit': self.orbit,\n }\n if not is_atomic:\n result['module_dict'] = self.module.config_dict(atomic_modules)\n else:\n result['module_index'] = atomic_modules.index(self.module)\n return result" }, { "identifier": "Vertex", "path": "torch_dag/core/dag_module.py", "snippet": "class Vertex:\n MAX_LEN_REPR = None\n\n def __init__(self, name: str):\n self.name = name\n\n def __repr__(self):\n if self.MAX_LEN_REPR is not None and len(self.name) > self.MAX_LEN_REPR:\n return f'{self.name[:self.MAX_LEN_REPR // 2]}...{self.name[-self.MAX_LEN_REPR // 2:]}'\n return self.name\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n return {\n 'name': self.name,\n }" }, { "identifier": "PASS_THROUGH_CHANNELS_CLASSES", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "PASS_THROUGH_CHANNELS_CLASSES = (\n smodules.ChannelAffineModule,\n smodules.NormalizeModule,\n smodules.LayerNormWithOptionalBias,\n smodules.TfBatchNorm1d,\n nn.BatchNorm2d,\n nn.MaxPool2d,\n nn.AvgPool2d,\n nn.AdaptiveAvgPool2d,\n nn.Dropout,\n nn.Upsample,\n nn.LayerNorm,\n nn.BatchNorm1d,\n MaskModule,\n smodules.PowerModule,\n smodules.AddcmulModule,\n smodules.HalfPixelCentersFalseBilinearUpsample,\n smodules.BilinearUpsampling,\n smodules.PadModule,\n smodules.NormalizeModule,\n smodules.InterpolateModule,\n smodules.ScalarMul,\n smodules.MeanModule,\n\n)" }, { "identifier": "is_source", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_source(module: nn.Module):\n return is_linear_source(module) or is_conv_source(module)" }, { "identifier": "get_orbits_dict", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def get_orbits_dict(dag) -> Dict:\n all_orbit_modules = set([v.module.orbit for v in dag.inner_vertices if isinstance(v.module, MaskModule)])\n return {orbit.name: orbit for orbit in all_orbit_modules}" }, { "identifier": "is_linear_source", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_linear_source(module: nn.Module):\n if isinstance(module, nn.Linear):\n return True\n return False" }, { "identifier": "is_depthwise_conv", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_depthwise_conv(module: nn.Module) -> bool:\n return isinstance(module, (\n nn.Conv2d, nn.ConvTranspose2d)) and module.in_channels == module.groups and module.in_channels > 1" 
}, { "identifier": "OrbitModule", "path": "torch_dag_algorithms/pruning/modules.py", "snippet": "class OrbitModule(torch.nn.Module):\n\n def __init__(\n self,\n name: str,\n num_channels: int,\n distillation_mode: str = constants.PRUNING_DEFAULT_MODE_NAME,\n block_size: Optional[int] = None,\n indices_of_source_vertices=None,\n ):\n super().__init__()\n self.name = name\n self.num_channels = num_channels\n self.distillation_mode = distillation_mode\n self.block_size = block_size\n self._testing_logits = None\n self.conv1 = torch.nn.Conv2d(\n in_channels=num_channels, out_channels=num_channels, kernel_size=3, groups=num_channels)\n self.conv2 = torch.nn.Conv2d(\n in_channels=num_channels,\n out_channels=num_channels,\n kernel_size=1,\n )\n self._optionally_set_block_size_for_whole_block_pruning(distillation_mode=distillation_mode)\n self._validate_distilation_mode_and_block_size(distillation_mode=distillation_mode, block_size=block_size)\n self.bkd_masking_losses = {}\n self.indices_of_source_vertices = indices_of_source_vertices\n self.debug_logits = None\n\n def _validate_distilation_mode_and_block_size(self, distillation_mode: str, block_size: int):\n if distillation_mode not in PRUNING_MODES:\n raise NotImplementedError(f'Distillation mode: {distillation_mode} not supported')\n if distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME and block_size is None:\n raise AssertionError(f'In {constants.PRUNING_BLOCK_SNPE_MODE_NAME} pruning mode block size must not '\n f'be `None`.')\n\n def _optionally_set_block_size_for_whole_block_pruning(self, distillation_mode: str):\n if distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n self.block_size = self.num_channels\n\n @staticmethod\n def clip_logits(\n logits: torch.Tensor,\n clip_val=constants.MAX_LOGITS_ABS_VALUE,\n ) -> torch.Tensor:\n return torch.clip(logits, min=-clip_val, max=clip_val)\n\n @property\n def logits(self) -> torch.Tensor:\n # TODO This is a hack for testing, remove/refactor it\n if self.debug_logits is not None:\n return self.debug_logits\n kernel_size = self.conv1.kernel_size\n device = self.conv1.weight.device\n x = torch.ones(size=(1, self.num_channels, *kernel_size), device=device)\n x = self.conv1(x)\n x = self.conv2(x)\n x = (constants.INITIAL_LOGITS_VALUE_FOR_PRUNING + constants.SIMPLE_ORBIT_LOGITS_MULTIPLIER * x)\n return self.clip_logits(torch.mean(x, dim=(0, 2, 3), keepdim=False))\n\n def compute_average_number_of_output_channels(self):\n if self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n return torch.sigmoid(self.logits).sum()\n\n elif self.distillation_mode in (\n constants.PRUNING_BLOCK_SNPE_MODE_NAME, constants.PRUNING_WHOLE_BLOCK_MODE_NAME):\n split_list = get_split_list_of_logits(logits=self.logits, block_size=self.block_size)\n max_per_block_logits = get_sorted_per_block_max_logits(\n logits=self.logits,\n block_size=self.block_size,\n )\n num_channels = torch.stack(\n [float(block_size) * torch.sigmoid(max_logit) for \\\n block_size, max_logit in zip(split_list, max_per_block_logits)], dim=0).sum()\n return num_channels\n else:\n msg = f'Mode {self.distillation_mode} not implemented for average channels computation.'\n raise NotImplementedError(msg)\n\n def compute_output_channel_masks(\n self,\n predecessors_channel_masks: List[List[torch.Tensor]] = None,\n ) -> List[torch.Tensor]:\n predecessors_channel_masks = [mask_list for mask_list in predecessors_channel_masks if mask_list is not None]\n logits = self.logits\n num_logits = int(logits.shape[0])\n if 
self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n scores_ = torch.where(\n logits > 0.0,\n 1,\n 0,\n )\n elif self.distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 1:\n scores_ = np.ones(shape=(self.block_size,), dtype=np.int32)\n else:\n scores_ = np.zeros(shape=(self.block_size,), dtype=np.int32)\n\n elif self.distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 0:\n # removing whole orbit\n scores_ = np.zeros(shape=(self.num_channels,), dtype=np.int32)\n\n else:\n # compute block indices that are left\n sorted_logits = torch.sort(logits, descending=True)[0]\n split_list = get_split_list_of_logits(logits=logits, block_size=self.block_size)\n split_sorted_logits = list(torch.split(sorted_logits, split_list))\n residual = num_logits % self.block_size\n if residual != 0:\n logits_fake_tail = split_sorted_logits[-1].mean() * torch.ones(\n size=(self.block_size - residual,))\n split_sorted_logits[-1] = torch.cat([split_sorted_logits[-1], logits_fake_tail], dim=0)\n split_sorted_logits = [e.detach().numpy() for e in split_sorted_logits]\n if len(split_sorted_logits) == 1:\n res = split_sorted_logits\n else:\n res = np.take(\n split_sorted_logits,\n axis=0,\n indices=indices_of_blocks_to_leave,\n )\n threshold_value = torch.tensor(res).min()\n scores_ = np.where(\n logits >= threshold_value,\n 1,\n 0,\n )\n else:\n raise NotImplementedError\n\n if len(predecessors_channel_masks) == 0:\n return [torch.tensor(scores_)]\n else:\n return [torch.tensor(np.where(\n predecessors_channel_masks[0][0].sum() == 0,\n np.array([0] * self.num_channels, dtype=np.int32),\n scores_,\n ))]\n\n def sample(self):\n return sample_from_logits(logits=self.logits)" }, { "identifier": "compute_timm_average_num_channels", "path": "torch_dag_timm_plugin/module_multipliers.py", "snippet": "@singledispatch\ndef compute_timm_average_num_channels(\n module: torch.nn.Module,\n vertex: InnerVertex,\n average_number_input_channels: List[List[torch.Tensor]],\n orbits_dict: Dict[str, OrbitModule],\n forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]]\n) -> Union[List[torch.Tensor], None]:\n raise NotImplementedError" }, { "identifier": "CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES", "path": "torch_dag_timm_plugin/module_multipliers.py", "snippet": "CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES = ()" } ]
import logging
import torch
from typing import List, Tuple, Dict, Union
from torch_dag import structured_modules as smodules
from torch_dag.core.dag_module import DagModule
from torch_dag.core.dag_module import InputVertex, InnerVertex, Vertex
from torch_dag_algorithms.pruning.commons import PASS_THROUGH_CHANNELS_CLASSES
from torch_dag_algorithms.pruning.commons import is_source, get_orbits_dict, is_linear_source, is_depthwise_conv
from torch_dag_algorithms.pruning.modules import OrbitModule
from torch_dag_timm_plugin.module_multipliers import compute_timm_average_num_channels, \
    CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES
11,644
#
# Copyright © TCL Research Europe. All rights reserved.
#

logger = logging.getLogger(__name__)

PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES


def shape_to_float(shape, device, dim=1):
    return torch.tensor(shape[dim], device=device).to(torch.float32)


def compute_elementwise_op_average_channels(
    average_number_input_channels: List[List[torch.Tensor]],
):
    average_number_input_channels = [e for e in average_number_input_channels if e is not None]
    if len(average_number_input_channels) == 0:
        return None
    return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))]


def compute_average_num_channels(
#
# Copyright © TCL Research Europe. All rights reserved.
#

logger = logging.getLogger(__name__)

PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES


def shape_to_float(shape, device, dim=1):
    return torch.tensor(shape[dim], device=device).to(torch.float32)


def compute_elementwise_op_average_channels(
    average_number_input_channels: List[List[torch.Tensor]],
):
    average_number_input_channels = [e for e in average_number_input_channels if e is not None]
    if len(average_number_input_channels) == 0:
        return None
    return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))]


def compute_average_num_channels(
vertex: InnerVertex,
3
2023-11-17 15:36:44+00:00
16k
newcastleuniversity/DISPEL
dispel/providers/generic/tasks/gait/lee.py
[ { "identifier": "MeasureValueDefinitionPrototype", "path": "dispel/data/measures.py", "snippet": "class MeasureValueDefinitionPrototype(ValueDefinitionPrototype):\n \"\"\"A task measure value definition prototype.\n\n This is a convenience method that populates the ``cls`` argument with the\n :class:`~dispel.data.measures.MeasureValueDefinition` class.\n \"\"\"\n\n def __init__(self, **kwargs: Any):\n cls = kwargs.pop(\"cls\", MeasureValueDefinition)\n super().__init__(cls=cls, **kwargs)" }, { "identifier": "GREATER_THAN_ZERO", "path": "dispel/data/validators.py", "snippet": "GREATER_THAN_ZERO = RangeValidator(lower_bound=0)" }, { "identifier": "AbbreviatedValue", "path": "dispel/data/values.py", "snippet": "class AbbreviatedValue:\n \"\"\"An abbreviated value.\n\n Examples\n --------\n This class allows to consistently handle abbreviated terms. Assuming you have a name\n of an assessment, e.g. `Cognitive Processing Speed` test and the respective\n abbreviation would be `CPS`, then you can create an abbreviated value like this:\n\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> value = AV('Cognitive Processing Speed test', 'CPS')\n >>> value\n Cognitive Processing Speed test (CPS)\n\n While this seems like a lot of overhead, it comes in handy when describing value\n definitions or higher-level abstractions, such as measure definitions.\n\n Parameters\n ----------\n value\n The full description of the value\n abbr\n The abbreviated form of the value\n\n Attributes\n ----------\n value\n The full description of the value\n \"\"\"\n\n def __init__(self, value: str, abbr: Optional[str] = None):\n self.value = value\n self._abbr = abbr\n\n @property\n def abbr(self):\n \"\"\"Get the abbreviated form of the value.\"\"\"\n return self._abbr or self.value\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n if self._abbr:\n return f\"{self.value} ({self._abbr})\"\n return self.value\n\n def __hash__(self):\n return hash((self.value, self._abbr))\n\n def __eq__(self, other):\n if isinstance(other, str):\n return self._abbr is None and self.value == other\n if isinstance(other, AbbreviatedValue):\n return self.value == other.value and self.abbr == other.abbr\n return False\n\n def __lt__(self, other):\n if not isinstance(other, AbbreviatedValue):\n raise ValueError(f\"Unsupported type in comparison: {type(other)}\")\n if self.value == other.value:\n return self.abbr < other.abbr\n return self.value < other.value\n\n def format(self, *args, **kwargs):\n \"\"\"Format an abbreviated value.\"\"\"\n return AbbreviatedValue(\n self.value.format(*args, **kwargs),\n self._abbr.format(*args, **kwargs) if self._abbr else None,\n )\n\n @classmethod\n def wrap(cls, value):\n \"\"\"Wrap a value into an abbreviated value.\n\n This is a small helper class to conveniently wrap values into an abbreviated\n value, if they are not already one.\n\n Parameters\n ----------\n value\n The value to be wrapped\n\n Returns\n -------\n AbbreviatedValue\n The passed ``value`` if it is an instance of :class:`AbbreviatedValue`. If a\n string is passed, then the string is passed as ``value`` argument to the\n constructor.\n\n Raises\n ------\n ValueError\n If the passed value is neither a string nor an instance of\n :class:`AbbreviatedValue`.\n \"\"\"\n if isinstance(value, cls):\n return value\n if isinstance(value, str):\n return cls(value)\n\n raise ValueError(f\"Can only wrap string values. 
Got: {type(value)}\")" }, { "identifier": "ProcessingStep", "path": "dispel/processing/core.py", "snippet": "class ProcessingStep:\n r\"\"\"A processing step in a processing sequence.\n\n :class:`ProcessingStep` is the basic entity through which\n :class:`~dispel.data.core.Reading`\\ s are processed. The processing step's\n :meth:`process_reading` function is called with the reading and additional arguments\n passed to :func:`process`. Results from the process step are expected to be an\n instance of :class:`ProcessingResult`. For a comprehensive description see\n :ref:`measure-extraction`.\n\n The method :meth:`flag_reading` can be overwritten to ensure that the reading\n about to be processed is valid, and return\n :class:`~dispel.data.flags.Flag`\\ s if that is not the case.\n\n Examples\n --------\n .. testsetup:: processing-step\n\n >>> import pandas as pd\n >>> import numpy as np\n\n >>> from dispel.data.core import Reading\n >>> from dispel.data.levels import Level\n >>> from dispel.data.raw import (RawDataSet, RawDataSetDefinition,\n ... RawDataValueDefinition)\n\n >>> reading = Reading(\n ... evaluation=None,\n ... levels=[\n ... Level(id_='my-level', start=0, end=1, raw_data_sets=[\n ... RawDataSet(\n ... RawDataSetDefinition('my-data-set', None, [\n ... RawDataValueDefinition('dummy', 'dummy')\n ... ]),\n ... pd.DataFrame({'dummy': list(range(6))})\n ... )\n ... ])\n ... ])\n\n .. doctest:: processing-step\n\n >>> from dispel.data.measures import MeasureValue\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing import process\n >>> from dispel.processing.core import ProcessingResult, ProcessingStep\n >>> class MyStep(ProcessingStep):\n ... def process_reading(self, reading, **kwargs):\n ... level = reading.get_level('my-level')\n ... raw_data_set = level.get_raw_data_set('my-data-set')\n ... data = raw_data_set.data\n ... yield ProcessingResult(\n ... step=self,\n ... sources=raw_data_set,\n ... result=MeasureValue(\n ... ValueDefinition('my-measure-id','max value'),\n ... data.max().max()\n ... )\n ... 
)\n >>> _ = process(reading, MyStep())\n >>> reading.measure_set.get_raw_value('my-measure-id')\n 5\n \"\"\"\n\n def __init__(self):\n self.predecessor = None\n self.successor = None\n\n def process(self, reading: Reading, **kwargs) -> ProcessResultType:\n \"\"\"Check reading for validity and process it.\n\n Parameters\n ----------\n reading\n The reading to be processed\n kwargs\n Additional arguments passed by :func:`process`.\n\n Yields\n ------\n ProcessResultType\n The results from processing readings.\n \"\"\"\n for flag in self.flag_reading(reading, **kwargs):\n yield ProcessingControlResult.from_flag(\n flag=flag,\n step=self,\n targets=self.get_reading_flag_targets(reading, **kwargs),\n )\n try:\n self.assert_valid_reading(reading, **kwargs)\n except AssertionError as error:\n yield ProcessingControlResult.from_assertion_error(step=self, error=error)\n else:\n yield from self.process_reading(reading, **kwargs)\n\n def assert_valid_reading(self, reading: Reading, **kwargs):\n \"\"\"Assert that reading is valid.\"\"\"\n\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n \"\"\"Flag the provided reading.\n\n Parameters\n ----------\n reading\n The reading to be flagged.\n kwargs\n Additional arguments passed by :func:`~dispel.processing.process`.\n\n Yields\n ------\n Flag\n The resulted flags.\n \"\"\"\n # pylint: disable=unused-argument\n yield from []\n\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n \"\"\"Get the reading flag targets.\n\n Parameters\n ----------\n reading\n The reading that is concerned with flagging.\n kwargs\n Additional keyword arguments eventually used for flag targets\n extraction.\n\n Returns\n -------\n Iterable[EntityType]\n An iterable of entities that are flagged.\n \"\"\"\n # pylint: disable=unused-argument\n return [reading]\n\n @abstractmethod\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n \"\"\"Process the provided reading.\n\n Parameters\n ----------\n reading\n The reading to be processed\n kwargs\n Additional arguments passed by :func:`~dispel.processing.process`.\n\n Yields\n ------\n ProcessResultType\n The results from processing readings.\n \"\"\"\n yield NotImplemented\n\n def set_previous(self, step: \"ProcessingStep\"):\n \"\"\"Set the previous step in a processing chain of this step.\"\"\"\n if self.predecessor is not None:\n warnings.warn(\n \"Changing predecessors can lead to side-effects. Previous predecessor \"\n f\"was {self.predecessor}\",\n UserWarning,\n )\n self.predecessor = step\n\n def set_next(self, step: \"ProcessingStep\"):\n \"\"\"Set the next step in a processing chain of this step.\"\"\"\n if self.successor is not None:\n warnings.warn(\n \"Changing successors can lead to side-effects. 
Previous successor was \"\n f\"{self.successor}\",\n UserWarning,\n )\n self.successor = step\n\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n \"\"\"Chain this step with the successor step.\"\"\"\n assert isinstance(successor, ProcessingStep), \"Can only chain processing steps\"\n\n self.set_next(successor)\n successor.set_previous(self)\n return _ChainedProcesses([self, successor])\n\n def __and__(self, other):\n \"\"\"See :meth:`ProcessingStep.chain`.\"\"\"\n return self.chain(other)\n\n def get_parameters(self) -> List[Tuple[str, Parameter]]:\n \"\"\"Get all parameters defined by the processing step.\n\n Returns\n -------\n List[Tuple[str, Parameter]]\n A list of tuples of parameter name and :class:`Parameter`\n objects defined by the processing step.\n \"\"\"\n return inspect.getmembers(self, lambda x: isinstance(x, Parameter))" }, { "identifier": "DEFAULT_AGGREGATIONS", "path": "dispel/processing/extract.py", "snippet": "DEFAULT_AGGREGATIONS: List[Tuple[str, str]] = [\n *BASIC_AGGREGATIONS,\n (\"median\", \"median\"),\n (\"min\", \"minimum\"),\n (\"max\", \"maximum\"),\n]" }, { "identifier": "AggregateRawDataSetColumn", "path": "dispel/processing/extract.py", "snippet": "class AggregateRawDataSetColumn(ExtractStep):\n r\"\"\"An extraction step that allows to summarise a column of a dataset.\n\n This processing step encapsulates the class :class:`ExtractMultipleStep` and allows\n to produce multiple :class:`~dispel.data.measures.MeasureValue`\\ s derived on the same\n column of a dataset.\n\n Parameters\n ----------\n data_set_id\n A single data set id\n column_id\n The column id of the dataset on which the transform function will be applied.\n aggregations\n Either a list of tuples (func, label) where ``func`` consumes the data sets\n specified through ``data_set_id`` at the column ``column_id`` and returns a\n single value passed to :class:`~dispel.data.measures.MeasureValue`. The ``label``\n element of the tuple will be passed as ``aggregation`` keyword to\n :meth:`~dispel.data.values.ValueDefinitionPrototype.create_definition`.\n The label can be either a string or an\n :class:`~dispel.data.values.AbbreviatedValue`. If it is a string the label is\n wrapped with the label and aggregation method as abbreviation.\n\n There are three constants :data:`BASIC_AGGREGATIONS`,\n :data:`DEFAULT_AGGREGATIONS` and :data:`EXTENDED_AGGREGATIONS` that can be used\n for common aggregation scenarios.\n\n The function is passed to :meth:`pandas.Series.agg` and hence allows to specify\n some default aggregation functions like ``'mean'`` or ``'std'`` without actually\n having to pass a callable.\n definition\n A :class:`~dispel.data.values.ValueDefinitionPrototype` that is used to create the\n :class:`~dispel.data.measures.MeasureValueDefinition`\\ s for the aggregation\n functions provided in ``aggregations``.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels\n for extraction. If no filter is provided, all levels will be considered. 
The\n ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\\ s\n and lists of either and passes them to a\n :class:`~dispel.processing.level.LevelIdFilter` for convenience.\n\n Examples\n --------\n To ease the generation of multiple similar measures for the same column of a\n dataset, the :class:`AggregateRawDataSetColumn` provides a convenient way to do so.\n Assume you want to create both the median and standard deviation of a specific\n column of a data set, this can be achieved as follows:\n\n >>> from dispel.data.values import ValueDefinitionPrototype\n >>> from dispel.processing.extract import AggregateRawDataSetColumn\n >>> step = AggregateRawDataSetColumn(\n ... 'data-set-id',\n ... 'column-name',\n ... aggregations=[\n ... ('median', 'median'),\n ... ('std', 'standard deviation')\n ... ],\n ... definition=ValueDefinitionPrototype(\n ... id_='measure-{method}',\n ... name='{method} measure',\n ... unit='s'\n ... )\n ... )\n\n This extraction step will result in two measure values, one for the medianand one\n with the standard deviation of the column ``'column-name'`` of the data set\n identified by ``'data-set-id'``.\n\n This extraction step will result in three measure values, one for the median, one\n for the standard deviation and one for the variation increase of the column\n ``'column-name'`` of the data set identified by ``'data-set-id'``. The median and\n variation increase measures will have associated COI references as provided.\n \"\"\"\n\n column_id: str\n\n aggregations: AggregationsDefinitionType\n\n def __init__(\n self,\n data_set_id: Optional[str] = None,\n column_id: Optional[str] = None,\n aggregations: Optional[AggregationsDefinitionType] = None,\n definition: Optional[ValueDefinitionPrototype] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n super().__init__(\n data_set_ids=data_set_id,\n definition=definition,\n level_filter=level_filter,\n yield_if_nan=False,\n )\n self.column_id = column_id or self.column_id\n self.aggregations = aggregations or self.aggregations\n\n def get_column_id(self) -> str:\n \"\"\"Get the id of the column to be aggregated.\"\"\"\n return self.column_id\n\n def get_aggregations(\n self,\n ) -> Iterable[Tuple[AggregationFunctionType, Union[str, AV]]]:\n \"\"\"Get the aggregations to be performed on the specified column.\"\"\"\n return self.aggregations\n\n def get_agg_func_and_kwargs(\n self, func: AggregationFunctionType, label: Union[AV, str]\n ) -> Tuple[Callable[[pd.DataFrame], float], Dict[str, Any]]:\n \"\"\"Get the keyword arguments for the aggregation.\"\"\"\n agg_func = agg_column(self.get_column_id(), func)\n\n if isinstance(label, AV):\n aggregation = label\n elif callable(func):\n aggregation = AV(label, func.__name__)\n else:\n aggregation = AV(label, str(func))\n\n kwargs: Dict[str, Any] = {\"aggregation\": aggregation}\n\n return agg_func, kwargs\n\n def get_transform_functions(self) -> TransformationFunctionGeneratorType:\n \"\"\"Get the functions to transform the specified column.\"\"\"\n for func, label in self.get_aggregations():\n yield self.get_agg_func_and_kwargs(func, label)\n\n def get_definition(self, **kwargs) -> ValueDefinition:\n \"\"\"Get value definition specific for aggregation.\"\"\"\n definition = super().get_definition(**kwargs)\n\n # intercept flag of center-based aggregations\n assert \"aggregation\" in kwargs, \"Aggregation description missing\"\n assert isinstance(\n agg := kwargs[\"aggregation\"], AV\n ), \"Aggregation keyword must be 
AbbreviatedValue\"\n\n if definition.validator is not None and agg.abbr in AGGREGATION_CENTER_BASED:\n definition.validator = None\n\n return definition" }, { "identifier": "ExtractStep", "path": "dispel/processing/extract.py", "snippet": "class ExtractStep(\n MeasureDefinitionMixin, TransformStepChainMixIn, MutateDataSetProcessingStepBase\n):\n r\"\"\"A measure extraction processing step.\n\n This class provides a convenient way to extract a measure from one or more data sets\n by specifying their id, their level_ids or level filter, a transformation function\n and a measure value definition.\n\n Parameters\n ----------\n data_set_ids\n An optional list of data set ids to be used for the transformation. See\n :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`.\n transform_function\n An optional function to be applied to the data sets. See\n :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`.\n definition\n An optional value definition or prototype. See\n :class:`MeasureDefinitionMixin`.\n level_filter\n An optional filter to limit the levels being processed. See\n :class:`~dispel.processing.level.LevelProcessingStep`.\n yield_if_nan\n If ``True``, yield null values as measure values. Otherwise, processing\n will not return a measure value in case of a null result for the extraction.\n\n Examples\n --------\n Assuming we wanted to compute the maximum value of a raw data set we can create the\n following step\n\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing.extract import ExtractStep\n >>> step = ExtractStep(\n ... 'data-set-id',\n ... lambda data: data.max(axis=0),\n ... ValueDefinition('maximum','Maximum value')\n ... )\n\n A common approach is to define a processing step for re-use and leveraging the\n ``@transformation`` decorator to specify the transformation function:\n\n >>> import pandas as pd\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing.extract import ExtractStep\n >>> from dispel.processing.data_set import transformation\n >>> class MyExtractStep(ExtractStep):\n ... data_set_ids = 'data-set-id'\n ... definition = ValueDefinition('maximum','Maximum value')\n ...\n ... @transformation\n ... def _max(self, data: pd.DataFrame) -> float:\n ... return data.max(axis=0)\n\n Often one wants to extract multiple measures from one data set. This can be achieved\n by using prototypes and optional named arguments with ``@transformation``:\n\n >>> import pandas as pd\n >>> from dispel.data.values import ValueDefinitionPrototype\n >>> from dispel.processing.extract import ExtractStep\n >>> from dispel.processing.data_set import transformation\n >>> class MyExtractStep(ExtractStep):\n ... data_set_ids = 'data-set-id'\n ... definition = ValueDefinitionPrototype(\n ... id_='id-{agg_abbr}',\n ... name='{agg} value'\n ... )\n ...\n ... @transformation(agg='Maximum', agg_abbr='max')\n ... def _max(self, data: pd.DataFrame) -> float:\n ... return data.max(axis=0)\n ...\n ... @transformation(agg='Minimum', agg_abbr='min')\n ... def _min(self, data: pd.DataFrame) -> float:\n ... 
return data.min(axis=0)\n\n \"\"\"\n\n yield_if_nan: bool = False\n\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_function: Optional[Callable[..., Any]] = None,\n definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None,\n level_filter: Optional[LevelFilterType] = None,\n yield_if_nan: Optional[bool] = None,\n ):\n super().__init__(\n definition=definition,\n data_set_ids=data_set_ids,\n transform_function=transform_function,\n level_filter=level_filter,\n )\n self.yield_if_nan = yield_if_nan or self.yield_if_nan\n\n def wrap_result(\n self, res: Any, level: Level, reading: Reading, **kwargs: Any\n ) -> WrapResultGeneratorType:\n \"\"\"Wrap the result from the processing function into a class.\n\n Parameters\n ----------\n res\n Any result returned by the extraction step. If res is a\n :class:`~dispel.data.flags.WrappedResult`, the flag contained\n in the object will be automatically added to the\n :class:`~dispel.data.measures.MeasureValue`, hence the flagged wrapped\n results will always translate into flagged\n :class:`~dispel.data.measures.MeasureValue`.\n level\n The current level\n reading\n The current reading\n kwargs\n Additional kwargs\n\n Yields\n ------\n LevelProcessingResult\n The processing result\n \"\"\"\n try:\n if len(res) == 0:\n res = math.nan\n warnings.warn(\"Extract step returned an iterable!\", UserWarning)\n except TypeError:\n pass\n if is_wrapped := isinstance(res, WrappedResult):\n measure_value = res.measure_value\n else:\n measure_value = res\n\n if not (is_nan := math.isnan(measure_value)) or (is_nan and self.yield_if_nan):\n value = self.get_value(measure_value, **kwargs)\n # If result is wrapped, add the flag to the measure value\n if is_wrapped:\n value.add_flags(res, ignore_duplicates=True)\n\n yield LevelProcessingResult(\n step=self,\n sources=self.get_raw_data_sets(level),\n result=value,\n level=level,\n )" }, { "identifier": "ProcessingStepGroup", "path": "dispel/processing/level.py", "snippet": "class ProcessingStepGroup(LevelFilterProcessingStepMixin, CoreProcessingStepGroup):\n r\"\"\"A group of processing steps with an optional level filter.\n\n For examples see :class:`dispel.processing.core.CoreProcessingStepGroup`. 
This class\n ensures that level filters are injected to the steps of this group.\n \"\"\"\n\n def set_steps(self, steps: List[ProcessingStep]):\n \"\"\"Set processing steps part of the group.\n\n This method ensures that steps added to the group inherit the level filter of\n the group.\n\n Parameters\n ----------\n steps\n The steps contained in the processing group.\n \"\"\"\n for step in steps:\n if isinstance(step, LevelFilterProcessingStepMixin):\n step.inject_level_filter_from_step(self)\n\n super().set_steps(steps)\n\n def inject_level_filter_from_step(self, step: LevelFilterProcessingStepMixin):\n \"\"\"Inject level filter into group and steps in group.\"\"\"\n super().inject_level_filter_from_step(step)\n for group_step in self.get_steps():\n if isinstance(group_step, LevelFilterProcessingStepMixin):\n group_step.inject_level_filter_from_step(self)" }, { "identifier": "BoutStrategyModality", "path": "dispel/providers/generic/tasks/gait/bout_strategy.py", "snippet": "class BoutStrategyModality(AVEnum):\n \"\"\"Enumerate bout strategy modalities.\"\"\"\n\n FIRST_TWO = AV(\"first two minutes\", \"2min\")\n LONGEST = AV(\"longest bout\", \"lb\")\n BASIC = AV(\"all bouts\", \"ab\")\n\n @property\n def bout_cls(self):\n \"\"\"Return BoutStrategy instance.\"\"\"\n mapping = {\n self.LONGEST: LongestBoutStrategy(),\n self.FIRST_TWO: FirstTwoMinBoutStrategy(),\n self.BASIC: BoutStrategy(),\n }\n return mapping[self]" }, { "identifier": "DetectStepsProcessingBase", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "class DetectStepsProcessingBase(TransformStep, metaclass=ABCMeta):\n \"\"\"An abstract base class for step detection.\n\n Given the step detection algorithm specified through the method argument,\n e.g.: :func:`dispel.providers.generic.tasks.gait.lee.detect_steps`, the transform\n step run the step detection on each of the walking bouts and create a\n generic pandas.DataFrame with annotated events as in Bourke et. 
al.\n \"\"\"\n\n definitions = [DEF_FOOT, DEF_EVENT, DEF_BOUT_ID]\n\n @transformation\n def _detect_steps(self, bouts, *data_sets):\n \"\"\"For each walking bout, run the step_detection.\"\"\"\n # pylint: disable=no-member\n if not (\n hasattr(self, \"step_detection_method\")\n and callable(self.step_detection_method)\n ):\n raise NotImplementedError(\n \"The step detection method has not been defined correctly as \"\n \"a callable.\"\n )\n steps = []\n walk_bouts = bouts[bouts[\"detected_walking\"]]\n for _, bout in walk_bouts.iterrows():\n bout_data_sets = [\n data_set[bout[\"start_time\"] : bout[\"end_time\"]]\n for data_set in data_sets\n ]\n bout_steps = self.step_detection_method(*bout_data_sets)\n bout_steps[\"bout_id\"] = bout[\"bout_id\"]\n steps.append(bout_steps)\n if len(steps) > 0:\n return pd.concat(steps).sort_index()\n return pd.DataFrame(columns=[\"foot\", \"event\", \"bout_id\"])" }, { "identifier": "DetectStepsWithoutBoutsBase", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "class DetectStepsWithoutBoutsBase(TransformStep, metaclass=ABCMeta):\n \"\"\"Generic detect steps transform without walking bouts.\"\"\"\n\n definitions = [DEF_FOOT, DEF_EVENT]" }, { "identifier": "ExtractPowerBoutDivSteps", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "class ExtractPowerBoutDivSteps(GaitBoutExtractStep):\n \"\"\"Extract step to compute the Step Power.\n\n Parameters\n ----------\n data_set_ids\n The data set ids corresponding to first the walking bouts\n then the signal to take as the magnitude of acceleration and\n the step dataset.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine\n the levels for extraction.\n \"\"\"\n\n def __init__(self, data_set_ids: str, **kwargs):\n step_detector = get_step_detector_from_data_set_ids(data_set_ids, 2)\n\n def _function(acc_magnitude, step_dataset, level):\n return power_bout_div_steps(\n acc_magnitude[\"vertical_acc\"],\n step_dataset,\n level.get_raw_data_set(\"movement_bouts\").data,\n )\n\n description = (\n \"The integral of the centered acceleration magnitude \"\n \"between the first and last step divided by the \"\n f\"number of steps computed with {step_detector} \"\n \"algorithm and the bout strategy {bout_strategy_repr}.\"\n )\n\n definition = MeasureValueDefinitionPrototype(\n measure_name=AV(\"step power\", \"sp\"),\n data_type=\"int16\",\n validator=GREATER_THAN_ZERO,\n description=description,\n )\n super().__init__(data_set_ids, _function, definition, **kwargs)" }, { "identifier": "ExtractStepCount", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "class ExtractStepCount(GaitBoutExtractStep):\n \"\"\"Extract step count.\n\n Parameters\n ----------\n data_set_ids\n A list of two elements with the id of the walking bout data set and\n the id of the step_dataset containing the detected steps formatted as\n the output of TransformStepDetection.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine\n the levels for extraction.\n \"\"\"\n\n definition = MeasureValueDefinitionPrototype(\n measure_name=AV(\"step count\", \"sc\"),\n data_type=\"uint16\",\n validator=GREATER_THAN_ZERO,\n description=\"The number of steps detected with {step_detector} \"\n \"algorithm with the bout strategy {bout_strategy_repr}.\",\n )\n\n transform_function = step_count\n\n def get_definition(self, **kwargs) -> ValueDefinition:\n \"\"\"Get the measure definition.\"\"\"\n step_detector = get_step_detector_from_data_set_ids(\n 
next(iter(self.get_data_set_ids())), 1\n )\n kwargs[\"step_detector\"] = step_detector\n return super().get_definition(**kwargs)" }, { "identifier": "ExtractStepDurationAll", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "class ExtractStepDurationAll(GaitBoutAggregateStep):\n \"\"\"Extract step duration related measures.\n\n Parameters\n ----------\n data_set_ids\n The data set ids that will be considered to extract step duration\n measures.\n \"\"\"\n\n def __init__(self, data_set_ids: List[str], **kwargs):\n step_detector = get_step_detector_from_data_set_ids(data_set_ids, 1)\n description = (\n \"The {aggregation} time of a step detected with the \"\n f\"{step_detector} algorithm. It is computed with the bout \"\n \"strategy {bout_strategy_repr}.\"\n )\n\n definition = MeasureValueDefinitionPrototype(\n measure_name=AV(\"step duration\", \"step_dur\"),\n data_type=\"float64\",\n unit=\"s\",\n validator=GREATER_THAN_ZERO,\n description=description,\n )\n\n super().__init__(\n data_set_ids, \"step_duration\", DEFAULT_AGGREGATIONS, definition, **kwargs\n )" }, { "identifier": "FootUsed", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "class FootUsed(IntEnum):\n \"\"\"Information on the foot being used for step annotation.\"\"\"\n\n LEFT = 1\n RIGHT = 2\n UNKNOWN = 0" }, { "identifier": "StepEvent", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "class StepEvent(IntEnum):\n \"\"\"Generic events for step annotation.\"\"\"\n\n UNKNOWN = 0\n INITIAL_CONTACT = 1\n MID_SWING = 2\n FOOT_OFF = 3" }, { "identifier": "power_bout_div_steps", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "def power_bout_div_steps(\n acc_magnitude: pd.Series,\n step_dataset: pd.DataFrame,\n walking_bouts: Optional[pd.DataFrame] = None,\n) -> float:\n \"\"\"Compute the Step Power based on bout power divided by number of steps.\n\n The step power is defined as the integral of centered magnitude\n acceleration divided by the number of steps. For more information\n please see the section 2.3 of [1]_.\n\n Parameters\n ----------\n acc_magnitude\n A pandas series containing the magnitude of the acceleration.\n step_dataset\n A pandas data frame containing the detected steps formatted as the\n output of TransformStepDetection.\n walking_bouts\n A dataframe with the concatenated walking bouts.\n\n Returns\n -------\n float\n The step power.\n\n References\n ----------\n .. [1] Cheng WY. et al. 
(2018) Large-Scale Continuous Mobility Monitoring\n of Parkinson’s Disease Patients Using Smartphones.\n \"\"\"\n\n def _bout_power(start: int, end: int, magnitude: pd.Series):\n \"\"\"Compute the power of a walking bout.\"\"\"\n abs_acc = magnitude[start:end].dropna().abs()\n return compute_step_power_single(abs_acc)\n\n step_dataset = step_dataset[step_dataset.event == StepEvent.INITIAL_CONTACT]\n if walking_bouts is None:\n t_start = step_dataset.index[0]\n t_end = step_dataset.index[-1]\n return _bout_power(t_start, t_end, acc_magnitude) / step_count(step_dataset)\n # keep true walking bout only\n walk_bouts = walking_bouts[walking_bouts[\"detected_walking\"]]\n\n # Initialize the walking power to zero\n # The step power over several walking bout is defined as the sum\n # of the power of each walking bout divided by the total number of\n # steps, it is different from the sum of the step power for each bouts.\n walking_power = 0\n step_counts = 0\n for bout_it in range(len(walk_bouts)):\n t_start = walk_bouts.iloc[bout_it][\"start_time\"]\n t_end = walk_bouts.iloc[bout_it][\"end_time\"]\n walking_power += _bout_power(t_start, t_end, acc_magnitude)\n step_counts += step_count(step_dataset[t_start:t_end])\n if step_counts == 0:\n return 0\n return walking_power / step_counts" }, { "identifier": "step_count", "path": "dispel/providers/generic/tasks/gait/core.py", "snippet": "def step_count(data: pd.DataFrame) -> int:\n \"\"\"Count the number of steps.\n\n Parameters\n ----------\n data\n A pandas data frame containing the detected steps.\n\n Returns\n -------\n int\n The number of steps.\n \"\"\"\n return (data[\"event\"] == StepEvent.INITIAL_CONTACT).sum()" }, { "identifier": "ExtractHipRotation", "path": "dispel/providers/generic/tasks/gait/hip.py", "snippet": "class ExtractHipRotation(CoreProcessingStepGroup):\n \"\"\"Group processing steps for Hip Rotation measures.\n\n Parameters\n ----------\n step_detection_id\n The raw data set that contains the step detection generic data set.\n e.g.: ``lee``.\n on_walking_bouts\n A boolean indicating if the computation should be made on walking\n bouts only.\n kwargs\n Additional named arguments are passed to the\n :meth:`~dispel.processing.core.ProcessingStep.process` function of each\n step.\n \"\"\"\n\n def __init__(self, data_set_id: str, **kwargs):\n steps: List[ProcessingStep] = []\n for hip_sign in HIP_SIGNS:\n steps.extend([AggHipRotation(data_set_id, hip_sign, **kwargs)])\n super().__init__(steps, **kwargs)" }, { "identifier": "ExtractHipRotationWithoutBouts", "path": "dispel/providers/generic/tasks/gait/hip.py", "snippet": "class ExtractHipRotationWithoutBouts(CoreProcessingStepGroup):\n \"\"\"Group processing steps for Hip Rotation measures without walking bouts.\n\n Parameters\n ----------\n step_detection_id\n The raw data set that contains the step detection generic data set.\n e.g.: ``lee``.\n on_walking_bouts\n A boolean indicating if the computation should be made on walking\n bouts only.\n kwargs\n Additional named arguments are passed to the\n :meth:`~dispel.processing.core.ProcessingStep.process` function of each\n step.\n \"\"\"\n\n def __init__(self, data_set_id: str, **kwargs):\n steps: List[ProcessingStep] = []\n for hip_sign in HIP_SIGNS:\n steps.extend([AggHipRotationWithoutBout(data_set_id, hip_sign, **kwargs)])\n super().__init__(steps, **kwargs)" }, { "identifier": "HipRotationGroup", "path": "dispel/providers/generic/tasks/gait/hip.py", "snippet": "class HipRotationGroup(CoreProcessingStepGroup):\n \"\"\"Group 
processing steps for Hip Rotation transforms.\n\n Parameters\n ----------\n step_detection_id\n The raw data set that contains the step detection generic data set.\n e.g.: ``lee``.\n on_walking_bouts\n A boolean indicating if the computation should be made on walking\n bouts only.\n kwargs\n Additional named arguments are passed to the\n :meth:`~dispel.processing.core.ProcessingStep.process` function of each\n step.\n \"\"\"\n\n def __init__(self, step_detection_id: str, on_walking_bouts: bool, **kwargs):\n steps: List[ProcessingStep] = [\n # Transform Hip Rotation with lee step detection\n TransformHipRotation(\n rotation_speed_id=\"gyroscope_ts_rotated_resampled_\"\n \"butterworth_low_pass_filter\",\n step_detection_id=step_detection_id,\n component=\"x\",\n on_walking_bouts=on_walking_bouts,\n )\n ]\n for hip_sign in HIP_SIGNS:\n steps.extend(\n [\n SignHipRotation(\n step_detection_id=step_detection_id,\n hip_rotation_sign=hip_sign,\n on_walking_bouts=on_walking_bouts,\n )\n ]\n )\n\n super().__init__(steps, **kwargs)" }, { "identifier": "index_time_diff", "path": "dispel/signal/core.py", "snippet": "def index_time_diff(data: Union[pd.Series, pd.DataFrame]) -> pd.Series:\n \"\"\"Get the time difference from the index in seconds.\n\n Parameters\n ----------\n data\n The series or data frame with a time-based index\n\n Returns\n -------\n pandas.Series\n A series containing the time difference in seconds between each row based on the\n index.\n \"\"\"\n assert isinstance(\n data.index, (pd.DatetimeIndex, pd.TimedeltaIndex)\n ), \"Index must be a pandas DatetimeIndex or TimedeltaIndex\"\n\n return data.index.to_series().diff().dt.total_seconds()" } ]
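The power_bout_div_steps snippet in the context above defines step power over several walking bouts as the sum of the per-bout integrals of the centred acceleration magnitude divided by the total number of detected initial contacts, returning 0 when no steps are found. A minimal self-contained sketch of that aggregation rule follows; the bout_powers and bout_steps numbers are invented for illustration and are not taken from the dispel implementation.

# Hedged sketch of the aggregation rule described in power_bout_div_steps:
# total step power = (sum of per-bout powers) / (total detected steps).
bout_powers = [12.4, 8.1, 5.5]  # integral of |centred acceleration| per walking bout (made-up values)
bout_steps = [20, 14, 9]        # initial contacts counted in each bout (made-up values)

total_steps = sum(bout_steps)
step_power = sum(bout_powers) / total_steps if total_steps else 0
print(round(step_power, 3))  # power spent per step across all bouts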
import enum import pandas as pd from typing import List, Optional, Tuple from dispel.data.measures import MeasureValueDefinitionPrototype from dispel.data.validators import GREATER_THAN_ZERO from dispel.data.values import AbbreviatedValue as AV from dispel.processing.core import ProcessingStep from dispel.processing.extract import ( DEFAULT_AGGREGATIONS, AggregateRawDataSetColumn, ExtractStep, ) from dispel.processing.level import ProcessingStepGroup from dispel.providers.generic.tasks.gait.bout_strategy import BoutStrategyModality from dispel.providers.generic.tasks.gait.core import ( DetectStepsProcessingBase, DetectStepsWithoutBoutsBase, ExtractPowerBoutDivSteps, ExtractStepCount, ExtractStepDurationAll, FootUsed, StepEvent, power_bout_div_steps, step_count, ) from dispel.providers.generic.tasks.gait.hip import ( ExtractHipRotation, ExtractHipRotationWithoutBouts, HipRotationGroup, ) from dispel.signal.core import index_time_diff
11440
t_thresh=peak_threshold, acc_threshold=acc_peak, further=False, ): # (3) _set_state(it_peak, StepState.INTMD) _set_state(it_n, StepState.PEAK) last_state = StepState.PEAK it_peak = it_n acc_peak, peak_threshold = _update_peak_valley( res, StepState.PEAK, it_n, BETA, M_SIGMA ) # This should only be triggered once at the initialization of the # algorithm, when it is 1. elif last_state == StepState.INITIAL: # (1) _set_state(it_n, StepState.PEAK) last_state = StepState.PEAK it_peak = it_n acc_peak, peak_threshold = _update_peak_valley( res, StepState.PEAK, it_n, BETA, M_SIGMA ) elif candidate_state == StepState.VALLEY: if _check_state( data=res["vertical_acc"], last_state=last_state, expected_state=StepState.PEAK, index_s=it_valley, index_c=it_n, t_thresh=valley_threshold, ): # (4) _set_state(it_n, StepState.VALLEY) last_state = StepState.VALLEY it_valley = it_n acc_valley, valley_threshold = _update_peak_valley( res, StepState.VALLEY, it_n, BETA, M_SIGMA ) step_index += 1 mu_a = (acc_peak + acc_valley) / 2 elif _check_state( data=res["vertical_acc"], last_state=last_state, expected_state=StepState.VALLEY, index_s=it_valley, index_c=it_n, t_thresh=valley_threshold, acc_threshold=acc_valley, greater=False, further=False, ): # (5) _set_state(it_valley, StepState.INTMD) _set_state(it_n, StepState.VALLEY) last_state = StepState.VALLEY it_valley = it_n acc_valley, valley_threshold = _update_peak_valley( res, StepState.VALLEY, it_n, BETA, M_SIGMA ) # Update sigma sigma_a = _update_sigma(res.iloc[max(0, it_n - K_SIGMA) : it_n]["vertical_acc"]) res.loc[res.index[it_n], "step_index"] = step_index return res def detect_steps(data: pd.DataFrame) -> pd.DataFrame: """Run step Detection Algorithm from Lee et al. and format the results. We use a revisited Lee et al. algorithm since we don't perform step detection on the acceleration norm but on the vertical acceleration. The results are formatted to return a generic data frame with the following columns: ``timestamp``, ``event``, ``foot``. Where ``event`` annotate what is happening as in Bourke et al. doi:10.3390/s20205906. Parameters ---------- data A data frame containing a column 'vertical_acc' referring to the vertical acceleration. Returns ------- pandas.DataFrame A pandas data frame with columns ``event``, ``foot`` and ``timestamp``. """ detected_steps = _detect_steps(data["vertical_acc"]) timestamp = detected_steps.index[detected_steps["state"] == StepState.PEAK] return pd.DataFrame( { "event": StepEvent.INITIAL_CONTACT, "foot": FootUsed.UNKNOWN, "timestamp": timestamp, } ).set_index(keys="timestamp") class LeeDetectSteps(DetectStepsProcessingBase): """Detect steps using Lee et al. algorithm on vertical acceleration.""" new_data_set_id = "lee_with_walking_bouts" @staticmethod def step_detection_method(data: pd.DataFrame) -> pd.DataFrame: """Define and declare the step detection as a static method.""" return detect_steps(data) class LeeDetectStepsWithoutBout(DetectStepsWithoutBoutsBase): """Detect steps using Lee et al. algorithm on vertical acceleration.""" data_set_ids = "vertical_acceleration" new_data_set_id = "lee" transform_function = detect_steps class LeeStepCountWithoutBout(ExtractStep): """Extract step count with lee dataset without walking bouts.""" def __init__(self, **kwargs): data_set_ids = "lee"
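The cropped code above ends inside LeeStepCountWithoutBout, but detect_steps itself is complete: it expects a time-indexed DataFrame with a 'vertical_acc' column and returns a frame of initial-contact events indexed by timestamp. A usage sketch on synthetic 50 Hz data follows; the import path dispel.providers.generic.tasks.gait.lee is an assumption inferred from the context paths, not confirmed by this record.

import numpy as np
import pandas as pd

# Assumed import path for the module shown in this record (not confirmed here).
from dispel.providers.generic.tasks.gait.lee import detect_steps

# Synthetic vertical acceleration: a ~2 Hz oscillation sampled at 50 Hz,
# roughly mimicking the walking-cadence signal the algorithm expects.
idx = pd.date_range("2023-01-01", periods=500, freq="20ms")
vertical_acc = np.sin(2 * np.pi * 2 * np.arange(500) / 50)
data = pd.DataFrame({"vertical_acc": vertical_acc}, index=idx)

steps = detect_steps(data)  # DataFrame with 'event' and 'foot', indexed by timestamp
print(len(steps), "initial contacts detected")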
"""Step detection module specific to Lee et al. algorithm. This module contains functionality to perform step detection with a revisited version of the Lee et al. algorithm. """ class StepState(enum.IntEnum): """Step detection states for Lee et al. algorithm.""" PEAK = 2 VALLEY = 1 INTMD = 0 INITIAL = -1 LEE_MOD = AV("Lee algorithm", "lee") r"""A modality indicating something has been computed with Lee algorithm.""" # MODEL CONSTANTS K_SIGMA = 25 r"""Parameter K_SIGMA should be selected such that the step deviation can reflect the long-term variation in the statistics of the vertical acceleration. The value of 25 is assigned to K to cover one step cycle in normal walking speed with the sampling rate of 50 Hz.""" M_SIGMA = 10 r"""Parameter M should be selected with :math:`\beta` such that the statistics of peak or valley intervals can reflect the time-varying speed of walking or running and the noisy peaks or valleys can be delineated from real peaks or valleys.""" ALPHA = 4 r"""Parameter :math:`\alpha` is a magnitude constant that should be assigned so as not to disturb the peak or valley detection due to large step deviation during step mode change, especially from running to walking.""" BETA = 1 / 3 r"""Parameter :math:`\beta` is a time scale constant that should be assigned with M. It is used to rescale (as a denominator) the standard deviation of the last M peak or valley time-intervals when computing the time threshold used to accept or reject any peak or valley candidate.""" # Parameters Initialization (not defined in the paper). DEFAULT_MU = 0.25 r"""Default parameter :math:`\mu` is used to initiate the average time between two consecutive peaks (valleys) for the last M peaks (valleys).""" DEFAULT_SIGMA = 0.0 r"""Default parameter :math:`\sigma` is used to initiate the standard deviation of the time between two consecutive peaks (valleys) for the last M peaks (valleys).""" DEFAULT_SIGMA_A = 0.0 r"""Default parameter :math:`\sigma` is used to initiate the standard deviation of the vertical acceleration for recent K_SIGMA acceleration samples.""" DEFAULT_PEAK_THRESHOLD = 0.025 # the adaptive time threshold for peaks r"""Default parameter peak threshold is used to initialize the adaptive time threshold for peaks. This threshold will be used to accept or reject a peak candidate based on the time-interval separating it from the previous peak in addition to other conditions.""" DEFAULT_VALLEY_THRESHOLD = 0.025 # the adaptive time threshold for valley r"""Default parameter valley threshold is used to initialize the valley threshold. This threshold will be used to accept or reject any valley candidates based on the time interval separating it from the previous valley candidate and other conditions.""" DEFAULT_VALLEY_ACC = 0.0 r"""Default parameter used to initialize the vertical acceleration of a valley.""" DEFAULT_PEAK_ACC = 1.0 r"""Default parameter used to initialize the vertical acceleration of a peak.""" def _detect_candidate( data: pd.Series, index_sample: int, mu_a: float, sigma_a: float, alpha: float ) -> StepState: """Detect peak and valley candidates in the signal. This function labels each sample as valley, peak, or intermediate. The sample is considered a peak if: case 1 it is the first sample or case 2 , if the sample vertical acceleration is greater than the previous and next vertical acceleration and more significant than the average detection step (plus a modulation). Parameters ---------- data A series of the vertical acceleration. 
index_sample An integer indicating which sample is under examination. mu_a The average of the vertical acceleration of a step. Defined as the mean of the magnitude of the recent peak and recent valley. sigma_a The standard deviation of the vertical acceleration. alpha A constant to modulate the threshold on vertical acceleration used to label a sample. Returns ------- StepState A label indicating if the sample is a good candidate for a peak, a valley or an intermediate sample. """ if index_sample == 1: return StepState.PEAK acc_minus, acc, acc_plus = data.iloc[index_sample - 1 : index_sample + 2] if acc > max(max(acc_minus, acc_plus), mu_a + sigma_a / alpha): return StepState.PEAK if acc < min(min(acc_minus, acc_plus), mu_a - sigma_a / alpha): return StepState.VALLEY return StepState.INTMD def _update_peak_valley( data: pd.DataFrame, new_state: StepState, index_sample: int, beta: float, m_sigma: int, ) -> Tuple[float, float]: """Update a peak or a valley. Parameters ---------- data A data frame of the vertical acceleration and states. new_state Either peak or valley, it indicates if a peak or a valley is to be updated. index_sample An integer indicating which sample is under examination. beta A time scale constant. m_sigma A parameter used to delineate noisy peaks or valley from real peaks or valleys. Returns ------- Tuple[float, float] The vertical acceleration of current sample. And the minimum time distance to the recent peak (or valley). """ peaks_or_valley = data.loc[data["state"] == new_state] t_between = index_time_diff(peaks_or_valley)[1:] if len(t_between) > 1: # enough data sigma = t_between.tail(m_sigma).std() mu_x = t_between.tail(m_sigma).mean() elif len(t_between) == 1: # just enough for the mean mu_x = t_between.tail(m_sigma).mean() sigma = DEFAULT_SIGMA else: # initialization sigma = DEFAULT_SIGMA mu_x = DEFAULT_MU threshold = mu_x - sigma / beta return data.iloc[index_sample]["vertical_acc"], threshold def _update_sigma(data: pd.Series) -> float: """Update sigma. ``sigma_a`` is defined as the standard deviation of the vertical acceleration for recent ``k_sigma`` acceleration samples. Parameters ---------- data A series of the last k_sigma vertical acceleration. Returns ------- float The standard deviation of the vertical acceleration over the last k_sigma samples. """ if len(data) > 1: return data.std() return DEFAULT_SIGMA_A def _check_state( data: pd.Series, last_state: StepState, expected_state: StepState, index_s: int, index_c: int, t_thresh: float, acc_threshold: Optional[float] = None, greater: bool = True, further: bool = True, ) -> bool: """Check conditions on the last_state, time, and vertical acceleration. Parameters ---------- data A series of the vertical acceleration. last_state Either peak or valley, it indicates if a peak or a valley was the last specific state. expected_state Expected value for the last_state. index_s Index of the last state. index_c Index of the current state. t_thresh A threshold on time to remove close peaks/valleys acc_threshold A threshold on the vertical acceleration to potentially replace peaks/valleys. greater A boolean deciding if acc_threshold should be compared with a greater or less than comparator. further A boolean deciding if the time threshold should be compared with a a greater or less than comparator. 
Returns ------- bool A boolean indicating if the conditions are respected """ # Does the last_state matches the expected state c_1 = last_state == expected_state # Is the current sample far enough from the previous specific state c_2 = (data.index[index_c] - data.index[index_s]).total_seconds() if further: c_2 = c_2 > t_thresh else: c_2 = c_2 <= t_thresh # If the acceleration threshold is provided compare it to the current # sample if acc_threshold: c_3 = data[data.index[index_c]] if greater: c_3 = c_3 > acc_threshold else: c_3 = c_3 <= acc_threshold return c_1 and c_2 and c_3 return c_1 and c_2 def _detect_steps(data: pd.Series) -> pd.DataFrame: """Step Detection Algorithm from Lee et al. 2015. Full reference: Lee, H.-H.; Choi, S.; Lee, M.-J. Step Detection Robust against the Dynamics of Smartphones. Sensors 2015, 15, 27230-27250. Parameters ---------- data A series of the vertical acceleration. Returns ------- pandas.DataFrame A data frame containing the candidate and final state detected, as well as the step_index, a variable keeping track of when a step is detected. """ res = pd.DataFrame( { "vertical_acc": data.copy(), "state": StepState.INTMD, "candidate_state": None, "step_index": 0, } ) # the adaptive time threshold for peaks peak_threshold = DEFAULT_PEAK_THRESHOLD # the adaptive time threshold for valley valley_threshold = DEFAULT_VALLEY_THRESHOLD acc_valley = DEFAULT_VALLEY_ACC acc_peak = DEFAULT_PEAK_ACC # the step average mu_a = res["vertical_acc"].mean() # the step deviation of the vertical acceleration sigma_a = DEFAULT_SIGMA_A # ``last_state`` tracks the last particular state, either peak or valley # and will replace Sn-1 in the algorithm description page 27240. last_state = StepState.INITIAL step_index = 0 it_peak = 0 it_valley = 0 def _set_state(i: int, state: StepState, state_column: str = "state"): """Set the state at a given index.""" res.loc[res.index[i], state_column] = state for it_n in range(1, len(res) - 1): # Determine if the sample is a potential peak or valley candidate candidate_state = _detect_candidate( res["vertical_acc"], it_n, mu_a, sigma_a, ALPHA ) # Save the candidate state _set_state(it_n, candidate_state, "candidate_state") # Initialize the ``state`` to 'intermediate' _set_state(it_n, StepState.INTMD) if candidate_state == StepState.PEAK: if _check_state( data=res["vertical_acc"], last_state=last_state, expected_state=StepState.VALLEY, index_s=it_peak, index_c=it_n, t_thresh=peak_threshold, ): # (2) _set_state(it_n, StepState.PEAK) last_state = StepState.PEAK it_peak = it_n acc_peak, peak_threshold = _update_peak_valley( res, StepState.PEAK, it_n, BETA, M_SIGMA ) mu_a = (acc_peak + acc_valley) / 2 elif _check_state( data=res["vertical_acc"], last_state=last_state, expected_state=StepState.PEAK, index_s=it_peak, index_c=it_n, t_thresh=peak_threshold, acc_threshold=acc_peak, further=False, ): # (3) _set_state(it_peak, StepState.INTMD) _set_state(it_n, StepState.PEAK) last_state = StepState.PEAK it_peak = it_n acc_peak, peak_threshold = _update_peak_valley( res, StepState.PEAK, it_n, BETA, M_SIGMA ) # This should only be triggered once at the initialization of the # algorithm, when it is 1. 
elif last_state == StepState.INITIAL: # (1) _set_state(it_n, StepState.PEAK) last_state = StepState.PEAK it_peak = it_n acc_peak, peak_threshold = _update_peak_valley( res, StepState.PEAK, it_n, BETA, M_SIGMA ) elif candidate_state == StepState.VALLEY: if _check_state( data=res["vertical_acc"], last_state=last_state, expected_state=StepState.PEAK, index_s=it_valley, index_c=it_n, t_thresh=valley_threshold, ): # (4) _set_state(it_n, StepState.VALLEY) last_state = StepState.VALLEY it_valley = it_n acc_valley, valley_threshold = _update_peak_valley( res, StepState.VALLEY, it_n, BETA, M_SIGMA ) step_index += 1 mu_a = (acc_peak + acc_valley) / 2 elif _check_state( data=res["vertical_acc"], last_state=last_state, expected_state=StepState.VALLEY, index_s=it_valley, index_c=it_n, t_thresh=valley_threshold, acc_threshold=acc_valley, greater=False, further=False, ): # (5) _set_state(it_valley, StepState.INTMD) _set_state(it_n, StepState.VALLEY) last_state = StepState.VALLEY it_valley = it_n acc_valley, valley_threshold = _update_peak_valley( res, StepState.VALLEY, it_n, BETA, M_SIGMA ) # Update sigma sigma_a = _update_sigma(res.iloc[max(0, it_n - K_SIGMA) : it_n]["vertical_acc"]) res.loc[res.index[it_n], "step_index"] = step_index return res def detect_steps(data: pd.DataFrame) -> pd.DataFrame: """Run step Detection Algorithm from Lee et al. and format the results. We use a revisited Lee et al. algorithm since we don't perform step detection on the acceleration norm but on the vertical acceleration. The results are formatted to return a generic data frame with the following columns: ``timestamp``, ``event``, ``foot``. Where ``event`` annotate what is happening as in Bourke et al. doi:10.3390/s20205906. Parameters ---------- data A data frame containing a column 'vertical_acc' referring to the vertical acceleration. Returns ------- pandas.DataFrame A pandas data frame with columns ``event``, ``foot`` and ``timestamp``. """ detected_steps = _detect_steps(data["vertical_acc"]) timestamp = detected_steps.index[detected_steps["state"] == StepState.PEAK] return pd.DataFrame( { "event": StepEvent.INITIAL_CONTACT, "foot": FootUsed.UNKNOWN, "timestamp": timestamp, } ).set_index(keys="timestamp") class LeeDetectSteps(DetectStepsProcessingBase): """Detect steps using Lee et al. algorithm on vertical acceleration.""" new_data_set_id = "lee_with_walking_bouts" @staticmethod def step_detection_method(data: pd.DataFrame) -> pd.DataFrame: """Define and declare the step detection as a static method.""" return detect_steps(data) class LeeDetectStepsWithoutBout(DetectStepsWithoutBoutsBase): """Detect steps using Lee et al. algorithm on vertical acceleration.""" data_set_ids = "vertical_acceleration" new_data_set_id = "lee" transform_function = detect_steps class LeeStepCountWithoutBout(ExtractStep): """Extract step count with lee dataset without walking bouts.""" def __init__(self, **kwargs): data_set_ids = "lee"
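Two of the rules in the module above are easy to check with plain numbers: _detect_candidate flags a peak when the sample exceeds both its neighbours and mu_a + sigma_a / ALPHA, and _update_peak_valley derives the adaptive time threshold as the mean of the last M_SIGMA intervals minus their standard deviation divided by BETA. A small numeric sketch using the module constants ALPHA = 4 and BETA = 1/3; every other number is invented for illustration.

ALPHA, BETA = 4, 1 / 3  # constants from the module above

# Peak-candidate test from _detect_candidate (made-up sample values).
mu_a, sigma_a = 0.5, 0.2             # step average and deviation of vertical acceleration
acc_prev, acc, acc_next = 0.4, 0.9, 0.3
is_peak = acc > max(max(acc_prev, acc_next), mu_a + sigma_a / ALPHA)

# Adaptive time threshold from _update_peak_valley (made-up interval statistics).
mu_interval, sigma_interval = 0.55, 0.06  # mean/std of the last M_SIGMA peak intervals, in seconds
time_threshold = mu_interval - sigma_interval / BETA

print(is_peak)                   # True: 0.9 exceeds both neighbours and 0.55
print(round(time_threshold, 2))  # 0.37 s minimum spacing before accepting another peak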
definition = MeasureValueDefinitionPrototype(
0
2023-11-14 10:06:46+00:00
16k
shinomakoi/AI-Messenger
main.py
[ { "identifier": "Ui_CharacterForm", "path": "character_window.py", "snippet": "class Ui_CharacterForm(object):\n def setupUi(self, CharacterForm):\n if not CharacterForm.objectName():\n CharacterForm.setObjectName(u\"CharacterForm\")\n CharacterForm.resize(674, 856)\n self.gridLayout = QGridLayout(CharacterForm)\n self.gridLayout.setObjectName(u\"gridLayout\")\n self.saveButton = QPushButton(CharacterForm)\n self.saveButton.setObjectName(u\"saveButton\")\n\n self.gridLayout.addWidget(self.saveButton, 7, 0, 1, 1)\n\n self.charExampleDialog = QPlainTextEdit(CharacterForm)\n self.charExampleDialog.setObjectName(u\"charExampleDialog\")\n\n self.gridLayout.addWidget(self.charExampleDialog, 3, 0, 1, 1)\n\n self.charScenario = QPlainTextEdit(CharacterForm)\n self.charScenario.setObjectName(u\"charScenario\")\n\n self.gridLayout.addWidget(self.charScenario, 2, 0, 1, 1)\n\n self.charName = QLineEdit(CharacterForm)\n self.charName.setObjectName(u\"charName\")\n\n self.gridLayout.addWidget(self.charName, 0, 0, 1, 1)\n\n self.charTags = QLineEdit(CharacterForm)\n self.charTags.setObjectName(u\"charTags\")\n\n self.gridLayout.addWidget(self.charTags, 5, 0, 1, 1)\n\n self.charPersona = QPlainTextEdit(CharacterForm)\n self.charPersona.setObjectName(u\"charPersona\")\n\n self.gridLayout.addWidget(self.charPersona, 1, 0, 1, 1)\n\n self.charTemplate = QLineEdit(CharacterForm)\n self.charTemplate.setObjectName(u\"charTemplate\")\n\n self.gridLayout.addWidget(self.charTemplate, 6, 0, 1, 1)\n\n\n self.retranslateUi(CharacterForm)\n\n QMetaObject.connectSlotsByName(CharacterForm)\n # setupUi\n\n def retranslateUi(self, CharacterForm):\n CharacterForm.setWindowTitle(QCoreApplication.translate(\"CharacterForm\", u\"Character create\", None))\n self.saveButton.setText(QCoreApplication.translate(\"CharacterForm\", u\"Save\", None))\n self.charExampleDialog.setPlaceholderText(QCoreApplication.translate(\"CharacterForm\", u\"Example dialog\", None))\n#if QT_CONFIG(tooltip)\n self.charScenario.setToolTip(QCoreApplication.translate(\"CharacterForm\", u\"Use {{char}} as user name placeholder\", None))\n#endif // QT_CONFIG(tooltip)\n self.charScenario.setPlaceholderText(QCoreApplication.translate(\"CharacterForm\", u\"Scenario\", None))\n#if QT_CONFIG(tooltip)\n self.charName.setToolTip(\"\")\n#endif // QT_CONFIG(tooltip)\n self.charName.setPlaceholderText(QCoreApplication.translate(\"CharacterForm\", u\"Character's name\", None))\n self.charTags.setPlaceholderText(QCoreApplication.translate(\"CharacterForm\", u\"Tags\", None))\n self.charPersona.setPlaceholderText(QCoreApplication.translate(\"CharacterForm\", u\"Character's personality\", None))\n self.charTemplate.setText(\"\")\n self.charTemplate.setPlaceholderText(QCoreApplication.translate(\"CharacterForm\", u\"Turn template (leave empty for default)\", None))\n # retranslateUi" }, { "identifier": "Ui_ChatWindow", "path": "chat_window.py", "snippet": "class Ui_ChatWindow(object):\n def setupUi(self, ChatWindow):\n if not ChatWindow.objectName():\n ChatWindow.setObjectName(u\"ChatWindow\")\n ChatWindow.resize(1582, 1117)\n self.actionSettings = QAction(ChatWindow)\n self.actionSettings.setObjectName(u\"actionSettings\")\n self.actionExit = QAction(ChatWindow)\n self.actionExit.setObjectName(u\"actionExit\")\n self.actionAbout = QAction(ChatWindow)\n self.actionAbout.setObjectName(u\"actionAbout\")\n self.actionSave_settings = QAction(ChatWindow)\n self.actionSave_settings.setObjectName(u\"actionSave_settings\")\n self.actionSave_session = QAction(ChatWindow)\n 
self.actionSave_session.setObjectName(u\"actionSave_session\")\n self.actionReload_contacts = QAction(ChatWindow)\n self.actionReload_contacts.setObjectName(u\"actionReload_contacts\")\n self.actionCharacter = QAction(ChatWindow)\n self.actionCharacter.setObjectName(u\"actionCharacter\")\n self.actionLoad_session = QAction(ChatWindow)\n self.actionLoad_session.setObjectName(u\"actionLoad_session\")\n self.centralwidget = QWidget(ChatWindow)\n self.centralwidget.setObjectName(u\"centralwidget\")\n self.gridLayout_5 = QGridLayout(self.centralwidget)\n self.gridLayout_5.setObjectName(u\"gridLayout_5\")\n self.retryButton = QPushButton(self.centralwidget)\n self.retryButton.setObjectName(u\"retryButton\")\n self.retryButton.setEnabled(False)\n self.retryButton.setMinimumSize(QSize(64, 64))\n self.retryButton.setMaximumSize(QSize(64, 64))\n\n self.gridLayout_5.addWidget(self.retryButton, 2, 2, 1, 1)\n\n self.splitter = QSplitter(self.centralwidget)\n self.splitter.setObjectName(u\"splitter\")\n self.splitter.setOrientation(Qt.Horizontal)\n self.textTabWidget = QTabWidget(self.splitter)\n self.textTabWidget.setObjectName(u\"textTabWidget\")\n self.chatTab = QWidget()\n self.chatTab.setObjectName(u\"chatTab\")\n self.gridLayout = QGridLayout(self.chatTab)\n self.gridLayout.setObjectName(u\"gridLayout\")\n self.chatTextEdit = QTextEdit(self.chatTab)\n self.chatTextEdit.setObjectName(u\"chatTextEdit\")\n self.chatTextEdit.setReadOnly(True)\n self.chatTextEdit.setAcceptRichText(False)\n\n self.gridLayout.addWidget(self.chatTextEdit, 0, 0, 1, 1)\n\n self.textTabWidget.addTab(self.chatTab, \"\")\n self.notebookTab = QWidget()\n self.notebookTab.setObjectName(u\"notebookTab\")\n self.gridLayout_6 = QGridLayout(self.notebookTab)\n self.gridLayout_6.setObjectName(u\"gridLayout_6\")\n self.notebookTextEdit = QTextEdit(self.notebookTab)\n self.notebookTextEdit.setObjectName(u\"notebookTextEdit\")\n self.notebookTextEdit.setAcceptRichText(False)\n\n self.gridLayout_6.addWidget(self.notebookTextEdit, 0, 0, 1, 1)\n\n self.textTabWidget.addTab(self.notebookTab, \"\")\n self.splitter.addWidget(self.textTabWidget)\n self.rightToolbox = QToolBox(self.splitter)\n self.rightToolbox.setObjectName(u\"rightToolbox\")\n self.rightToolbox.setMaximumSize(QSize(292, 16777215))\n self.page = QWidget()\n self.page.setObjectName(u\"page\")\n self.gridLayout_9 = QGridLayout(self.page)\n self.gridLayout_9.setObjectName(u\"gridLayout_9\")\n self.contactsTree = QTreeWidget(self.page)\n self.contactsTree.setObjectName(u\"contactsTree\")\n self.contactsTree.setMaximumSize(QSize(256, 16777215))\n self.contactsTree.setEditTriggers(QAbstractItemView.NoEditTriggers)\n\n self.gridLayout_9.addWidget(self.contactsTree, 0, 0, 1, 1)\n\n self.rightToolbox.addItem(self.page, u\"Chat presets\")\n self.paramsBasicPage = QWidget()\n self.paramsBasicPage.setObjectName(u\"paramsBasicPage\")\n self.paramsBasicPage.setGeometry(QRect(0, 0, 292, 760))\n self.gridLayout_2 = QGridLayout(self.paramsBasicPage)\n self.gridLayout_2.setObjectName(u\"gridLayout_2\")\n self.label_5 = QLabel(self.paramsBasicPage)\n self.label_5.setObjectName(u\"label_5\")\n\n self.gridLayout_2.addWidget(self.label_5, 4, 0, 1, 1)\n\n self.label_8 = QLabel(self.paramsBasicPage)\n self.label_8.setObjectName(u\"label_8\")\n\n self.gridLayout_2.addWidget(self.label_8, 12, 0, 1, 2)\n\n self.label_11 = QLabel(self.paramsBasicPage)\n self.label_11.setObjectName(u\"label_11\")\n\n self.gridLayout_2.addWidget(self.label_11, 8, 0, 1, 1)\n\n self.verticalSpacer = QSpacerItem(20, 40, 
QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.gridLayout_2.addItem(self.verticalSpacer, 14, 0, 1, 3)\n\n self.top_pSpin = QDoubleSpinBox(self.paramsBasicPage)\n self.top_pSpin.setObjectName(u\"top_pSpin\")\n self.top_pSpin.setMaximum(1.000000000000000)\n self.top_pSpin.setSingleStep(0.010000000000000)\n self.top_pSpin.setValue(0.900000000000000)\n\n self.gridLayout_2.addWidget(self.top_pSpin, 7, 2, 1, 1)\n\n self.label_15 = QLabel(self.paramsBasicPage)\n self.label_15.setObjectName(u\"label_15\")\n\n self.gridLayout_2.addWidget(self.label_15, 2, 0, 1, 1)\n\n self.top_pSlider = QSlider(self.paramsBasicPage)\n self.top_pSlider.setObjectName(u\"top_pSlider\")\n self.top_pSlider.setMaximum(100)\n self.top_pSlider.setValue(90)\n self.top_pSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_2.addWidget(self.top_pSlider, 7, 0, 1, 2)\n\n self.max_new_tokensSpin = QSpinBox(self.paramsBasicPage)\n self.max_new_tokensSpin.setObjectName(u\"max_new_tokensSpin\")\n self.max_new_tokensSpin.setMinimum(-2)\n self.max_new_tokensSpin.setMaximum(4096)\n self.max_new_tokensSpin.setSingleStep(64)\n self.max_new_tokensSpin.setValue(512)\n\n self.gridLayout_2.addWidget(self.max_new_tokensSpin, 9, 2, 1, 1)\n\n self.label_17 = QLabel(self.paramsBasicPage)\n self.label_17.setObjectName(u\"label_17\")\n\n self.gridLayout_2.addWidget(self.label_17, 6, 0, 1, 1)\n\n self.temperatureSlider = QSlider(self.paramsBasicPage)\n self.temperatureSlider.setObjectName(u\"temperatureSlider\")\n self.temperatureSlider.setMaximum(400)\n self.temperatureSlider.setValue(70)\n self.temperatureSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_2.addWidget(self.temperatureSlider, 3, 0, 1, 2)\n\n self.max_new_tokensSlider = QSlider(self.paramsBasicPage)\n self.max_new_tokensSlider.setObjectName(u\"max_new_tokensSlider\")\n self.max_new_tokensSlider.setMinimum(-2)\n self.max_new_tokensSlider.setMaximum(4096)\n self.max_new_tokensSlider.setPageStep(32)\n self.max_new_tokensSlider.setValue(512)\n self.max_new_tokensSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_2.addWidget(self.max_new_tokensSlider, 9, 0, 1, 2)\n\n self.typical_pSlider = QSlider(self.paramsBasicPage)\n self.typical_pSlider.setObjectName(u\"typical_pSlider\")\n self.typical_pSlider.setMinimum(1)\n self.typical_pSlider.setMaximum(100)\n self.typical_pSlider.setValue(100)\n self.typical_pSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_2.addWidget(self.typical_pSlider, 13, 0, 1, 1)\n\n self.repetition_penaltySpin = QDoubleSpinBox(self.paramsBasicPage)\n self.repetition_penaltySpin.setObjectName(u\"repetition_penaltySpin\")\n self.repetition_penaltySpin.setMinimum(1.000000000000000)\n self.repetition_penaltySpin.setMaximum(1.800000000000000)\n self.repetition_penaltySpin.setSingleStep(0.010000000000000)\n self.repetition_penaltySpin.setValue(1.120000000000000)\n\n self.gridLayout_2.addWidget(self.repetition_penaltySpin, 11, 2, 1, 1)\n\n self.temperatureSpin = QDoubleSpinBox(self.paramsBasicPage)\n self.temperatureSpin.setObjectName(u\"temperatureSpin\")\n self.temperatureSpin.setMaximum(4.000000000000000)\n self.temperatureSpin.setSingleStep(0.010000000000000)\n self.temperatureSpin.setValue(0.700000000000000)\n\n self.gridLayout_2.addWidget(self.temperatureSpin, 3, 2, 1, 1)\n\n self.paramPresets_comboBox = QComboBox(self.paramsBasicPage)\n self.paramPresets_comboBox.setObjectName(u\"paramPresets_comboBox\")\n self.paramPresets_comboBox.setInsertPolicy(QComboBox.InsertAlphabetically)\n\n self.gridLayout_2.addWidget(self.paramPresets_comboBox, 
1, 0, 1, 3)\n\n self.repetition_penaltySlider = QSlider(self.paramsBasicPage)\n self.repetition_penaltySlider.setObjectName(u\"repetition_penaltySlider\")\n self.repetition_penaltySlider.setMinimum(100)\n self.repetition_penaltySlider.setMaximum(180)\n self.repetition_penaltySlider.setValue(120)\n self.repetition_penaltySlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_2.addWidget(self.repetition_penaltySlider, 11, 0, 1, 2)\n\n self.label_2 = QLabel(self.paramsBasicPage)\n self.label_2.setObjectName(u\"label_2\")\n\n self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)\n\n self.typical_pSpin = QDoubleSpinBox(self.paramsBasicPage)\n self.typical_pSpin.setObjectName(u\"typical_pSpin\")\n self.typical_pSpin.setMinimum(0.010000000000000)\n self.typical_pSpin.setMaximum(1.000000000000000)\n self.typical_pSpin.setSingleStep(0.010000000000000)\n self.typical_pSpin.setValue(1.000000000000000)\n\n self.gridLayout_2.addWidget(self.typical_pSpin, 13, 2, 1, 1)\n\n self.top_kSlider = QSlider(self.paramsBasicPage)\n self.top_kSlider.setObjectName(u\"top_kSlider\")\n self.top_kSlider.setMaximum(200)\n self.top_kSlider.setValue(20)\n self.top_kSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_2.addWidget(self.top_kSlider, 5, 0, 1, 2)\n\n self.top_kSpin = QSpinBox(self.paramsBasicPage)\n self.top_kSpin.setObjectName(u\"top_kSpin\")\n self.top_kSpin.setMaximum(200)\n self.top_kSpin.setValue(20)\n\n self.gridLayout_2.addWidget(self.top_kSpin, 5, 2, 1, 1)\n\n self.label = QLabel(self.paramsBasicPage)\n self.label.setObjectName(u\"label\")\n\n self.gridLayout_2.addWidget(self.label, 10, 0, 1, 2)\n\n self.rightToolbox.addItem(self.paramsBasicPage, u\"Params - Shared\")\n self.paramAdvPage = QWidget()\n self.paramAdvPage.setObjectName(u\"paramAdvPage\")\n self.paramAdvPage.setGeometry(QRect(0, 0, 292, 760))\n self.gridLayout_4 = QGridLayout(self.paramAdvPage)\n self.gridLayout_4.setObjectName(u\"gridLayout_4\")\n self.tfszSlider = QSlider(self.paramAdvPage)\n self.tfszSlider.setObjectName(u\"tfszSlider\")\n self.tfszSlider.setMinimum(1)\n self.tfszSlider.setMaximum(100)\n self.tfszSlider.setValue(100)\n self.tfszSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_4.addWidget(self.tfszSlider, 10, 0, 1, 1)\n\n self.label_9 = QLabel(self.paramAdvPage)\n self.label_9.setObjectName(u\"label_9\")\n\n self.gridLayout_4.addWidget(self.label_9, 16, 0, 1, 1)\n\n self.label_4 = QLabel(self.paramAdvPage)\n self.label_4.setObjectName(u\"label_4\")\n\n self.gridLayout_4.addWidget(self.label_4, 5, 0, 1, 1)\n\n self.tfszSpin = QDoubleSpinBox(self.paramAdvPage)\n self.tfszSpin.setObjectName(u\"tfszSpin\")\n self.tfszSpin.setMinimum(0.010000000000000)\n self.tfszSpin.setMaximum(1.000000000000000)\n self.tfszSpin.setSingleStep(0.010000000000000)\n self.tfszSpin.setValue(1.000000000000000)\n\n self.gridLayout_4.addWidget(self.tfszSpin, 10, 1, 1, 1)\n\n self.keepLastNSlider = QSlider(self.paramAdvPage)\n self.keepLastNSlider.setObjectName(u\"keepLastNSlider\")\n self.keepLastNSlider.setMinimum(-1)\n self.keepLastNSlider.setMaximum(8192)\n self.keepLastNSlider.setValue(2048)\n self.keepLastNSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_4.addWidget(self.keepLastNSlider, 3, 0, 1, 1)\n\n self.label_20 = QLabel(self.paramAdvPage)\n self.label_20.setObjectName(u\"label_20\")\n\n self.gridLayout_4.addWidget(self.label_20, 2, 0, 1, 1)\n\n self.presencePenaltySpin = QDoubleSpinBox(self.paramAdvPage)\n self.presencePenaltySpin.setObjectName(u\"presencePenaltySpin\")\n 
self.presencePenaltySpin.setMaximum(3.000000000000000)\n self.presencePenaltySpin.setSingleStep(0.010000000000000)\n\n self.gridLayout_4.addWidget(self.presencePenaltySpin, 8, 1, 1, 1)\n\n self.label_3 = QLabel(self.paramAdvPage)\n self.label_3.setObjectName(u\"label_3\")\n\n self.gridLayout_4.addWidget(self.label_3, 15, 0, 1, 1)\n\n self.keepLastNSpin = QSpinBox(self.paramAdvPage)\n self.keepLastNSpin.setObjectName(u\"keepLastNSpin\")\n self.keepLastNSpin.setMinimum(-1)\n self.keepLastNSpin.setMaximum(8192)\n self.keepLastNSpin.setValue(2048)\n\n self.gridLayout_4.addWidget(self.keepLastNSpin, 3, 1, 1, 1)\n\n self.mirostatEta = QSpinBox(self.paramAdvPage)\n self.mirostatEta.setObjectName(u\"mirostatEta\")\n self.mirostatEta.setMaximum(2)\n\n self.gridLayout_4.addWidget(self.mirostatEta, 14, 1, 1, 1)\n\n self.label_31 = QLabel(self.paramAdvPage)\n self.label_31.setObjectName(u\"label_31\")\n\n self.gridLayout_4.addWidget(self.label_31, 9, 0, 1, 1)\n\n self.line_2 = QFrame(self.paramAdvPage)\n self.line_2.setObjectName(u\"line_2\")\n self.line_2.setFrameShape(QFrame.HLine)\n self.line_2.setFrameShadow(QFrame.Sunken)\n\n self.gridLayout_4.addWidget(self.line_2, 13, 0, 1, 1)\n\n self.label_14 = QLabel(self.paramAdvPage)\n self.label_14.setObjectName(u\"label_14\")\n\n self.gridLayout_4.addWidget(self.label_14, 14, 0, 1, 1)\n\n self.label_7 = QLabel(self.paramAdvPage)\n self.label_7.setObjectName(u\"label_7\")\n\n self.gridLayout_4.addWidget(self.label_7, 7, 0, 1, 1)\n\n self.repeatLastSlider = QSlider(self.paramAdvPage)\n self.repeatLastSlider.setObjectName(u\"repeatLastSlider\")\n self.repeatLastSlider.setMinimum(-1)\n self.repeatLastSlider.setMaximum(2048)\n self.repeatLastSlider.setValue(256)\n self.repeatLastSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_4.addWidget(self.repeatLastSlider, 1, 0, 1, 1)\n\n self.minPSlider = QSlider(self.paramAdvPage)\n self.minPSlider.setObjectName(u\"minPSlider\")\n self.minPSlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_4.addWidget(self.minPSlider, 12, 0, 1, 1)\n\n self.seedSpin = QSpinBox(self.paramAdvPage)\n self.seedSpin.setObjectName(u\"seedSpin\")\n self.seedSpin.setMinimum(-1)\n self.seedSpin.setMaximum(999999999)\n self.seedSpin.setValue(-1)\n\n self.gridLayout_4.addWidget(self.seedSpin, 19, 0, 1, 1)\n\n self.line = QFrame(self.paramAdvPage)\n self.line.setObjectName(u\"line\")\n self.line.setFrameShape(QFrame.HLine)\n self.line.setFrameShadow(QFrame.Sunken)\n\n self.gridLayout_4.addWidget(self.line, 4, 0, 1, 1)\n\n self.freqPenaltySpin = QDoubleSpinBox(self.paramAdvPage)\n self.freqPenaltySpin.setObjectName(u\"freqPenaltySpin\")\n self.freqPenaltySpin.setMaximum(3.000000000000000)\n self.freqPenaltySpin.setSingleStep(0.010000000000000)\n\n self.gridLayout_4.addWidget(self.freqPenaltySpin, 6, 1, 1, 1)\n\n self.presencePenaltySlider = QSlider(self.paramAdvPage)\n self.presencePenaltySlider.setObjectName(u\"presencePenaltySlider\")\n self.presencePenaltySlider.setMaximum(300)\n self.presencePenaltySlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_4.addWidget(self.presencePenaltySlider, 8, 0, 1, 1)\n\n self.label_6 = QLabel(self.paramAdvPage)\n self.label_6.setObjectName(u\"label_6\")\n\n self.gridLayout_4.addWidget(self.label_6, 11, 0, 1, 1)\n\n self.label_19 = QLabel(self.paramAdvPage)\n self.label_19.setObjectName(u\"label_19\")\n\n self.gridLayout_4.addWidget(self.label_19, 0, 0, 1, 1)\n\n self.mirostatTau = QSpinBox(self.paramAdvPage)\n self.mirostatTau.setObjectName(u\"mirostatTau\")\n self.mirostatTau.setMinimum(2)\n 
self.mirostatTau.setMaximum(12)\n self.mirostatTau.setValue(5)\n\n self.gridLayout_4.addWidget(self.mirostatTau, 15, 1, 1, 1)\n\n self.mirostatMode = QDoubleSpinBox(self.paramAdvPage)\n self.mirostatMode.setObjectName(u\"mirostatMode\")\n self.mirostatMode.setMaximum(2.000000000000000)\n self.mirostatMode.setSingleStep(0.010000000000000)\n self.mirostatMode.setValue(0.100000000000000)\n\n self.gridLayout_4.addWidget(self.mirostatMode, 16, 1, 1, 1)\n\n self.verticalSpacer_3 = QSpacerItem(20, 718, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.gridLayout_4.addItem(self.verticalSpacer_3, 20, 0, 1, 2)\n\n self.repeatLastSpin = QSpinBox(self.paramAdvPage)\n self.repeatLastSpin.setObjectName(u\"repeatLastSpin\")\n self.repeatLastSpin.setMinimum(-1)\n self.repeatLastSpin.setMaximum(2048)\n self.repeatLastSpin.setValue(256)\n\n self.gridLayout_4.addWidget(self.repeatLastSpin, 1, 1, 1, 1)\n\n self.minpSpin = QDoubleSpinBox(self.paramAdvPage)\n self.minpSpin.setObjectName(u\"minpSpin\")\n self.minpSpin.setMaximum(1.000000000000000)\n self.minpSpin.setSingleStep(0.010000000000000)\n self.minpSpin.setValue(0.100000000000000)\n\n self.gridLayout_4.addWidget(self.minpSpin, 12, 1, 1, 1)\n\n self.label_21 = QLabel(self.paramAdvPage)\n self.label_21.setObjectName(u\"label_21\")\n\n self.gridLayout_4.addWidget(self.label_21, 18, 0, 1, 1)\n\n self.freqPenaltySlider = QSlider(self.paramAdvPage)\n self.freqPenaltySlider.setObjectName(u\"freqPenaltySlider\")\n self.freqPenaltySlider.setMaximum(300)\n self.freqPenaltySlider.setValue(0)\n self.freqPenaltySlider.setOrientation(Qt.Horizontal)\n\n self.gridLayout_4.addWidget(self.freqPenaltySlider, 6, 0, 1, 1)\n\n self.line_3 = QFrame(self.paramAdvPage)\n self.line_3.setObjectName(u\"line_3\")\n self.line_3.setFrameShape(QFrame.HLine)\n self.line_3.setFrameShadow(QFrame.Sunken)\n\n self.gridLayout_4.addWidget(self.line_3, 17, 0, 1, 1)\n\n self.rightToolbox.addItem(self.paramAdvPage, u\"Params - More\")\n self.preferencesPage = QWidget()\n self.preferencesPage.setObjectName(u\"preferencesPage\")\n self.preferencesPage.setGeometry(QRect(0, 0, 292, 760))\n self.gridLayout_3 = QGridLayout(self.preferencesPage)\n self.gridLayout_3.setObjectName(u\"gridLayout_3\")\n self.customSysPromptCheck = QCheckBox(self.preferencesPage)\n self.customSysPromptCheck.setObjectName(u\"customSysPromptCheck\")\n\n self.gridLayout_3.addWidget(self.customSysPromptCheck, 9, 0, 1, 1)\n\n self.botnameLine = QLineEdit(self.preferencesPage)\n self.botnameLine.setObjectName(u\"botnameLine\")\n\n self.gridLayout_3.addWidget(self.botnameLine, 6, 0, 1, 1)\n\n self.usernameLine = QLineEdit(self.preferencesPage)\n self.usernameLine.setObjectName(u\"usernameLine\")\n self.usernameLine.setFrame(True)\n self.usernameLine.setClearButtonEnabled(False)\n\n self.gridLayout_3.addWidget(self.usernameLine, 4, 0, 1, 1)\n\n self.customSysPromptText = QPlainTextEdit(self.preferencesPage)\n self.customSysPromptText.setObjectName(u\"customSysPromptText\")\n sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.customSysPromptText.sizePolicy().hasHeightForWidth())\n self.customSysPromptText.setSizePolicy(sizePolicy)\n self.customSysPromptText.setMinimumSize(QSize(0, 168))\n\n self.gridLayout_3.addWidget(self.customSysPromptText, 10, 0, 1, 1)\n\n self.verticalSpacer_4 = QSpacerItem(20, 493, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.gridLayout_3.addItem(self.verticalSpacer_4, 14, 
0, 1, 1)\n\n self.streamCheck = QCheckBox(self.preferencesPage)\n self.streamCheck.setObjectName(u\"streamCheck\")\n self.streamCheck.setChecked(True)\n\n self.gridLayout_3.addWidget(self.streamCheck, 1, 0, 1, 1)\n\n self.cacheCheck = QCheckBox(self.preferencesPage)\n self.cacheCheck.setObjectName(u\"cacheCheck\")\n self.cacheCheck.setChecked(True)\n\n self.gridLayout_3.addWidget(self.cacheCheck, 2, 0, 1, 1)\n\n self.label_12 = QLabel(self.preferencesPage)\n self.label_12.setObjectName(u\"label_12\")\n\n self.gridLayout_3.addWidget(self.label_12, 5, 0, 1, 1)\n\n self.label_10 = QLabel(self.preferencesPage)\n self.label_10.setObjectName(u\"label_10\")\n\n self.gridLayout_3.addWidget(self.label_10, 3, 0, 1, 1)\n\n self.groupBox = QGroupBox(self.preferencesPage)\n self.groupBox.setObjectName(u\"groupBox\")\n self.verticalLayout = QVBoxLayout(self.groupBox)\n self.verticalLayout.setObjectName(u\"verticalLayout\")\n self.backendCppCheck = QRadioButton(self.groupBox)\n self.backendCppCheck.setObjectName(u\"backendCppCheck\")\n self.backendCppCheck.setChecked(True)\n\n self.verticalLayout.addWidget(self.backendCppCheck)\n\n self.backendExllamaCheck = QRadioButton(self.groupBox)\n self.backendExllamaCheck.setObjectName(u\"backendExllamaCheck\")\n\n self.verticalLayout.addWidget(self.backendExllamaCheck)\n\n\n self.gridLayout_3.addWidget(self.groupBox, 0, 0, 1, 1)\n\n self.rightToolbox.addItem(self.preferencesPage, u\"Preferences\")\n self.themesPage = QWidget()\n self.themesPage.setObjectName(u\"themesPage\")\n self.themesPage.setGeometry(QRect(0, 0, 292, 760))\n self.gridLayout_7 = QGridLayout(self.themesPage)\n self.gridLayout_7.setObjectName(u\"gridLayout_7\")\n self.custStopStringLine = QLineEdit(self.themesPage)\n self.custStopStringLine.setObjectName(u\"custStopStringLine\")\n\n self.gridLayout_7.addWidget(self.custStopStringLine, 5, 0, 1, 1)\n\n self.imgFileLine = QLineEdit(self.themesPage)\n self.imgFileLine.setObjectName(u\"imgFileLine\")\n\n self.gridLayout_7.addWidget(self.imgFileLine, 7, 0, 1, 1)\n\n self.groupBox_2 = QGroupBox(self.themesPage)\n self.groupBox_2.setObjectName(u\"groupBox_2\")\n self.gridLayout_8 = QGridLayout(self.groupBox_2)\n self.gridLayout_8.setObjectName(u\"gridLayout_8\")\n self.themeLightRadio = QRadioButton(self.groupBox_2)\n self.themeLightRadio.setObjectName(u\"themeLightRadio\")\n\n self.gridLayout_8.addWidget(self.themeLightRadio, 3, 0, 1, 1)\n\n self.themeNativeRadio = QRadioButton(self.groupBox_2)\n self.themeNativeRadio.setObjectName(u\"themeNativeRadio\")\n\n self.gridLayout_8.addWidget(self.themeNativeRadio, 4, 0, 1, 1)\n\n self.themeDarkRadio = QRadioButton(self.groupBox_2)\n self.themeDarkRadio.setObjectName(u\"themeDarkRadio\")\n self.themeDarkRadio.setChecked(True)\n\n self.gridLayout_8.addWidget(self.themeDarkRadio, 2, 0, 1, 1)\n\n\n self.gridLayout_7.addWidget(self.groupBox_2, 10, 0, 1, 1)\n\n self.label_16 = QLabel(self.themesPage)\n self.label_16.setObjectName(u\"label_16\")\n\n self.gridLayout_7.addWidget(self.label_16, 4, 0, 1, 1)\n\n self.imgFileButton = QToolButton(self.themesPage)\n self.imgFileButton.setObjectName(u\"imgFileButton\")\n\n self.gridLayout_7.addWidget(self.imgFileButton, 7, 1, 1, 1)\n\n self.stopStringAutoCheck = QCheckBox(self.themesPage)\n self.stopStringAutoCheck.setObjectName(u\"stopStringAutoCheck\")\n self.stopStringAutoCheck.setChecked(True)\n\n self.gridLayout_7.addWidget(self.stopStringAutoCheck, 1, 0, 1, 1)\n\n self.autoSaveSessionCheck = QCheckBox(self.themesPage)\n 
self.autoSaveSessionCheck.setObjectName(u\"autoSaveSessionCheck\")\n self.autoSaveSessionCheck.setChecked(True)\n\n self.gridLayout_7.addWidget(self.autoSaveSessionCheck, 2, 0, 1, 1)\n\n self.penaliseNlCheck = QCheckBox(self.themesPage)\n self.penaliseNlCheck.setObjectName(u\"penaliseNlCheck\")\n self.penaliseNlCheck.setChecked(True)\n\n self.gridLayout_7.addWidget(self.penaliseNlCheck, 8, 0, 1, 1)\n\n self.verticalSpacer_5 = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.gridLayout_7.addItem(self.verticalSpacer_5, 12, 0, 1, 1)\n\n self.autoscrollCheck = QCheckBox(self.themesPage)\n self.autoscrollCheck.setObjectName(u\"autoscrollCheck\")\n self.autoscrollCheck.setChecked(True)\n\n self.gridLayout_7.addWidget(self.autoscrollCheck, 0, 0, 1, 1)\n\n self.label_13 = QLabel(self.themesPage)\n self.label_13.setObjectName(u\"label_13\")\n\n self.gridLayout_7.addWidget(self.label_13, 6, 0, 1, 1)\n\n self.charInstructCheck = QCheckBox(self.themesPage)\n self.charInstructCheck.setObjectName(u\"charInstructCheck\")\n self.charInstructCheck.setChecked(True)\n\n self.gridLayout_7.addWidget(self.charInstructCheck, 9, 0, 1, 1)\n\n self.rightToolbox.addItem(self.themesPage, u\"Preferences - More\")\n self.splitter.addWidget(self.rightToolbox)\n\n self.gridLayout_5.addWidget(self.splitter, 0, 0, 1, 7)\n\n self.inputHistoryCombo = QComboBox(self.centralwidget)\n self.inputHistoryCombo.setObjectName(u\"inputHistoryCombo\")\n\n self.gridLayout_5.addWidget(self.inputHistoryCombo, 1, 0, 1, 1)\n\n self.clearButton = QPushButton(self.centralwidget)\n self.clearButton.setObjectName(u\"clearButton\")\n self.clearButton.setEnabled(False)\n self.clearButton.setMinimumSize(QSize(64, 64))\n self.clearButton.setMaximumSize(QSize(64, 64))\n\n self.gridLayout_5.addWidget(self.clearButton, 2, 5, 1, 1)\n\n self.generateButton = QPushButton(self.centralwidget)\n self.generateButton.setObjectName(u\"generateButton\")\n self.generateButton.setEnabled(True)\n self.generateButton.setMinimumSize(QSize(64, 64))\n self.generateButton.setMaximumSize(QSize(64, 64))\n\n self.gridLayout_5.addWidget(self.generateButton, 2, 1, 1, 1)\n\n self.continueButton = QPushButton(self.centralwidget)\n self.continueButton.setObjectName(u\"continueButton\")\n self.continueButton.setEnabled(False)\n self.continueButton.setMinimumSize(QSize(64, 64))\n self.continueButton.setMaximumSize(QSize(64, 64))\n\n self.gridLayout_5.addWidget(self.continueButton, 2, 4, 1, 1)\n\n self.rewindButton = QPushButton(self.centralwidget)\n self.rewindButton.setObjectName(u\"rewindButton\")\n self.rewindButton.setEnabled(False)\n self.rewindButton.setMinimumSize(QSize(64, 64))\n self.rewindButton.setMaximumSize(QSize(64, 64))\n\n self.gridLayout_5.addWidget(self.rewindButton, 2, 3, 1, 1)\n\n self.stopButton = QPushButton(self.centralwidget)\n self.stopButton.setObjectName(u\"stopButton\")\n self.stopButton.setEnabled(False)\n self.stopButton.setMinimumSize(QSize(64, 64))\n self.stopButton.setMaximumSize(QSize(64, 64))\n\n self.gridLayout_5.addWidget(self.stopButton, 2, 6, 1, 1)\n\n ChatWindow.setCentralWidget(self.centralwidget)\n self.menubar = QMenuBar(ChatWindow)\n self.menubar.setObjectName(u\"menubar\")\n self.menubar.setGeometry(QRect(0, 0, 1582, 27))\n self.menuFile = QMenu(self.menubar)\n self.menuFile.setObjectName(u\"menuFile\")\n self.menuHelp = QMenu(self.menubar)\n self.menuHelp.setObjectName(u\"menuHelp\")\n self.menuEditors = QMenu(self.menubar)\n self.menuEditors.setObjectName(u\"menuEditors\")\n 
ChatWindow.setMenuBar(self.menubar)\n self.statusbar = QStatusBar(ChatWindow)\n self.statusbar.setObjectName(u\"statusbar\")\n ChatWindow.setStatusBar(self.statusbar)\n\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuEditors.menuAction())\n self.menubar.addAction(self.menuHelp.menuAction())\n self.menuFile.addAction(self.actionReload_contacts)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionSave_session)\n self.menuFile.addAction(self.actionLoad_session)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionSave_settings)\n self.menuFile.addAction(self.actionExit)\n self.menuHelp.addAction(self.actionAbout)\n self.menuEditors.addAction(self.actionCharacter)\n\n self.retranslateUi(ChatWindow)\n self.top_kSlider.valueChanged.connect(self.top_kSpin.setValue)\n self.max_new_tokensSpin.valueChanged.connect(self.max_new_tokensSlider.setValue)\n self.max_new_tokensSlider.valueChanged.connect(self.max_new_tokensSpin.setValue)\n self.top_kSpin.valueChanged.connect(self.top_kSlider.setValue)\n self.repeatLastSlider.valueChanged.connect(self.repeatLastSpin.setValue)\n self.repeatLastSpin.valueChanged.connect(self.repeatLastSlider.setValue)\n self.keepLastNSlider.valueChanged.connect(self.keepLastNSpin.setValue)\n self.keepLastNSpin.valueChanged.connect(self.keepLastNSlider.setValue)\n\n self.textTabWidget.setCurrentIndex(0)\n self.rightToolbox.setCurrentIndex(0)\n self.generateButton.setDefault(True)\n\n\n QMetaObject.connectSlotsByName(ChatWindow)\n # setupUi\n\n def retranslateUi(self, ChatWindow):\n ChatWindow.setWindowTitle(QCoreApplication.translate(\"ChatWindow\", u\"AI Messenger\", None))\n self.actionSettings.setText(QCoreApplication.translate(\"ChatWindow\", u\"Settings\", None))\n self.actionExit.setText(QCoreApplication.translate(\"ChatWindow\", u\"Exit\", None))\n self.actionAbout.setText(QCoreApplication.translate(\"ChatWindow\", u\"About\", None))\n self.actionSave_settings.setText(QCoreApplication.translate(\"ChatWindow\", u\"Save settings\", None))\n self.actionSave_session.setText(QCoreApplication.translate(\"ChatWindow\", u\"Save session\", None))\n self.actionReload_contacts.setText(QCoreApplication.translate(\"ChatWindow\", u\"Reload contacts\", None))\n self.actionCharacter.setText(QCoreApplication.translate(\"ChatWindow\", u\"Character\", None))\n self.actionLoad_session.setText(QCoreApplication.translate(\"ChatWindow\", u\"Load session\", None))\n#if QT_CONFIG(tooltip)\n self.retryButton.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Retry\", None))\n#endif // QT_CONFIG(tooltip)\n self.retryButton.setText(QCoreApplication.translate(\"ChatWindow\", u\"Ret\", None))\n self.chatTextEdit.setPlaceholderText(QCoreApplication.translate(\"ChatWindow\", u\"Output text\", None))\n self.textTabWidget.setTabText(self.textTabWidget.indexOf(self.chatTab), QCoreApplication.translate(\"ChatWindow\", u\"Chat\", None))\n self.notebookTextEdit.setPlaceholderText(QCoreApplication.translate(\"ChatWindow\", u\"Output text\", None))\n self.textTabWidget.setTabText(self.textTabWidget.indexOf(self.notebookTab), QCoreApplication.translate(\"ChatWindow\", u\"Notebook\", None))\n ___qtreewidgetitem = self.contactsTree.headerItem()\n ___qtreewidgetitem.setText(0, QCoreApplication.translate(\"ChatWindow\", u\"Presets\", None));\n self.rightToolbox.setItemText(self.rightToolbox.indexOf(self.page), QCoreApplication.translate(\"ChatWindow\", u\"Chat presets\", None))\n 
self.label_5.setText(QCoreApplication.translate(\"ChatWindow\", u\"Top K:\", None))\n self.label_8.setText(QCoreApplication.translate(\"ChatWindow\", u\"Typical P:\", None))\n self.label_11.setText(QCoreApplication.translate(\"ChatWindow\", u\"Max new tokens:\", None))\n#if QT_CONFIG(tooltip)\n self.top_pSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_15.setText(QCoreApplication.translate(\"ChatWindow\", u\"Temperature:\", None))\n#if QT_CONFIG(tooltip)\n self.top_pSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.max_new_tokensSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Set the number of tokens to predict when generating text (-1 = infinity, -2 = until context filled)\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_17.setText(QCoreApplication.translate(\"ChatWindow\", u\"Top P:\", None))\n#if QT_CONFIG(tooltip)\n self.temperatureSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Adjust the randomness of the generated text\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.max_new_tokensSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Set the number of tokens to predict when generating text (-1 = infinity, -2 = until context filled)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.typical_pSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Locally typical sampling, parameter p (1.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.repetition_penaltySpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Repetition penality value\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.temperatureSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Adjust the randomness of the generated text\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.paramPresets_comboBox.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Parameter preset\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.repetition_penaltySlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Repetition penality value\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_2.setText(QCoreApplication.translate(\"ChatWindow\", u\"Preset:\", None))\n#if QT_CONFIG(tooltip)\n self.typical_pSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Locally typical sampling, parameter p (1.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.top_kSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Limit the next token selection to the K most probable tokens\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.top_kSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Limit the next token selection to the K most probable tokens\", None))\n#endif // QT_CONFIG(tooltip)\n self.label.setText(QCoreApplication.translate(\"ChatWindow\", u\"Repetition penalty:\", None))\n self.rightToolbox.setItemText(self.rightToolbox.indexOf(self.paramsBasicPage), QCoreApplication.translate(\"ChatWindow\", u\"Params - Shared\", None))\n#if QT_CONFIG(tooltip)\n 
self.tfszSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Tail free sampling, parameter z (1.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_9.setText(QCoreApplication.translate(\"ChatWindow\", u\"Mirostat LR:\", None))\n self.label_4.setText(QCoreApplication.translate(\"ChatWindow\", u\"Frequency penalty:\", None))\n#if QT_CONFIG(tooltip)\n self.tfszSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Tail free sampling, parameter z (1.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.keepLastNSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Keep this many tokens when context exceeded (-1 = all)\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_20.setText(QCoreApplication.translate(\"ChatWindow\", u\"Keep prompt n:\", None))\n#if QT_CONFIG(tooltip)\n self.presencePenaltySpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Repeat alpha presence penalty (0.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_3.setText(QCoreApplication.translate(\"ChatWindow\", u\"Mirostat Ent:\", None))\n#if QT_CONFIG(tooltip)\n self.keepLastNSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Keep this many tokens when context exceeded (-1 = all)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.mirostatEta.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Use Mirostat sampling. Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used. (0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_31.setText(QCoreApplication.translate(\"ChatWindow\", u\"Tail Free Sampling:\", None))\n self.label_14.setText(QCoreApplication.translate(\"ChatWindow\", u\"Mirostat mode:\", None))\n self.label_7.setText(QCoreApplication.translate(\"ChatWindow\", u\"Presence penalty:\", None))\n#if QT_CONFIG(tooltip)\n self.repeatLastSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Range to sample for repeat penalty (-1 = all)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.minPSlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Sets a minimum base probability threshold for token selection\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.seedSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Seed value (-1 for random)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.freqPenaltySpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Repeat alpha frequency penalty (0.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.presencePenaltySlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Repeat alpha presence penalty (0.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_6.setText(QCoreApplication.translate(\"ChatWindow\", u\"Min P:\", None))\n self.label_19.setText(QCoreApplication.translate(\"ChatWindow\", u\"Repeat last n:\", None))\n#if QT_CONFIG(tooltip)\n self.mirostatTau.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Set the Mirostat target entropy, parameter tau\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.mirostatMode.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Set the Mirostat learning rate, parameter eta\", None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.repeatLastSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Range to sample for repeat penalty (-1 = all)\", 
None))\n#endif // QT_CONFIG(tooltip)\n#if QT_CONFIG(tooltip)\n self.minpSpin.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Sets a minimum base probability threshold for token selection\", None))\n#endif // QT_CONFIG(tooltip)\n self.label_21.setText(QCoreApplication.translate(\"ChatWindow\", u\"Seed:\", None))\n#if QT_CONFIG(tooltip)\n self.freqPenaltySlider.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Repeat alpha frequency penalty (0.0 = disabled)\", None))\n#endif // QT_CONFIG(tooltip)\n self.rightToolbox.setItemText(self.rightToolbox.indexOf(self.paramAdvPage), QCoreApplication.translate(\"ChatWindow\", u\"Params - More\", None))\n#if QT_CONFIG(tooltip)\n self.customSysPromptCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Use a custom system prompt\", None))\n#endif // QT_CONFIG(tooltip)\n self.customSysPromptCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"System prompt:\", None))\n#if QT_CONFIG(tooltip)\n self.botnameLine.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Display name of bot\", None))\n#endif // QT_CONFIG(tooltip)\n self.botnameLine.setText(QCoreApplication.translate(\"ChatWindow\", u\"Bot\", None))\n self.botnameLine.setPlaceholderText(QCoreApplication.translate(\"ChatWindow\", u\"Bot\", None))\n#if QT_CONFIG(tooltip)\n self.usernameLine.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Display name of user\", None))\n#endif // QT_CONFIG(tooltip)\n self.usernameLine.setText(QCoreApplication.translate(\"ChatWindow\", u\"You\", None))\n self.usernameLine.setPlaceholderText(QCoreApplication.translate(\"ChatWindow\", u\"You\", None))\n#if QT_CONFIG(tooltip)\n self.customSysPromptText.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Custom system prompt text\", None))\n#endif // QT_CONFIG(tooltip)\n self.customSysPromptText.setPlaceholderText(QCoreApplication.translate(\"ChatWindow\", u\"Custom system prompt\", None))\n#if QT_CONFIG(tooltip)\n self.streamCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Streaming of text\", None))\n#endif // QT_CONFIG(tooltip)\n self.streamCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"Stream\", None))\n#if QT_CONFIG(tooltip)\n self.cacheCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Store context in cache\", None))\n#endif // QT_CONFIG(tooltip)\n self.cacheCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"Cache\", None))\n self.label_12.setText(QCoreApplication.translate(\"ChatWindow\", u\"Bot name:\", None))\n self.label_10.setText(QCoreApplication.translate(\"ChatWindow\", u\"User name:\", None))\n self.groupBox.setTitle(QCoreApplication.translate(\"ChatWindow\", u\"Backend\", None))\n#if QT_CONFIG(tooltip)\n self.backendCppCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Use LLaMA.cpp server backend\", None))\n#endif // QT_CONFIG(tooltip)\n self.backendCppCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"LLaMA.cpp\", None))\n#if QT_CONFIG(tooltip)\n self.backendExllamaCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Use ExLLaMA V2 websockets server backend\", None))\n#endif // QT_CONFIG(tooltip)\n self.backendExllamaCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"ExLLaMA V2\", None))\n self.rightToolbox.setItemText(self.rightToolbox.indexOf(self.preferencesPage), QCoreApplication.translate(\"ChatWindow\", u\"Preferences\", None))\n#if QT_CONFIG(tooltip)\n self.custStopStringLine.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Comma separated list\", 
None))\n#endif // QT_CONFIG(tooltip)\n self.custStopStringLine.setPlaceholderText(QCoreApplication.translate(\"ChatWindow\", u\"User, Bot\", None))\n#if QT_CONFIG(tooltip)\n self.imgFileLine.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Image for LLaVA (llama.cpp only)\", None))\n#endif // QT_CONFIG(tooltip)\n self.imgFileLine.setText(\"\")\n self.imgFileLine.setPlaceholderText(QCoreApplication.translate(\"ChatWindow\", u\"Path to image for LLaVA usage\", None))\n self.groupBox_2.setTitle(QCoreApplication.translate(\"ChatWindow\", u\"Theme\", None))\n self.themeLightRadio.setText(QCoreApplication.translate(\"ChatWindow\", u\"Light\", None))\n self.themeNativeRadio.setText(QCoreApplication.translate(\"ChatWindow\", u\"Native\", None))\n self.themeDarkRadio.setText(QCoreApplication.translate(\"ChatWindow\", u\"Dark\", None))\n self.label_16.setText(QCoreApplication.translate(\"ChatWindow\", u\"Custom stop strings:\", None))\n self.imgFileButton.setText(QCoreApplication.translate(\"ChatWindow\", u\"...\", None))\n#if QT_CONFIG(tooltip)\n self.stopStringAutoCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Auto add stop strings\", None))\n#endif // QT_CONFIG(tooltip)\n self.stopStringAutoCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"Auto add stop strings\", None))\n#if QT_CONFIG(tooltip)\n self.autoSaveSessionCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Auto save session to file after generation\", None))\n#endif // QT_CONFIG(tooltip)\n self.autoSaveSessionCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"Auto save session\", None))\n#if QT_CONFIG(tooltip)\n self.penaliseNlCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Penalise newline tokens when applying the repeat penalty \", None))\n#endif // QT_CONFIG(tooltip)\n self.penaliseNlCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"Penalise newlines\", None))\n#if QT_CONFIG(tooltip)\n self.autoscrollCheck.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Autoscroll the output text when generating\", None))\n#endif // QT_CONFIG(tooltip)\n self.autoscrollCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"Autoscroll\", None))\n self.label_13.setText(QCoreApplication.translate(\"ChatWindow\", u\"LLaVA image:\", None))\n self.charInstructCheck.setText(QCoreApplication.translate(\"ChatWindow\", u\"Use instruct for characters\", None))\n self.rightToolbox.setItemText(self.rightToolbox.indexOf(self.themesPage), QCoreApplication.translate(\"ChatWindow\", u\"Preferences - More\", None))\n#if QT_CONFIG(tooltip)\n self.clearButton.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Clear the output history\", None))\n#endif // QT_CONFIG(tooltip)\n self.clearButton.setText(QCoreApplication.translate(\"ChatWindow\", u\"Clr\", None))\n#if QT_CONFIG(tooltip)\n self.generateButton.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Start generation\", None))\n#endif // QT_CONFIG(tooltip)\n self.generateButton.setText(QCoreApplication.translate(\"ChatWindow\", u\"Gen\", None))\n#if QT_CONFIG(tooltip)\n self.continueButton.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Rewinds the chat 1 turn\", None))\n#endif // QT_CONFIG(tooltip)\n self.continueButton.setText(QCoreApplication.translate(\"ChatWindow\", u\"Con\", None))\n#if QT_CONFIG(tooltip)\n self.rewindButton.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Continue the last generation\", None))\n#endif // QT_CONFIG(tooltip)\n 
self.rewindButton.setText(QCoreApplication.translate(\"ChatWindow\", u\"Rw\", None))\n#if QT_CONFIG(tooltip)\n self.stopButton.setToolTip(QCoreApplication.translate(\"ChatWindow\", u\"Stop generation\", None))\n#endif // QT_CONFIG(tooltip)\n self.stopButton.setText(QCoreApplication.translate(\"ChatWindow\", u\"St\", None))\n self.menuFile.setTitle(QCoreApplication.translate(\"ChatWindow\", u\"File\", None))\n self.menuHelp.setTitle(QCoreApplication.translate(\"ChatWindow\", u\"Help\", None))\n self.menuEditors.setTitle(QCoreApplication.translate(\"ChatWindow\", u\"Editors\", None))\n # retranslateUi" } ]
import asyncio
import base64
import glob
import json
import platform
import random
import sys
import cpp_server_gen
import exllamav2_server_gen
from pathlib import Path
from PIL import Image
from PySide6.QtCore import QSize, Qt, QThread, Signal, Slot
from PySide6.QtGui import QIcon, QTextCursor
from PySide6.QtWidgets import (
    QApplication,
    QFileDialog,
    QMainWindow,
    QPlainTextEdit,
    QTreeWidgetItem,
    QWidget,
)
from qt_material import apply_stylesheet
from character_window import Ui_CharacterForm
from chat_window import Ui_ChatWindow
12,963
# Constants for the directories and file names
APP_ICON = Path("assets/icons/appicon.png")
INSTRUCT_PRESETS_DIR = Path("presets/Assistants")
CHARACTER_PRESETS_DIR = Path("presets/Characters")
CARDS_PRESETS_DIR = Path("presets/Cards")
SETTINGS_FILE = Path("saved/settings.json")
SESSION_FILE = Path("saved/session.json")
PARAMS_DIR = Path("presets/model_params")
# Constants for the directories and file names
APP_ICON = Path("assets/icons/appicon.png")
INSTRUCT_PRESETS_DIR = Path("presets/Assistants")
CHARACTER_PRESETS_DIR = Path("presets/Characters")
CARDS_PRESETS_DIR = Path("presets/Cards")
SETTINGS_FILE = Path("saved/settings.json")
SESSION_FILE = Path("saved/session.json")
PARAMS_DIR = Path("presets/model_params")
class CharacterWindow(QWidget, Ui_CharacterForm):
0
2023-11-15 20:44:28+00:00
16k
believethehype/nostrdvm
main.py
[ { "identifier": "Bot", "path": "nostr_dvm/bot.py", "snippet": "class Bot:\n job_list: list\n\n # This is a simple list just to keep track which events we created and manage, so we don't pay for other requests\n def __init__(self, dvm_config, admin_config=None):\n self.NAME = \"Bot\"\n dvm_config.DB = \"db/\" + self.NAME + \".db\"\n self.dvm_config = dvm_config\n nip89config = NIP89Config()\n nip89config.NAME = self.NAME\n self.dvm_config.NIP89 = nip89config\n self.admin_config = admin_config\n self.keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY)\n wait_for_send = True\n skip_disconnected_relays = True\n opts = (Options().wait_for_send(wait_for_send).send_timeout(timedelta(seconds=self.dvm_config.RELAY_TIMEOUT))\n .skip_disconnected_relays(skip_disconnected_relays))\n signer = ClientSigner.keys(self.keys)\n self.client = Client.with_opts(signer, opts)\n\n pk = self.keys.public_key()\n\n self.job_list = []\n\n print(\"Nostr BOT public key: \" + str(pk.to_bech32()) + \" Hex: \" + str(pk.to_hex()) + \" Name: \" + self.NAME +\n \" Supported DVM tasks: \" +\n ', '.join(p.NAME + \":\" + p.TASK for p in self.dvm_config.SUPPORTED_DVMS) + \"\\n\")\n\n for relay in self.dvm_config.RELAY_LIST:\n self.client.add_relay(relay)\n self.client.connect()\n\n zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_ZAP]).since(Timestamp.now())\n dm_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_DM]).since(Timestamp.now())\n kinds = [EventDefinitions.KIND_NIP90_GENERIC, EventDefinitions.KIND_FEEDBACK]\n for dvm in self.dvm_config.SUPPORTED_DVMS:\n if dvm.KIND not in kinds:\n kinds.append(dvm.KIND + 1000)\n dvm_filter = (Filter().kinds(kinds).since(Timestamp.now()))\n\n self.client.subscribe([zap_filter, dm_filter, dvm_filter])\n\n create_sql_table(self.dvm_config.DB)\n admin_make_database_updates(adminconfig=self.admin_config, dvmconfig=self.dvm_config, client=self.client)\n\n class NotificationHandler(HandleNotification):\n client = self.client\n dvm_config = self.dvm_config\n keys = self.keys\n\n def handle(self, relay_url, nostr_event):\n if (EventDefinitions.KIND_NIP90_EXTRACT_TEXT + 1000 <= nostr_event.kind()\n <= EventDefinitions.KIND_NIP90_GENERIC + 1000):\n handle_nip90_response_event(nostr_event)\n elif nostr_event.kind() == EventDefinitions.KIND_FEEDBACK:\n handle_nip90_feedback(nostr_event)\n elif nostr_event.kind() == EventDefinitions.KIND_DM:\n handle_dm(nostr_event)\n elif nostr_event.kind() == EventDefinitions.KIND_ZAP:\n handle_zap(nostr_event)\n\n def handle_msg(self, relay_url, msg):\n return\n\n def handle_dm(nostr_event):\n sender = nostr_event.pubkey().to_hex()\n if sender == self.keys.public_key().to_hex():\n return\n\n try:\n decrypted_text = nip04_decrypt(self.keys.secret_key(), nostr_event.pubkey(), nostr_event.content())\n user = get_or_add_user(db=self.dvm_config.DB, npub=sender, client=self.client, config=self.dvm_config)\n print(\"[\" + self.NAME + \"] Message from \" + user.name + \": \" + decrypted_text)\n\n # if user selects an index from the overview list...\n if decrypted_text[0].isdigit():\n split = decrypted_text.split(' ')\n index = int(split[0]) - 1\n # if user sends index info, e.g. 
1 info, we fetch the nip89 information and reply with it.\n if len(split) > 1 and split[1].lower() == \"info\":\n answer_nip89(nostr_event, index)\n # otherwise we probably have to do some work, so build an event from input and send it to the DVM\n else:\n task = self.dvm_config.SUPPORTED_DVMS[index].TASK\n print(\"[\" + self.NAME + \"] Request from \" + str(user.name) + \" (\" + str(user.nip05) +\n \", Balance: \" + str(user.balance) + \" Sats) Task: \" + str(task))\n\n if user.isblacklisted:\n # If users are blacklisted for some reason, tell them.\n answer_blacklisted(nostr_event)\n\n else:\n # Parse inputs to params\n tags = build_params(decrypted_text, nostr_event, index)\n p_tag = Tag.parse(['p', self.dvm_config.SUPPORTED_DVMS[index].PUBLIC_KEY])\n\n if self.dvm_config.SUPPORTED_DVMS[index].SUPPORTS_ENCRYPTION:\n tags_str = []\n for tag in tags:\n tags_str.append(tag.as_vec())\n params_as_str = json.dumps(tags_str)\n print(params_as_str)\n # and encrypt them\n encrypted_params = nip04_encrypt(self.keys.secret_key(),\n PublicKey.from_hex(\n self.dvm_config.SUPPORTED_DVMS[index].PUBLIC_KEY),\n params_as_str)\n # add encrypted and p tag on the outside\n encrypted_tag = Tag.parse(['encrypted'])\n # add the encrypted params to the content\n nip90request = (EventBuilder(self.dvm_config.SUPPORTED_DVMS[index].KIND,\n encrypted_params, [p_tag, encrypted_tag]).\n to_event(self.keys))\n else:\n tags.append(p_tag)\n\n nip90request = (EventBuilder(self.dvm_config.SUPPORTED_DVMS[index].KIND,\n \"\", tags).\n to_event(self.keys))\n\n # remember in the job_list that we have made an event, if anybody asks for payment,\n # we know we actually sent the request\n entry = {\"npub\": user.npub, \"event_id\": nip90request.id().to_hex(),\n \"dvm_key\": self.dvm_config.SUPPORTED_DVMS[index].PUBLIC_KEY, \"is_paid\": False}\n self.job_list.append(entry)\n\n # send the event to the DVM\n send_event(nip90request, client=self.client, dvm_config=self.dvm_config)\n # print(nip90request.as_json())\n\n\n\n elif decrypted_text.lower().startswith(\"balance\"):\n time.sleep(3.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"Your current balance is \" + str(\n user.balance) + \" Sats. Zap me to add to your balance. I will use your balance interact with the DVMs for you.\\n\"\n \"I support both public and private Zaps, as well as Zapplepay.\\n\"\n \"Alternativly you can add a #cashu token with \\\"-cashu cashuASomeToken\\\" to your command.\\n Make sure the token is worth the requested amount + \"\n \"mint fees (at least 3 sat).\\n Not all DVMs might accept Cashu tokens.\"\n , None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n elif decrypted_text.startswith(\"cashuA\"):\n print(\"Received Cashu token:\" + decrypted_text)\n cashu_redeemed, cashu_message, total_amount, fees = redeem_cashu(decrypted_text, self.dvm_config,\n self.client)\n print(cashu_message)\n if cashu_message == \"success\":\n update_user_balance(self.dvm_config.DB, sender, total_amount, client=self.client,\n config=self.dvm_config)\n else:\n time.sleep(2.0)\n message = \"Error: \" + cashu_message + \". 
Token has not been redeemed.\"\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, PublicKey.from_hex(sender), message,\n None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=self.dvm_config)\n elif decrypted_text.lower().startswith(\"what's the second best\"):\n time.sleep(3.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"No, there is no second best.\\n\\nhttps://cdn.nostr.build/p/mYLv.mp4\",\n nostr_event.id()).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=self.dvm_config)\n\n else:\n # Build an overview of known DVMs and send it to the user\n answer_overview(nostr_event)\n\n except Exception as e:\n print(\"Error in bot \" + str(e))\n\n def handle_nip90_feedback(nostr_event):\n print(nostr_event.as_json())\n try:\n is_encrypted = False\n status = \"\"\n etag = \"\"\n ptag = \"\"\n content = nostr_event.content()\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"status\":\n status = tag.as_vec()[1]\n if len(tag.as_vec()) > 2:\n content = tag.as_vec()[2]\n elif tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"p\":\n ptag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"encrypted\":\n is_encrypted = True\n\n if is_encrypted:\n if ptag == self.keys.public_key().to_hex():\n tags_str = nip04_decrypt(Keys.from_sk_str(dvm_config.PRIVATE_KEY).secret_key(),\n nostr_event.pubkey(), nostr_event.content())\n params = json.loads(tags_str)\n params.append(Tag.parse([\"p\", ptag]).as_vec())\n params.append(Tag.parse([\"encrypted\"]).as_vec())\n event_as_json = json.loads(nostr_event.as_json())\n event_as_json['tags'] = params\n event_as_json['content'] = \"\"\n nostr_event = Event.from_json(json.dumps(event_as_json))\n\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"status\":\n status = tag.as_vec()[1]\n if len(tag.as_vec()) > 2:\n content = tag.as_vec()[2]\n elif tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"content\":\n content = tag.as_vec()[1]\n\n else:\n return\n\n if status == \"success\" or status == \"error\" or status == \"processing\" or status == \"partial\" and content != \"\":\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n if entry is not None and entry['dvm_key'] == nostr_event.pubkey().to_hex():\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry['npub'],\n client=self.client, config=self.dvm_config)\n time.sleep(2.0)\n reply_event = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(entry['npub']),\n content,\n None).to_event(self.keys)\n print(status + \": \" + content)\n print(\n \"[\" + self.NAME + \"] Received reaction from \" + nostr_event.pubkey().to_hex() + \" message to orignal sender \" + user.name)\n send_event(reply_event, client=self.client, dvm_config=dvm_config)\n\n elif status == \"payment-required\" or status == \"partial\":\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"amount\":\n amount_msats = int(tag.as_vec()[1])\n amount = int(amount_msats / 1000)\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n if entry is not None and entry['is_paid'] is False and entry[\n 'dvm_key'] == nostr_event.pubkey().to_hex():\n # if we get a bolt11, we pay and move on\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry[\"npub\"],\n client=self.client, config=self.dvm_config)\n if user.balance >= amount:\n balance = max(user.balance - amount, 0)\n update_sql_table(db=self.dvm_config.DB, npub=user.npub, balance=balance,\n 
iswhitelisted=user.iswhitelisted, isblacklisted=user.isblacklisted,\n nip05=user.nip05, lud16=user.lud16, name=user.name,\n lastactive=Timestamp.now().as_secs())\n evt = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(entry[\"npub\"]),\n \"Paid \" + str(\n amount) + \" Sats from balance to DVM. New balance is \" +\n str(balance)\n + \" Sats.\\n\",\n None).to_event(self.keys)\n\n print(\n \"[\" + self.NAME + \"] Replying \" + user.name + \" with \\\"scheduled\\\" confirmation\")\n send_event(evt, client=self.client, dvm_config=dvm_config)\n else:\n print(\"Bot payment-required\")\n time.sleep(2.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(entry[\"npub\"]),\n \"Current balance: \" + str(\n user.balance) + \" Sats. Balance of \" + str(\n amount) + \" Sats required. Please zap me with at least \" +\n str(int(amount - user.balance))\n + \" Sats, then try again.\",\n None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=dvm_config)\n return\n\n if len(tag.as_vec()) > 2:\n bolt11 = tag.as_vec()[2]\n # else we create a zap\n else:\n user = get_or_add_user(db=self.dvm_config.DB, npub=nostr_event.pubkey().to_hex(),\n client=self.client, config=self.dvm_config)\n print(\"Paying: \" + user.name)\n bolt11 = zaprequest(user.lud16, amount, \"Zap\", nostr_event, self.keys,\n self.dvm_config,\n \"private\")\n if bolt11 == None:\n print(\"Receiver has no Lightning address\")\n return\n try:\n print(bolt11)\n payment_hash = pay_bolt11_ln_bits(bolt11, self.dvm_config)\n self.job_list[self.job_list.index(entry)]['is_paid'] = True\n print(\"[\" + self.NAME + \"] payment_hash: \" + payment_hash +\n \" Forwarding payment of \" + str(amount) + \" Sats to DVM\")\n except Exception as e:\n print(e)\n\n\n except Exception as e:\n print(e)\n\n def handle_nip90_response_event(nostr_event: Event):\n try:\n ptag = \"\"\n etag = \"\"\n is_encrypted = False\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"p\":\n ptag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"encrypted\":\n is_encrypted = True\n\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n if entry is not None and entry[\n 'dvm_key'] == nostr_event.pubkey().to_hex():\n print(entry)\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry['npub'],\n client=self.client, config=self.dvm_config)\n\n self.job_list.remove(entry)\n content = nostr_event.content()\n if is_encrypted:\n if ptag == self.keys.public_key().to_hex():\n content = nip04_decrypt(self.keys.secret_key(), nostr_event.pubkey(), content)\n else:\n return\n\n dvms = [x for x in self.dvm_config.SUPPORTED_DVMS if\n x.PUBLIC_KEY == nostr_event.pubkey().to_hex() and x.KIND == nostr_event.kind() - 1000]\n if len(dvms) > 0:\n dvm = dvms[0]\n if dvm.dvm_config.EXTERNAL_POST_PROCESS_TYPE != PostProcessFunctionType.NONE:\n if dvm.dvm_config.EXTERNAL_POST_PROCESS_TYPE == PostProcessFunctionType.LIST_TO_EVENTS:\n content = post_process_list_to_events(content)\n elif dvm.dvm_config.EXTERNAL_POST_PROCESS_TYPE == PostProcessFunctionType.LIST_TO_USERS:\n content = post_process_list_to_users(content)\n\n print(\"[\" + self.NAME + \"] Received results, message to orignal sender \" + user.name)\n time.sleep(1.0)\n reply_event = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(user.npub),\n content,\n None).to_event(self.keys)\n send_event(reply_event, client=self.client, dvm_config=dvm_config)\n\n except Exception as e:\n 
print(e)\n\n def handle_zap(zap_event):\n print(\"[\" + self.NAME + \"] Zap received\")\n try:\n invoice_amount, zapped_event, sender, message, anon = parse_zap_event_tags(zap_event,\n self.keys, self.NAME,\n self.client, self.dvm_config)\n\n etag = \"\"\n for tag in zap_event.tags():\n if tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n\n user = get_or_add_user(self.dvm_config.DB, sender, client=self.client, config=self.dvm_config)\n\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n print(entry)\n # print(entry['dvm_key'])\n # print(str(zapped_event.pubkey().to_hex()))\n # print(str(zap_event.pubkey().to_hex()))\n print(sender)\n if entry is not None and entry['is_paid'] is True and entry['dvm_key'] == sender:\n # if we get a bolt11, we pay and move on\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry[\"npub\"],\n client=self.client, config=self.dvm_config)\n\n sender = user.npub\n\n if zapped_event is not None:\n if not anon:\n print(\"[\" + self.NAME + \"] Note Zap received for Bot balance: \" + str(\n invoice_amount) + \" Sats from \" + str(\n user.name))\n update_user_balance(self.dvm_config.DB, sender, invoice_amount, client=self.client,\n config=self.dvm_config)\n\n # a regular note\n elif not anon:\n print(\"[\" + self.NAME + \"] Profile Zap received for Bot balance: \" + str(\n invoice_amount) + \" Sats from \" + str(\n user.name))\n update_user_balance(self.dvm_config.DB, sender, invoice_amount, client=self.client,\n config=self.dvm_config)\n\n except Exception as e:\n print(\"[\" + self.NAME + \"] Error during content decryption:\" + str(e))\n\n def answer_overview(nostr_event):\n message = \"DVMs that I support:\\n\\n\"\n index = 1\n for p in self.dvm_config.SUPPORTED_DVMS:\n if p.PER_UNIT_COST != 0 and p.PER_UNIT_COST is not None:\n message += (str(index) + \" \" + p.NAME + \" \" + p.TASK + \"\\n\\t\" + str(p.FIX_COST) +\n \" Sats + \" + str(p.PER_UNIT_COST) + \" Sats per Second\\n\\n\")\n else:\n message += (str(index) + \" \" + p.NAME + \" \" + p.TASK + \"\\n\\t\" + str(p.FIX_COST) +\n \" Sats\\n\\n\")\n index += 1\n\n time.sleep(3.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n message + \"\\nSelect an Index and provide an input (\"\n \"e.g. \\\"2 A purple ostrich\\\")\\nType \\\"index info\\\" to learn \"\n \"more about each DVM. (e.g. \\\"2 info\\\")\\n\\n\"\n \"Type \\\"balance\\\" to see your current balance\",\n nostr_event.id()).to_event(self.keys)\n\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n def answer_blacklisted(nostr_event):\n # For some reason an admin might blacklist npubs, e.g. 
for abusing the service\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"Your are currently blocked from all \"\n \"services.\", None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n def answer_nip89(nostr_event, index):\n info = print_dvm_info(self.client, index)\n time.sleep(2.0)\n if info is not None:\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n info, None).to_event(self.keys)\n else:\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"No NIP89 Info found for \" +\n self.dvm_config.SUPPORTED_DVMS[index].NAME,\n None).to_event(self.keys)\n\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n def build_params(decrypted_text, nostr_event, index):\n tags = []\n splitzero = decrypted_text.split(' -')\n split = splitzero[0].split(' ')\n # If only a command without parameters is sent, we assume no input is required, and that means the dvm might take in the user as input (e.g. for content discovery)\n if len(split) == 1:\n remaining_text = decrypted_text.replace(split[0], \"\")\n params = remaining_text.split(\" -\")\n tag = Tag.parse([\"param\", \"user\", nostr_event.pubkey().to_hex()])\n tags.append(tag)\n for i in params:\n print(i)\n if i != \" \":\n try:\n split = i.split(\" \")\n if len(split) > 1:\n param = str(split[0])\n print(str(param))\n value = str(split[1])\n print(str(value))\n if param == \"cashu\":\n tag = Tag.parse([param, value])\n else:\n if param == \"user\":\n if value.startswith(\"@\") or value.startswith(\"nostr:\") or value.startswith(\n \"npub\"):\n value = PublicKey.from_bech32(\n value.replace(\"@\", \"\").replace(\"nostr:\", \"\")).to_hex()\n tag = Tag.parse([\"param\", param, value])\n tags.append(tag)\n except Exception as e:\n print(e)\n print(\"Couldn't add \" + str(i))\n output = Tag.parse([\"output\", \"text/plain\"])\n tags.append(output)\n relay_list = [\"relays\"]\n for relay in self.dvm_config.RELAY_LIST:\n relay_list.append(relay)\n relays = Tag.parse(relay_list)\n tags.append(relays)\n\n return tags\n\n tags = []\n command = decrypted_text.replace(split[0] + \" \", \"\")\n split = command.split(\" -\")\n input = split[0].rstrip()\n if input.startswith(\"http\"):\n temp = input.split(\" \")\n if len(temp) > 1:\n input_type = \"url\"\n i_tag1 = Tag.parse([\"i\", temp[0], input_type])\n tags.append(i_tag1)\n input_type = \"text\"\n i_tag2 = Tag.parse([\"i\", input.replace(temp[0], \"\").lstrip(), input_type])\n tags.append(i_tag2)\n else:\n input_type = \"url\"\n i_tag = Tag.parse([\"i\", input, input_type])\n tags.append(i_tag)\n elif (input.startswith(\"nevent\") or input.startswith(\"nostr:nevent\") or input.startswith(\"note\") or\n input.startswith(\"nostr:note\")):\n input_type = \"event\"\n if str(input).startswith('note'):\n event_id = EventId.from_bech32(input)\n elif str(input).startswith(\"nevent\"):\n event_id = Nip19Event.from_bech32(input).event_id()\n elif str(input).startswith('nostr:note'):\n event_id = EventId.from_nostr_uri(input)\n elif str(input).startswith(\"nostr:nevent\"):\n event_id = Nip19Event.from_nostr_uri(input).event_id()\n else:\n event_id = EventId.from_hex(input)\n i_tag = Tag.parse([\"i\", event_id.to_hex(), input_type])\n tags.append(i_tag)\n else:\n print(input)\n input_type = \"text\"\n i_tag = Tag.parse([\"i\", input, input_type])\n tags.append(i_tag)\n\n alt_tag = Tag.parse([\"alt\", self.dvm_config.SUPPORTED_DVMS[index].TASK])\n tags.append(alt_tag)\n relaylist = 
[\"relays\"]\n for relay in self.dvm_config.RELAY_LIST:\n relaylist.append(relay)\n relays_tag = Tag.parse(relaylist)\n tags.append(relays_tag)\n output_tag = Tag.parse([\"output\", \"text/plain\"])\n tags.append(output_tag)\n remaining_text = command.replace(input, \"\")\n print(remaining_text)\n\n params = remaining_text.rstrip().split(\" -\")\n\n for i in params:\n print(i)\n if i != \" \":\n try:\n split = i.split(\" \")\n if len(split) > 1:\n param = str(split[0])\n print(str(param))\n value = str(split[1])\n print(str(value))\n if param == \"cashu\":\n tag = Tag.parse([param, value])\n else:\n if param == \"user\":\n if value.startswith(\"@\") or value.startswith(\"nostr:\") or value.startswith(\"npub\"):\n value = PublicKey.from_bech32(\n value.replace(\"@\", \"\").replace(\"nostr:\", \"\")).to_hex()\n tag = Tag.parse([\"param\", param, value])\n tags.append(tag)\n print(\"Added params: \" + str(tag.as_vec()))\n except Exception as e:\n print(e)\n print(\"Couldn't add \" + str(i))\n\n return tags\n\n def print_dvm_info(client, index):\n pubkey = self.dvm_config.SUPPORTED_DVMS[index].dvm_config.PUBLIC_KEY\n kind = self.dvm_config.SUPPORTED_DVMS[index].KIND\n nip89content_str = nip89_fetch_events_pubkey(client, pubkey, kind)\n print(nip89content_str)\n if nip89content_str is not None:\n nip89content = json.loads(nip89content_str)\n info = \"\"\n cashu_accepted = False\n encryption_supported = False\n\n if nip89content.get(\"name\"):\n info += \"Name: \" + nip89content.get(\"name\") + \"\\n\"\n if nip89content.get(\"image\"):\n info += nip89content.get(\"image\") + \"\\n\"\n if nip89content.get(\"about\"):\n info += \"About:\\n\" + nip89content.get(\"about\") + \"\\n\\n\"\n if nip89content.get(\"cashuAccepted\"):\n cashu_accepted = str(nip89content.get(\"cashuAccepted\"))\n if nip89content.get(\"encryptionSupported\"):\n encryption_supported = str(nip89content.get(\"encryptionSupported\"))\n\n info += \"Encryption supported: \" + str(encryption_supported) + \"\\n\"\n info += \"Cashu accepted: \" + str(cashu_accepted) + \"\\n\\n\"\n if nip89content.get(\"nip90Params\"):\n params = nip89content[\"nip90Params\"]\n info += \"\\nParameters:\\n\"\n for param in params:\n info += \"-\" + param + '\\n'\n info += \"Required: \" + str(params[param]['required']) + '\\n'\n info += \"Possible Values: \" + json.dumps(params[param]['values']) + '\\n\\n'\n return info\n\n return None\n\n self.client.handle_notifications(NotificationHandler())\n\n try:\n while True:\n time.sleep(1.0)\n except KeyboardInterrupt:\n print('Stay weird!')\n os.kill(os.getpid(), signal.SIGTERM)" }, { "identifier": "videogeneration_replicate_svd", "path": "nostr_dvm/tasks/videogeneration_replicate_svd.py", "snippet": "class VideoGenerationReplicateSVD(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_VIDEO\n TASK: str = \"image-to-video\"\n FIX_COST: float = 120\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "imagegeneration_replicate_sdxl", "path": "nostr_dvm/tasks/imagegeneration_replicate_sdxl.py", "snippet": "class ImageGenerationReplicateSDXL(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE\n TASK: str = \"text-to-image\"\n FIX_COST: float = 
120\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textgeneration_llmlite", "path": "nostr_dvm/tasks/textgeneration_llmlite.py", "snippet": "class TextGenerationLLMLite(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT\n TASK: str = \"text-to-text\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "trending_notes_nostrband", "path": "nostr_dvm/tasks/trending_notes_nostrband.py", "snippet": "class TrendingNotesNostrBand(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_CONTENT_DISCOVERY\n TASK: str = \"trending-content\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\n def post_process(self, result, event):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "discovery_inactive_follows", "path": "nostr_dvm/tasks/discovery_inactive_follows.py", "snippet": "class DiscoverInactiveFollows(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_PEOPLE_DISCOVERY\n TASK: str = \"inactive-follows\"\n FIX_COST: float = 50\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\n def scanList(users, instance, i, st, notactivesince):\n def post_process(self, result, event):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "translation_google", "path": "nostr_dvm/tasks/translation_google.py", "snippet": "class TranslationGoogle(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_TRANSLATE_TEXT\n TASK: str = \"translation\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textextraction_pdf", "path": "nostr_dvm/tasks/textextraction_pdf.py", "snippet": "class TextExtractionPDF(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT\n TASK: str = \"pdf-to-text\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, 
client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "translation_libretranslate", "path": "nostr_dvm/tasks/translation_libretranslate.py", "snippet": "class TranslationLibre(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_TRANSLATE_TEXT\n TASK: str = \"translation\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None, task=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textextraction_google", "path": "nostr_dvm/tasks/textextraction_google.py", "snippet": "class SpeechToTextGoogle(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT\n TASK: str = \"speech-to-text\"\n FIX_COST: float = 10\n PER_UNIT_COST: float = 0.1\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "convert_media", "path": "nostr_dvm/tasks/convert_media.py", "snippet": "class MediaConverter(DVMTaskInterface):\n KIND = EventDefinitions.KIND_NIP90_CONVERT_VIDEO\n TASK = \"convert\"\n FIX_COST = 20\n PER_UNIT_COST = 0.1\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "imagegeneration_openai_dalle", "path": "nostr_dvm/tasks/imagegeneration_openai_dalle.py", "snippet": "class ImageGenerationDALLE(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE\n TASK: str = \"text-to-image\"\n FIX_COST: float = 120\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "texttospeech", "path": "nostr_dvm/tasks/texttospeech.py", "snippet": "class TextToSpeech(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH\n TASK: str = \"text-to-speech\"\n FIX_COST: float = 50\n PER_UNIT_COST = 0.5\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "imagegeneration_sd21_mlx", "path": "nostr_dvm/tasks/imagegeneration_sd21_mlx.py", "snippet": "class ImageGenerationMLX(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE\n TASK: str = \"text-to-image\"\n 
FIX_COST: float = 120\n B, H, W, C = x.shape\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "advanced_search", "path": "nostr_dvm/tasks/advanced_search.py", "snippet": "class AdvancedSearch(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_CONTENT_SEARCH\n TASK: str = \"search-content\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\n def post_process(self, result, event):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textgeneration_huggingchat", "path": "nostr_dvm/tasks/textgeneration_huggingchat.py", "snippet": "class TextGenerationHuggingChat(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT\n TASK: str = \"text-to-text\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "summarization_huggingchat", "path": "nostr_dvm/tasks/summarization_huggingchat.py", "snippet": "class TextSummarizationHuggingChat(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_SUMMARIZE_TEXT\n TASK: str = \"summarization\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "AdminConfig", "path": "nostr_dvm/utils/admin_utils.py", "snippet": "class AdminConfig:\n REBROADCAST_NIP89: bool = False\n UPDATE_PROFILE: bool = False\n DELETE_NIP89: bool = False\n WHITELISTUSER: bool = False\n UNWHITELISTUSER: bool = False\n BLACKLISTUSER: bool = False\n DELETEUSER: bool = False\n LISTDATABASE: bool = False\n ClEANDB: bool = False\n\n USERNPUB: str = \"\"\n LUD16: str = \"\"\n\n EVENTID: str = \"\"\n PRIVKEY: str = \"\"" }, { "identifier": "keep_alive", "path": "nostr_dvm/utils/backend_utils.py", "snippet": "def keep_alive():\n try:\n while True:\n time.sleep(10)\n except KeyboardInterrupt:\n os.kill(os.getpid(), signal.SIGKILL)\n exit(1)" }, { "identifier": "EventDefinitions", "path": "nostr_dvm/utils/definitions.py", "snippet": "class EventDefinitions:\n KIND_DM = 4\n KIND_ZAP = 9735\n KIND_ANNOUNCEMENT = 31990\n KIND_NIP94_METADATA = 1063\n KIND_FEEDBACK = 7000\n KIND_NIP90_EXTRACT_TEXT = 5000\n KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000\n KIND_NIP90_SUMMARIZE_TEXT = 5001\n KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000\n KIND_NIP90_TRANSLATE_TEXT = 5002\n KIND_NIP90_RESULT_TRANSLATE_TEXT = 
KIND_NIP90_TRANSLATE_TEXT + 1000\n KIND_NIP90_GENERATE_TEXT = 5050\n KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000\n KIND_NIP90_GENERATE_IMAGE = 5100\n KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000\n KIND_NIP90_CONVERT_VIDEO = 5200\n KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000\n KIND_NIP90_GENERATE_VIDEO = 5202\n KIND_NIP90_TEXT_TO_SPEECH = 5250\n KIND_NIP90_RESULT_TEXT_TO_SPEECH = KIND_NIP90_TEXT_TO_SPEECH + 1000\n KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000\n KIND_NIP90_CONTENT_DISCOVERY = 5300\n KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000\n KIND_NIP90_PEOPLE_DISCOVERY = 5301\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000\n KIND_NIP90_CONTENT_SEARCH = 5302\n KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000\n KIND_NIP90_GENERIC = 5999\n KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000\n ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,\n KIND_NIP90_RESULT_SUMMARIZE_TEXT,\n KIND_NIP90_RESULT_TRANSLATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_IMAGE,\n KIND_NIP90_CONTENT_DISCOVERY,\n KIND_NIP90_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_CONVERT_VIDEO,\n KIND_NIP90_RESULT_CONTENT_DISCOVERY,\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_GENERATE_VIDEO,\n KIND_NIP90_RESULT_GENERIC]" }, { "identifier": "DVMConfig", "path": "nostr_dvm/utils/dvmconfig.py", "snippet": "class DVMConfig:\n SUPPORTED_DVMS = []\n PRIVATE_KEY: str = \"\"\n PUBLIC_KEY: str = \"\"\n FIX_COST: float = None\n PER_UNIT_COST: float = None\n\n RELAY_LIST = [\"wss://relay.damus.io\", \"wss://nostr-pub.wellorder.net\", \"wss://nos.lol\", \"wss://nostr.wine\",\n \"wss://nostr.mom\", \"wss://nostr.oxtr.dev\", \"wss://relay.nostr.bg\",\n \"wss://relay.f7z.io\", \"wss://pablof7z.nostr1.com\", \"wss://relay.nostr.net\", \"wss://140.f7z.io\",\n \"wss://relay.snort.social\", \"wss://offchain.pub/\", \"wss://relay.nostr.band\"]\n\n RELAY_TIMEOUT = 5\n EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external\n LNBITS_INVOICE_KEY = '' # Will all automatically generated by default, or read from .env\n LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.\n LNBITS_URL = 'https://lnbits.com'\n LN_ADDRESS = ''\n SCRIPT = ''\n IDENTIFIER = ''\n USE_OWN_VENV = True # Make an own venv for each dvm's process function.Disable if you want to install packages into main venv. 
Only recommended if you dont want to run dvms with different dependency versions\n DB: str\n NEW_USER_BALANCE: int = 0 # Free credits for new users\n NIP89: NIP89Config\n SHOW_RESULT_BEFORE_PAYMENT: bool = False # if this is true show results even when not paid right after autoprocess" }, { "identifier": "build_external_dvm", "path": "nostr_dvm/utils/external_dvm_utils.py", "snippet": "def build_external_dvm(pubkey, task, kind, fix_cost, per_unit_cost, config,\n external_post_process=PostProcessFunctionType.NONE):\n dvm_config = DVMConfig()\n dvm_config.PUBLIC_KEY = PublicKey.from_hex(pubkey).to_hex()\n dvm_config.FIX_COST = fix_cost\n dvm_config.PER_UNIT_COST = per_unit_cost\n dvm_config.EXTERNAL_POST_PROCESS_TYPE = external_post_process\n\n opts = (Options().wait_for_send(True).send_timeout(timedelta(seconds=config.RELAY_TIMEOUT))\n .skip_disconnected_relays(True))\n keys = Keys.from_sk_str(config.PRIVATE_KEY)\n signer = ClientSigner.keys(keys)\n client = Client.with_opts(signer, opts)\n\n\n for relay in config.RELAY_LIST:\n client.add_relay(relay)\n client.connect()\n\n nip89content_str = nip89_fetch_events_pubkey(client, pubkey, kind)\n name = \"External DVM\"\n image = \"https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg\"\n about = \"An External DVM with no info\"\n nip90params = {}\n encryption_supported = False\n cashu_accepted = False\n\n if nip89content_str is not None:\n print(nip89content_str)\n nip89content = json.loads(nip89content_str)\n if nip89content.get(\"name\"):\n name = nip89content.get(\"name\")\n if nip89content.get(\"image\"):\n image = nip89content.get(\"image\")\n if nip89content.get(\"about\"):\n about = nip89content.get(\"about\")\n if nip89content.get(\"nip90Params\"):\n nip90params = nip89content[\"nip90Params\"]\n if nip89content.get(\"encryptionSupported\"):\n encryption_supported = nip89content[\"encryptionSupported\"]\n if nip89content.get(\"cashuAccepted\"):\n cashu_accepted = nip89content[\"cashuAccepted\"]\n else:\n print(\"No NIP89 set for \"+ name)\n nip89info = {\n \"name\": name,\n \"image\": image,\n \"about\": about,\n \"encryptionSupported\": encryption_supported,\n \"cashuAccepted\": cashu_accepted,\n \"nip90Params\": nip90params\n }\n nip89config = NIP89Config()\n nip89config.KIND = kind\n nip89config.CONTENT = json.dumps(nip89info)\n\n interface = DVMTaskInterface(name=name, dvm_config=dvm_config, nip89config=nip89config, task=task)\n interface.SUPPORTS_ENCRYPTION = encryption_supported\n interface.ACCEPTS_CASHU = cashu_accepted\n\n return interface" }, { "identifier": "check_and_set_private_key", "path": "nostr_dvm/utils/nostr_utils.py", "snippet": "def check_and_set_private_key(identifier):\n if not os.getenv(\"DVM_PRIVATE_KEY_\" + identifier.upper()):\n pk = Keys.generate().secret_key().to_hex()\n add_pk_to_env_file(\"DVM_PRIVATE_KEY_\" + identifier.upper(), pk)\n return pk\n else:\n return os.getenv(\"DVM_PRIVATE_KEY_\" + identifier.upper())" }, { "identifier": "PostProcessFunctionType", "path": "nostr_dvm/utils/output_utils.py", "snippet": "class PostProcessFunctionType:\n NONE = 0\n LIST_TO_USERS = 1\n LIST_TO_EVENTS = 2" }, { "identifier": "check_and_set_ln_bits_keys", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def check_and_set_ln_bits_keys(identifier, npub):\n if not os.getenv(\"LNBITS_INVOICE_KEY_\" + identifier.upper()):\n invoicekey, adminkey, walletid, userid, success = create_lnbits_account(identifier)\n add_key_to_env_file(\"LNBITS_INVOICE_KEY_\" + identifier.upper(), 
invoicekey)\n add_key_to_env_file(\"LNBITS_ADMIN_KEY_\" + identifier.upper(), adminkey)\n add_key_to_env_file(\"LNBITS_USER_ID_\" + identifier.upper(), userid)\n add_key_to_env_file(\"LNBITS_WALLET_ID_\" + identifier.upper(), userid)\n\n lnaddress = \"\"\n pin = \"\"\n if os.getenv(\"NOSTDRESS_DOMAIN\") and success != \"failed\":\n print(os.getenv(\"NOSTDRESS_DOMAIN\"))\n lnaddress, pin = make_ln_address_nostdress(identifier, npub, \" \", os.getenv(\"NOSTDRESS_DOMAIN\"))\n add_key_to_env_file(\"LNADDRESS_\" + identifier.upper(), lnaddress)\n add_key_to_env_file(\"LNADDRESS_PIN_\" + identifier.upper(), pin)\n\n return invoicekey, adminkey, userid, walletid, lnaddress\n else:\n return (os.getenv(\"LNBITS_INVOICE_KEY_\" + identifier.upper()),\n os.getenv(\"LNBITS_ADMIN_KEY_\" + identifier.upper()),\n os.getenv(\"LNBITS_USER_ID_\" + identifier.upper()),\n os.getenv(\"LNBITS_WALLET_ID_\" + identifier.upper()),\n os.getenv(\"LNADDRESS_\" + identifier.upper()))" } ]
import os
import dotenv
from pathlib import Path
from sys import platform
from nostr_dvm.bot import Bot
from nostr_dvm.tasks import videogeneration_replicate_svd, imagegeneration_replicate_sdxl, textgeneration_llmlite, \
    trending_notes_nostrband, discovery_inactive_follows, translation_google, textextraction_pdf, \
    translation_libretranslate, textextraction_google, convert_media, imagegeneration_openai_dalle, texttospeech, \
    imagegeneration_sd21_mlx, advanced_search, textgeneration_huggingchat, summarization_huggingchat
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.external_dvm_utils import build_external_dvm
from nostr_dvm.utils.nostr_utils import check_and_set_private_key
from nostr_dvm.utils.output_utils import PostProcessFunctionType
from nostr_dvm.utils.zap_utils import check_and_set_ln_bits_keys
from nostr_sdk import Keys
13,799
# We will run an optional bot that can communicate with the DVMs # Note this is very basic for now and still under development bot_config = DVMConfig() bot_config.PRIVATE_KEY = check_and_set_private_key("bot") npub = Keys.from_sk_str(bot_config.PRIVATE_KEY).public_key().to_bech32() invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys("bot", npub) bot_config.LNBITS_INVOICE_KEY = invoice_key bot_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back bot_config.LNBITS_URL = os.getenv("LNBITS_HOST") # Generate an optional Admin Config, in this case, whenever we give our DVMs this config, they will (re)broadcast # their NIP89 announcement # You can create individual admins configs and hand them over when initializing the dvm, # for example to whilelist users or add to their balance. # If you use this global config, options will be set for all dvms that use it. admin_config = AdminConfig() admin_config.REBROADCAST_NIP89 = False admin_config.LUD16 = lnaddress # Set rebroadcast to true once you have set your NIP89 descriptions and d tags. You only need to rebroadcast once you # want to update your NIP89 descriptions # Update the DVMs (not the bot) profile. For example after you updated the NIP89 or the lnaddress, you can automatically update profiles here. admin_config.UPDATE_PROFILE = False # Spawn some DVMs in the playground and run them # You can add arbitrary DVMs there and instantiate them here # Spawn DVM1 Kind 5000: A local Text Extractor from PDFs pdfextractor = textextraction_pdf.build_example("PDF Extractor", "pdf_extractor", admin_config) # If we don't add it to the bot, the bot will not provide access to the DVM pdfextractor.run() # Spawn DVM2 Kind 5002 Local Text TranslationGoogle, calling the free Google API. translator = translation_google.build_example("Google Translator", "google_translator", admin_config) bot_config.SUPPORTED_DVMS.append(translator) # We add translator to the bot translator.run() # Spawn DVM3 Kind 5002 Local Text TranslationLibre, calling the free LibreTranslateApi, as an alternative. # This will only run and appear on the bot if an endpoint is set in the .env if os.getenv("LIBRE_TRANSLATE_ENDPOINT") is not None and os.getenv("LIBRE_TRANSLATE_ENDPOINT") != "": libre_translator = translation_libretranslate.build_example("Libre Translator", "libre_translator", admin_config) bot_config.SUPPORTED_DVMS.append(libre_translator) # We add translator to the bot libre_translator.run() # Spawn DVM4, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay # per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know, # you're being generous. 
if os.getenv("OPENAI_API_KEY") is not None and os.getenv("OPENAI_API_KEY") != "": dalle = imagegeneration_openai_dalle.build_example("Dall-E 3", "dalle3", admin_config) bot_config.SUPPORTED_DVMS.append(dalle) dalle.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "": sdxlreplicate = imagegeneration_replicate_sdxl.build_example("Stable Diffusion XL", "replicate_sdxl", admin_config) bot_config.SUPPORTED_DVMS.append(sdxlreplicate) sdxlreplicate.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "": svdreplicate = videogeneration_replicate_svd.build_example("Stable Video Diffusion", "replicate_svd", admin_config) bot_config.SUPPORTED_DVMS.append(svdreplicate) svdreplicate.run() #Let's define a function so we can add external DVMs to our bot, we will instanciate it afterwards # Spawn DVM5.. oh wait, actually we don't spawn a new DVM, we use the dvmtaskinterface to define an external dvm by providing some info about it, such as # their pubkey, a name, task, kind etc. (unencrypted) tasktiger_external = build_external_dvm(pubkey="d483935d6bfcef3645195c04c97bbb70aedb6e65665c5ea83e562ca3c7acb978", task="text-to-image", kind=EventDefinitions.KIND_NIP90_GENERATE_IMAGE, fix_cost=80, per_unit_cost=0, config=bot_config) bot_config.SUPPORTED_DVMS.append(tasktiger_external) # Don't run it, it's on someone else's machine, and we simply make the bot aware of it. # DVM: 6 Another external dvm for recommendations: ymhm_external = build_external_dvm(pubkey="6b37d5dc88c1cbd32d75b713f6d4c2f7766276f51c9337af9d32c8d715cc1b93", task="content-discovery", kind=EventDefinitions.KIND_NIP90_CONTENT_DISCOVERY, fix_cost=0, per_unit_cost=0, external_post_process=PostProcessFunctionType.LIST_TO_EVENTS, config=bot_config) # If we get back a list of people or events, we can post-process it to make it readable in social clients bot_config.SUPPORTED_DVMS.append(ymhm_external) # Spawn DVM 7 Find inactive followers googleextractor = textextraction_google.build_example("Extractor", "speech_recognition", admin_config) bot_config.SUPPORTED_DVMS.append(googleextractor) googleextractor.run() # Spawn DVM 8 A Media Grabber/Converter media_bringer = convert_media.build_example("Media Bringer", "media_converter", admin_config) bot_config.SUPPORTED_DVMS.append(media_bringer) media_bringer.run() # Spawn DVM9 Find inactive followers discover_inactive = discovery_inactive_follows.build_example("Bygones", "discovery_inactive_follows", admin_config) bot_config.SUPPORTED_DVMS.append(discover_inactive) discover_inactive.run() trending = trending_notes_nostrband.build_example("Trending Notes on nostr.band", "trending_notes_nostrband", admin_config) bot_config.SUPPORTED_DVMS.append(trending) trending.run() ollama = textgeneration_llmlite.build_example("LLM", "llmlite", admin_config) bot_config.SUPPORTED_DVMS.append(ollama) ollama.run() tts = texttospeech.build_example("Text To Speech Test", "tts", admin_config) bot_config.SUPPORTED_DVMS.append(tts) tts.run()
def playground(): # We will run an optional bot that can communicate with the DVMs # Note this is very basic for now and still under development bot_config = DVMConfig() bot_config.PRIVATE_KEY = check_and_set_private_key("bot") npub = Keys.from_sk_str(bot_config.PRIVATE_KEY).public_key().to_bech32() invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys("bot", npub) bot_config.LNBITS_INVOICE_KEY = invoice_key bot_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back bot_config.LNBITS_URL = os.getenv("LNBITS_HOST") # Generate an optional Admin Config, in this case, whenever we give our DVMs this config, they will (re)broadcast # their NIP89 announcement # You can create individual admins configs and hand them over when initializing the dvm, # for example to whilelist users or add to their balance. # If you use this global config, options will be set for all dvms that use it. admin_config = AdminConfig() admin_config.REBROADCAST_NIP89 = False admin_config.LUD16 = lnaddress # Set rebroadcast to true once you have set your NIP89 descriptions and d tags. You only need to rebroadcast once you # want to update your NIP89 descriptions # Update the DVMs (not the bot) profile. For example after you updated the NIP89 or the lnaddress, you can automatically update profiles here. admin_config.UPDATE_PROFILE = False # Spawn some DVMs in the playground and run them # You can add arbitrary DVMs there and instantiate them here # Spawn DVM1 Kind 5000: A local Text Extractor from PDFs pdfextractor = textextraction_pdf.build_example("PDF Extractor", "pdf_extractor", admin_config) # If we don't add it to the bot, the bot will not provide access to the DVM pdfextractor.run() # Spawn DVM2 Kind 5002 Local Text TranslationGoogle, calling the free Google API. translator = translation_google.build_example("Google Translator", "google_translator", admin_config) bot_config.SUPPORTED_DVMS.append(translator) # We add translator to the bot translator.run() # Spawn DVM3 Kind 5002 Local Text TranslationLibre, calling the free LibreTranslateApi, as an alternative. # This will only run and appear on the bot if an endpoint is set in the .env if os.getenv("LIBRE_TRANSLATE_ENDPOINT") is not None and os.getenv("LIBRE_TRANSLATE_ENDPOINT") != "": libre_translator = translation_libretranslate.build_example("Libre Translator", "libre_translator", admin_config) bot_config.SUPPORTED_DVMS.append(libre_translator) # We add translator to the bot libre_translator.run() # Spawn DVM4, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay # per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know, # you're being generous. 
if os.getenv("OPENAI_API_KEY") is not None and os.getenv("OPENAI_API_KEY") != "": dalle = imagegeneration_openai_dalle.build_example("Dall-E 3", "dalle3", admin_config) bot_config.SUPPORTED_DVMS.append(dalle) dalle.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "": sdxlreplicate = imagegeneration_replicate_sdxl.build_example("Stable Diffusion XL", "replicate_sdxl", admin_config) bot_config.SUPPORTED_DVMS.append(sdxlreplicate) sdxlreplicate.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "": svdreplicate = videogeneration_replicate_svd.build_example("Stable Video Diffusion", "replicate_svd", admin_config) bot_config.SUPPORTED_DVMS.append(svdreplicate) svdreplicate.run() #Let's define a function so we can add external DVMs to our bot, we will instanciate it afterwards # Spawn DVM5.. oh wait, actually we don't spawn a new DVM, we use the dvmtaskinterface to define an external dvm by providing some info about it, such as # their pubkey, a name, task, kind etc. (unencrypted) tasktiger_external = build_external_dvm(pubkey="d483935d6bfcef3645195c04c97bbb70aedb6e65665c5ea83e562ca3c7acb978", task="text-to-image", kind=EventDefinitions.KIND_NIP90_GENERATE_IMAGE, fix_cost=80, per_unit_cost=0, config=bot_config) bot_config.SUPPORTED_DVMS.append(tasktiger_external) # Don't run it, it's on someone else's machine, and we simply make the bot aware of it. # DVM: 6 Another external dvm for recommendations: ymhm_external = build_external_dvm(pubkey="6b37d5dc88c1cbd32d75b713f6d4c2f7766276f51c9337af9d32c8d715cc1b93", task="content-discovery", kind=EventDefinitions.KIND_NIP90_CONTENT_DISCOVERY, fix_cost=0, per_unit_cost=0, external_post_process=PostProcessFunctionType.LIST_TO_EVENTS, config=bot_config) # If we get back a list of people or events, we can post-process it to make it readable in social clients bot_config.SUPPORTED_DVMS.append(ymhm_external) # Spawn DVM 7 Find inactive followers googleextractor = textextraction_google.build_example("Extractor", "speech_recognition", admin_config) bot_config.SUPPORTED_DVMS.append(googleextractor) googleextractor.run() # Spawn DVM 8 A Media Grabber/Converter media_bringer = convert_media.build_example("Media Bringer", "media_converter", admin_config) bot_config.SUPPORTED_DVMS.append(media_bringer) media_bringer.run() # Spawn DVM9 Find inactive followers discover_inactive = discovery_inactive_follows.build_example("Bygones", "discovery_inactive_follows", admin_config) bot_config.SUPPORTED_DVMS.append(discover_inactive) discover_inactive.run() trending = trending_notes_nostrband.build_example("Trending Notes on nostr.band", "trending_notes_nostrband", admin_config) bot_config.SUPPORTED_DVMS.append(trending) trending.run() ollama = textgeneration_llmlite.build_example("LLM", "llmlite", admin_config) bot_config.SUPPORTED_DVMS.append(ollama) ollama.run() tts = texttospeech.build_example("Text To Speech Test", "tts", admin_config) bot_config.SUPPORTED_DVMS.append(tts) tts.run()
search = advanced_search.build_example("Advanced Search", "discovery_content_search", admin_config)
14
2023-11-17 18:32:56+00:00
16k
IBM/oper8
tests/watch_manager/python_watch_manager/threads/test_watch_thread.py
[ { "identifier": "DryRunDeployManager", "path": "oper8/deploy_manager/dry_run_deploy_manager.py", "snippet": "class DryRunDeployManager(DeployManagerBase):\n \"\"\"\n Deploy manager which doesn't actually deploy!\n \"\"\"\n\n def __init__(self, resources=None, owner_cr=None, strict_resource_version=False):\n \"\"\"Construct with a static value to use for whether or not the functions\n should report change.\n \"\"\"\n self._owner_cr = owner_cr\n self._cluster_content = {}\n self.strict_resource_version = strict_resource_version\n\n # Dicts of registered watches and watchers\n self._watches = {}\n self._finalizers = {}\n\n # Deploy provided resources\n self._deploy(resources or [], call_watches=False, manage_owner_references=False)\n\n ## Interface ###############################################################\n\n def deploy(self, resource_definitions, manage_owner_references=True, **_):\n log.info(\"DRY RUN deploy\")\n return self._deploy(\n resource_definitions, manage_owner_references=manage_owner_references\n )\n\n def disable(self, resource_definitions):\n log.info(\"DRY RUN disable\")\n changed = False\n for resource in resource_definitions:\n api_version = resource.get(\"apiVersion\")\n kind = resource.get(\"kind\")\n name = resource.get(\"metadata\", {}).get(\"name\")\n namespace = resource.get(\"metadata\", {}).get(\"namespace\")\n _, content = self.get_object_current_state(\n kind=kind, api_version=api_version, namespace=namespace, name=name\n )\n if content is not None:\n changed = True\n\n # Set resource finalizers\n with DRY_RUN_CLUSTER_LOCK:\n self._cluster_content[namespace][kind][api_version][name][\n \"metadata\"\n ][\"deletionTimestamp\"] = datetime.now().strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n )\n self._cluster_content[namespace][kind][api_version][name][\n \"metadata\"\n ][\"deletionGracePeriodSeconds\"] = 0\n\n # Call any registered finalizers\n for key, callback in self._get_registered_watches(\n api_version, kind, namespace, name, finalizer=True\n ):\n log.debug2(\n \"Calling registered finalizer [%s] for [%s]\", callback, key\n )\n callback(self._cluster_content[namespace][kind][api_version][name])\n\n # If finalizers have been cleared and object hasn't already been deleted then\n # remove the key\n current_obj = (\n self._cluster_content.get(namespace, {})\n .get(kind, {})\n .get(api_version, {})\n .get(name, {})\n )\n if current_obj and not current_obj.get(\"metadata\", {}).get(\n \"finalizers\", []\n ):\n with DRY_RUN_CLUSTER_LOCK:\n self._delete_key(namespace, kind, api_version, name)\n\n return True, changed\n\n def get_object_current_state(self, kind, name, namespace=None, api_version=None):\n log.info(\n \"DRY RUN get_object_current_state of [%s/%s] in [%s]\", kind, name, namespace\n )\n\n # Look in the cluster state\n matches = []\n kind_entries = self._cluster_content.get(namespace, {}).get(kind, {})\n log.debug3(\"Kind entries: %s\", kind_entries)\n for api_ver, entries in kind_entries.items():\n log.debug3(\"Checking api_version [%s // %s]\", api_ver, api_version)\n if name in entries and (api_ver == api_version or api_version is None):\n matches.append(entries[name])\n log.debug(\n \"Found %d matches for [%s/%s] in %s\", len(matches), kind, name, namespace\n )\n if len(matches) == 1:\n return True, copy.deepcopy(matches[0])\n return True, None\n\n def filter_objects_current_state(\n self,\n kind,\n namespace=None,\n api_version=None,\n label_selector=None,\n field_selector=None,\n ): # pylint: disable=too-many-arguments\n log.info(\n \"DRY RUN 
filter_objects_current_state of [%s] in [%s]\", kind, namespace\n )\n # Look in the cluster state\n matches = []\n kind_entries = self._cluster_content.get(namespace, {}).get(kind, {})\n log.debug3(\"Kind entries: %s\", kind_entries)\n for api_ver, entries in kind_entries.items():\n # Make sure api version matches\n log.debug3(\"Checking api_version [%s // %s]\", api_ver, api_version)\n if api_ver != api_version and api_version is not None:\n continue\n\n for resource in entries.values():\n # Make sure Labels Match\n log.debug3(\"Resource: %s\", resource)\n\n labels = resource.get(\"metadata\", {}).get(\"labels\", {})\n log.debug3(\"Checking label_selector [%s // %s]\", labels, label_selector)\n if label_selector is not None and not _match_selector(\n labels, label_selector\n ):\n continue\n\n # Only do the work for field selector if one exists\n log.debug3(\"Checking field_selector [%s]\", field_selector)\n if field_selector is not None and not _match_selector(\n _convert_dict_to_dot(resource),\n field_selector,\n ):\n continue\n\n # Add deep copy of entry to matches list\n matches.append(copy.deepcopy(resource))\n\n return True, matches\n\n def set_status(\n self,\n kind,\n name,\n namespace,\n status,\n api_version=None,\n ): # pylint: disable=too-many-arguments\n log.info(\n \"DRY RUN set_status of [%s.%s/%s] in %s: %s\",\n api_version,\n kind,\n name,\n namespace,\n status,\n )\n object_content = self.get_object_current_state(\n kind, name, namespace, api_version\n )[1]\n if object_content is None:\n log.debug(\"Did not find [%s/%s] in %s\", kind, name, namespace)\n return False, False\n prev_status = object_content.get(\"status\")\n object_content[\"status\"] = status\n self._deploy([object_content], call_watches=False)\n return True, prev_status != status\n\n def watch_objects( # pylint: disable=too-many-arguments,too-many-locals,unused-argument\n self,\n kind: str,\n api_version: Optional[str] = None,\n namespace: Optional[str] = None,\n name: Optional[str] = None,\n label_selector: Optional[str] = None,\n field_selector: Optional[str] = None,\n resource_version: Optional[str] = None,\n timeout: Optional[int] = 15,\n **kwargs,\n ) -> Iterator[KubeWatchEvent]:\n \"\"\"Watch the DryRunDeployManager for resource changes by registering\n callbacks\"\"\"\n\n event_queue = Queue()\n resource_map = {}\n\n def add_event(resource_map: dict, manifest: dict):\n \"\"\"Callback triggered when resources are deployed\"\"\"\n resource = ManagedObject(manifest)\n event_type = KubeEventType.ADDED\n\n watch_key = self._watch_key(\n api_version=resource.api_version,\n kind=resource.kind,\n namespace=resource.namespace,\n name=resource.name,\n )\n if watch_key in resource_map:\n log.debug4(\"Watch key detected, setting Modified event type\")\n event_type = KubeEventType.MODIFIED\n\n resource_map[watch_key] = resource\n event = KubeWatchEvent(\n type=event_type,\n resource=resource,\n )\n event_queue.put(event)\n\n def delete_event(resource_map: dict, manifest: dict):\n \"\"\"Callback triggered when resources are disabled\"\"\"\n resource = ManagedObject(manifest)\n watch_key = self._watch_key(\n api_version=resource.api_version,\n kind=resource.kind,\n namespace=resource.namespace,\n name=resource.name,\n )\n if watch_key in resource_map:\n del resource_map[watch_key]\n\n event = KubeWatchEvent(\n type=KubeEventType.DELETED,\n resource=resource,\n )\n event_queue.put(event)\n\n # Get initial resources\n _, manifests = self.filter_objects_current_state(\n kind=kind,\n api_version=api_version,\n 
namespace=namespace,\n label_selector=label_selector,\n field_selector=field_selector,\n )\n for manifest in manifests:\n resource = ManagedObject(manifest)\n watch_key = self._watch_key(\n kind=resource.kind,\n api_version=resource.api_version,\n name=resource.name,\n namespace=resource.namespace,\n )\n resource_map[watch_key] = resource\n\n event = KubeWatchEvent(type=KubeEventType.ADDED, resource=resource)\n log.debug2(\"Yielding initial event %s\", event)\n yield event\n\n end_time = datetime.max\n if timeout:\n end_time = datetime.now() + timedelta(seconds=timeout)\n\n # Register callbacks\n self.register_watch(\n api_version=api_version,\n kind=kind,\n namespace=namespace,\n name=name,\n callback=partial(add_event, resource_map),\n )\n self.register_finalizer(\n api_version=api_version,\n kind=kind,\n namespace=namespace,\n name=name,\n callback=partial(delete_event, resource_map),\n )\n\n # Yield any events from the callback queue\n log.debug2(\"Waiting till %s\", end_time)\n while True:\n sec_till_end = (end_time - datetime.now()).seconds or 1\n try:\n event = event_queue.get(timeout=sec_till_end)\n log.debug2(\"Yielding event %s\", event)\n yield event\n except Empty:\n pass\n\n if datetime.now() > end_time:\n return\n\n ## Dry Run Methods #########################################################\n def register_watch( # pylint: disable=too-many-arguments\n self,\n api_version: str,\n kind: str,\n callback: Callable[[dict], None],\n namespace=\"\",\n name=\"\",\n ):\n \"\"\"Register a callback to watch for deploy events on a given\n api_version/kind\n \"\"\"\n watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace, name=name\n )\n log.debug(\"Registering watch for %s\", watch_key)\n self._watches.setdefault(watch_key, []).append(callback)\n\n def register_finalizer( # pylint: disable=too-many-arguments\n self,\n api_version: str,\n kind: str,\n callback: Callable[[dict], None],\n namespace=\"\",\n name=\"\",\n ):\n \"\"\"Register a callback to call on deletion events on a given\n api_version/kind\n \"\"\"\n watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace, name=name\n )\n log.debug(\"Registering finalizer for %s\", watch_key)\n self._finalizers.setdefault(watch_key, []).append(callback)\n\n ## Implementation Details ##################################################\n\n @staticmethod\n def _watch_key(api_version=\"\", kind=\"\", namespace=\"\", name=\"\"):\n return \":\".join([api_version or \"\", kind or \"\", namespace or \"\", name or \"\"])\n\n def _get_registered_watches( # pylint: disable=too-many-arguments\n self,\n api_version: str = \"\",\n kind: str = \"\",\n namespace: str = \"\",\n name: str = \"\",\n finalizer: bool = False,\n ) -> List[Tuple[str, Callable]]:\n # Get the scoped watch key\n resource_watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace, name=name\n )\n namespaced_watch_key = self._watch_key(\n api_version=api_version, kind=kind, namespace=namespace\n )\n global_watch_key = self._watch_key(api_version=api_version, kind=kind)\n\n # Get which watch list we're pulling from\n callback_map = self._watches\n if finalizer:\n callback_map = self._finalizers\n\n output_list = []\n log.debug3(\n \"Looking for resourced key: %s namespace key %s global key %s\",\n resource_watch_key,\n namespaced_watch_key,\n global_watch_key,\n )\n for key, callback_list in callback_map.items():\n if key in [resource_watch_key, namespaced_watch_key, global_watch_key]:\n 
log.debug3(\"%d Callbacks found for key %s\", len(callback_list), key)\n for callback in callback_list:\n output_list.append((key, callback))\n\n return output_list\n\n def _delete_key(self, namespace, kind, api_version, name):\n del self._cluster_content[namespace][kind][api_version][name]\n if not self._cluster_content[namespace][kind][api_version]:\n del self._cluster_content[namespace][kind][api_version]\n if not self._cluster_content[namespace][kind]:\n del self._cluster_content[namespace][kind]\n if not self._cluster_content[namespace]:\n del self._cluster_content[namespace]\n\n def _deploy(\n self, resource_definitions, call_watches=True, manage_owner_references=True\n ):\n log.info(\"DRY RUN deploy\")\n changes = False\n for resource in resource_definitions:\n api_version = resource.get(\"apiVersion\")\n kind = resource.get(\"kind\")\n name = resource.get(\"metadata\", {}).get(\"name\")\n namespace = resource.get(\"metadata\", {}).get(\"namespace\")\n log.debug(\n \"DRY RUN deploy [%s/%s/%s/%s]\", namespace, kind, api_version, name\n )\n log.debug4(resource)\n\n # If owner CR configured, add ownerReferences\n if self._owner_cr and manage_owner_references:\n log.debug2(\"Adding dry-run owner references\")\n update_owner_references(self, self._owner_cr, resource)\n log.debug3(\n \"All owner references: %s\", resource[\"metadata\"][\"ownerReferences\"]\n )\n\n with DRY_RUN_CLUSTER_LOCK:\n entries = (\n self._cluster_content.setdefault(namespace, {})\n .setdefault(kind, {})\n .setdefault(api_version, {})\n )\n current = copy.deepcopy(entries.get(name, {}))\n old_resource_version = current.get(\"metadata\", {}).pop(\n \"resourceVersion\", None\n )\n changes = changes or (current != resource)\n\n if \"metadata\" not in resource:\n resource[\"metadata\"] = {}\n\n if (\n self.strict_resource_version\n and resource[\"metadata\"].get(\"resourceVersion\")\n and old_resource_version\n and resource[\"metadata\"].get(\"resourceVersion\")\n != old_resource_version\n ):\n log.warning(\n \"Unable to deploy resource. 
resourceVersion is out of date\"\n )\n return False, False\n\n resource[\"metadata\"][\"creationTimestamp\"] = entries.get(\n \"metadata\", {}\n ).get(\"creationTimestamp\", datetime.now().isoformat())\n resource[\"metadata\"][\"uid\"] = entries.get(\"metadata\", {}).get(\n \"uid\", str(uuid.uuid4())\n )\n resource[\"metadata\"][\"resourceVersion\"] = str(\n random.randint(1, 1000)\n ).zfill(5)\n entries[name] = resource\n\n # Call any registered watches\n if call_watches:\n for key, callback in self._get_registered_watches(\n api_version, kind, namespace, name\n ):\n log.debug2(\"Calling registered watch [%s] for [%s]\", callback, key)\n callback(resource)\n\n # Delete Key if it has already been disabled and doesn't have finalizers\n if self._cluster_content[namespace][kind][api_version][name].get(\n \"metadata\", {}\n ).get(\"deletionTimestamp\") and not self._cluster_content[namespace][kind][\n api_version\n ][\n name\n ].get(\n \"metadata\", {}\n ).get(\n \"finalizers\"\n ):\n with DRY_RUN_CLUSTER_LOCK:\n self._delete_key(namespace, kind, api_version, name)\n\n return True, changes" }, { "identifier": "KubeEventType", "path": "oper8/deploy_manager/kube_event.py", "snippet": "class KubeEventType(Enum):\n \"\"\"Enum for all possible kubernetes event types\"\"\"\n\n DELETED = \"DELETED\"\n MODIFIED = \"MODIFIED\"\n ADDED = \"ADDED\"" }, { "identifier": "library_config", "path": "oper8/test_helpers/helpers.py", "snippet": "@contextmanager\ndef library_config(**config_overrides):\n \"\"\"This context manager sets library config values temporarily and reverts\n them on completion\n \"\"\"\n # Override the configs and hang onto the old values\n old_vals = {}\n for key, val in config_overrides.items():\n if key in config_detail_dict:\n old_vals[key] = config_detail_dict[key]\n config_detail_dict[key] = val\n\n # Yield to the context\n yield\n\n # Revert to the old values\n for key in config_overrides:\n if key in old_vals:\n config_detail_dict[key] = old_vals[key]\n else:\n del config_detail_dict[key]" }, { "identifier": "DisabledLeadershipManager", "path": "oper8/test_helpers/pwm_helpers.py", "snippet": "class DisabledLeadershipManager(LeadershipManagerBase):\n \"\"\"Leadership Manager that is always disabled\"\"\"\n\n def __init__(self):\n self.shutdown_event = Event()\n\n def acquire_resource(self, resource):\n return False\n\n def acquire(self, force: bool = False) -> bool:\n if force:\n self.shutdown_event.set()\n return self.shutdown_event.wait()\n\n def release(self):\n raise NotImplementedError()\n\n def release_resource(self, resource=None):\n raise NotImplementedError()\n\n def is_leader(self):\n return False" }, { "identifier": "MockedReconcileThread", "path": "oper8/test_helpers/pwm_helpers.py", "snippet": "class MockedReconcileThread(ReconcileThread):\n \"\"\"Subclass of ReconcileThread that mocks the subprocess. 
This was more\n reliable than using unittest.mock\"\"\"\n\n _disable_singleton = True\n\n def __init__(\n self,\n deploy_manager=None,\n leadership_manager=None,\n subprocess_wait_time=0.1,\n returned_messages=None,\n ):\n self.requests = Queue()\n self.timer_events = Queue()\n self.processes_started = 0\n self.processes_finished = 0\n self.watch_threads_created = 0\n self.subprocess_wait_time = subprocess_wait_time\n self.returned_messages = returned_messages or [[]]\n super().__init__(deploy_manager, leadership_manager)\n\n def push_request(self, request: ReconcileRequest):\n self.requests.put(request)\n super().push_request(request)\n\n def get_request(self) -> ReconcileRequest:\n return self.requests.get()\n\n def _handle_watch_request(self, request: WatchRequest):\n self.watch_threads_created += 1\n return super()._handle_watch_request(request)\n\n def _handle_process_end(self, reconcile_process: ReconcileProcess):\n self.processes_finished += 1\n return super()._handle_process_end(reconcile_process)\n\n def _start_reconcile_process(\n self, request: ReconcileRequest, pipe: Connection\n ) -> multiprocessing.Process:\n self.processes_started += 1\n\n returned_messages = []\n if len(self.returned_messages) > 0:\n returned_messages = self.returned_messages.pop(0)\n\n # Create and start a mocked reconcile process\n process = self.spawn_ctx.Process(\n target=mocked_create_and_start_entrypoint,\n args=[\n self.logging_queue,\n request,\n pipe,\n self.subprocess_wait_time,\n returned_messages,\n ],\n )\n process.start()\n log.debug3(f\"Started child process with pid: {process.pid}\")\n\n return process\n\n def _create_timer_event_for_request(\n self, request: ReconcileRequest, result: ReconciliationResult = None\n ):\n timer_event = super()._create_timer_event_for_request(request, result)\n self.timer_events.put(timer_event)\n return timer_event" }, { "identifier": "clear_caches", "path": "oper8/test_helpers/pwm_helpers.py", "snippet": "@pytest.fixture(autouse=True)\ndef clear_caches():\n get_configured_filter.cache_clear()" }, { "identifier": "make_ownerref", "path": "oper8/test_helpers/pwm_helpers.py", "snippet": "def make_ownerref(resource):\n metadata = resource.get(\"metadata\", {})\n return {\n \"apiVersion\": resource.get(\"apiVersion\"),\n \"kind\": resource.get(\"kind\"),\n \"name\": metadata.get(\"name\"),\n \"uid\": metadata.get(\"uid\"),\n }" }, { "identifier": "make_resource", "path": "oper8/test_helpers/pwm_helpers.py", "snippet": "def make_resource(\n kind=\"Foo\",\n namespace=\"test\",\n api_version=\"foo.bar.com/v1\",\n name=\"foo\",\n spec=None,\n status=None,\n generation=1,\n resource_version=None,\n annotations=None,\n labels=None,\n owner_refs=None,\n):\n return {\n \"kind\": kind,\n \"apiVersion\": api_version,\n \"metadata\": {\n \"name\": name,\n \"namespace\": namespace,\n \"generation\": generation,\n \"resourceVersion\": resource_version or random.randint(1, 1000),\n \"ownerReferences\": owner_refs or [],\n \"labels\": labels or {},\n \"uid\": str(uuid4()),\n \"annotations\": annotations or {},\n },\n \"spec\": spec or {},\n \"status\": status or {},\n }" }, { "identifier": "DisableFilter", "path": "oper8/watch_manager/python_watch_manager/filters/filters.py", "snippet": "class DisableFilter(Filter):\n \"\"\"Filter to disable all reconciles\"\"\"\n\n def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]:\n \"\"\"Always return False\"\"\"\n return False" }, { "identifier": "WatchThread", "path": 
"oper8/watch_manager/python_watch_manager/threads/watch.py", "snippet": "class WatchThread(ThreadBase): # pylint: disable=too-many-instance-attributes\n \"\"\"The WatchThread monitors the cluster for changes to a specific GroupVersionKind either\n cluster-wide or for a particular namespace. When it detects a change it checks the event\n against the registered Filters and submits a ReconcileRequest if it passes. Every resource\n that has at least one watch request gets a corresponding WatchedResource object whose main\n job is to store the current Filter status\n \"\"\"\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n reconcile_thread: RECONCILE_THREAD_TYPE,\n kind: str,\n api_version: str,\n namespace: Optional[str] = None,\n deploy_manager: DeployManagerBase = None,\n leadership_manager: LeadershipManagerBase = None,\n ):\n \"\"\"Initialize a WatchThread by assigning instance variables and creating maps\n\n Args:\n reconcile_thread: ReconcileThread\n The reconcile thread to submit requests to\n kind: str\n The kind to watch\n api_version: str\n The api_version to watch\n namespace: Optional[str] = None\n The namespace to watch. If none then cluster-wide\n deploy_manager: DeployManagerBase = None\n The deploy_manager to watch events\n leadership_manager: LeadershipManagerBase = None\n The leadership manager to use for elections\n \"\"\"\n # Setup initial variables\n self.reconcile_thread = reconcile_thread\n self.kind = kind\n self.api_version = api_version\n self.namespace = namespace\n\n name = f\"watch_thread_{self.api_version}_{self.kind}\"\n if self.namespace:\n name = name + f\"_{self.namespace}\"\n super().__init__(\n name=name,\n daemon=True,\n deploy_manager=deploy_manager,\n leadership_manager=leadership_manager,\n )\n\n # Setup kubernetes watch resource\n self.kubernetes_watch = watch.Watch()\n\n # Setup watched resources and request mappings. watched_resources\n # is used to track the current status of a resource in a cluster and also includes\n # the current filters. watch_request tracks all of the Controllers that have watched\n # a specific resource or groupings of resources\n self.watched_resources: Dict[str, WatchedResource] = {}\n self.watch_requests: Dict[str, Set[WatchRequest]] = {}\n\n # Lock for adding/gathering watch requests\n self.watch_request_lock = Lock()\n\n def run(self):\n \"\"\"The WatchThread's control loop continuously watches the DeployManager for any new\n events. For every event it gets it gathers all the WatchRequests whose `watched` value\n applies. The thread then initializes a WatchedObject if one doesn't already exist and\n tests the event against each request's Filter. Finally, it submits a ReconcileRequest\n for all events that pass\n \"\"\"\n\n # Check for leadership and shutdown at the start\n list_resource_version = 0\n while True:\n if not self.check_preconditions():\n log.debug(\"Checking preconditions failed. Shuting down\")\n return\n\n for event in self.deploy_manager.watch_objects(\n self.kind,\n self.api_version,\n namespace=self.namespace,\n resource_version=list_resource_version,\n watch_manager=self.kubernetes_watch,\n ):\n # Validate leadership on each event\n if not self.check_preconditions():\n log.debug(\"Checking preconditions failed. 
Shuting down\")\n return\n\n resource = event.resource\n\n # Gather all the watch requests which apply to this event\n watch_requests = self._gather_resource_requests(resource)\n if not watch_requests:\n log.debug2(\"Skipping resource without requested watch\")\n self._clean_event(event)\n continue\n\n # Ensure a watched object exists for every resource\n if resource.uid not in self.watched_resources:\n self._create_watched_resource(resource, watch_requests)\n\n # Check both global and watch specific filters\n watch_requests = self._check_filters(\n watch_requests, resource, event.type\n )\n if not watch_requests:\n log.debug2(\n \"Skipping event %s as all requests failed filters\", event\n )\n self._clean_event(event)\n continue\n\n # Push a reconcile request for each watch requested\n for watch_request in watch_requests:\n log.debug(\n \"Requesting reconcile for %s\",\n resource,\n extra={\"resource\": watch_request.requester.get_resource()},\n )\n self._request_reconcile(event, watch_request)\n\n # Clean up any resources used for the event\n self._clean_event(event)\n\n # Update the resource version to only get new events\n list_resource_version = self.kubernetes_watch.resource_version\n\n ## Class Interface ###################################################\n\n def stop_thread(self):\n \"\"\"Override stop_thread to stop the kubernetes client's Watch as well\"\"\"\n super().stop_thread()\n self.kubernetes_watch.stop()\n\n ## Public Interface ###################################################\n\n def request_watch(self, watch_request: WatchRequest):\n \"\"\"Add a watch request if it doesn't exist\n\n Args:\n watch_request: WatchRequest\n The watch_request to add\n \"\"\"\n requester_id = watch_request.requester\n\n # Acquire the watch request lock before starting work\n with self.watch_request_lock:\n if watch_request in self.watch_requests.get(requester_id.global_id, []):\n log.debug3(\"Request already added\")\n return\n\n # Create watch request for this kind/api_version. 
Use global id\n # as watch thread is already namespaced/global\n log.debug3(\"Adding action with key %s\", requester_id.global_id)\n self.watch_requests.setdefault(requester_id.global_id, set()).add(\n watch_request\n )\n\n ## WatchRequest Functions ###################################################\n\n def _gather_resource_requests(self, resource: ManagedObject) -> List[WatchRequest]:\n \"\"\"Gather the list of actions that apply to this specific Kube event based on\n the ownerRefs and the resource itself.\n\n Args:\n resource: ManagedObject\n The resource for this event\n\n Returns:\n request_list: List[WatchRequest]\n The list of watch requests that apply\n \"\"\"\n\n request_list = []\n\n # Acquire the watch request lock\n with self.watch_request_lock:\n # Check if the event resource can be reconciled directly like in the case of\n # Controllers\n resource_id = ResourceId.from_resource(resource)\n for request in self.watch_requests.get(resource_id.global_id, []):\n # Check if request has a specific name and if this event matches\n if request.requester.name and request.requester.name != resource.name:\n continue\n\n unique_request = copy.deepcopy(request)\n if not unique_request.requester.name:\n unique_request.requester = dataclasses.replace(\n unique_request.requester, name=resource_id.name\n )\n\n log.debug3(\n \"Gathering request for controller %s from %s\",\n unique_request.controller_type,\n resource_id.global_id,\n )\n request_list.append(unique_request)\n\n # Check for any owners watching this resource\n for owner_ref in resource.metadata.get(\"ownerReferences\", []):\n owner_id = ResourceId.from_owner_ref(\n owner_ref, namespace=resource_id.namespace\n )\n\n if owner_id.global_id not in self.watch_requests:\n log.debug3(\"Skipping event with owner_key: %s\", owner_id.global_id)\n continue\n\n for request in self.watch_requests.get(owner_id.global_id, []):\n # If request has a specific name then ensure it matches\n if (\n request.requester.name\n and request.requester.name != owner_ref.get(\"name\")\n ):\n continue\n\n # If request doesn't already have a name then force\n # this resource. 
This allows multiple controllers with\n # the same kind/api_version to own the same resource\n unique_request = copy.deepcopy(request)\n if not unique_request.requester.name:\n unique_request.requester = dataclasses.replace(\n unique_request.requester, name=owner_id.name\n )\n\n log.debug3(\n \"Gathering request for controller %s from %s\",\n unique_request.controller_type,\n owner_ref,\n )\n request_list.append(unique_request)\n\n return request_list\n\n def _request_reconcile(self, event: KubeWatchEvent, request: WatchRequest):\n \"\"\"Request a reconcile for a kube event\n\n Args:\n event: KubeWatchEvent\n The KubeWatchEvent that triggered the reconcile\n request: WatchRequest\n The object that's requested a reconcile\n \"\"\"\n\n resource = event.resource\n event_type = event.type\n requester_id = request.requester\n\n # If the watch request is for a different object (e.g dependent watch) then\n # fetch the correct resource to reconcile\n if (\n requester_id.kind != event.resource.kind\n or requester_id.api_version != event.resource.api_version\n or (requester_id.name and requester_id.name != event.resource.name)\n ):\n success, obj = self.deploy_manager.get_object_current_state(\n kind=requester_id.kind,\n name=requester_id.name,\n namespace=event.resource.namespace,\n api_version=requester_id.api_version,\n )\n if not success or not obj:\n log.warning(\n \"Unable to fetch owner resource %s\", requester_id.get_named_id()\n )\n return\n\n resource = ManagedObject(obj)\n event_type = ReconcileRequestType.DEPENDENT\n\n # Generate the request and push one for each watched action to the reconcile thread\n request = ReconcileRequest(request.controller_type, event_type, resource)\n self.reconcile_thread.push_request(request)\n\n ## Watched Resource Functions ###################################################\n\n def _create_watched_resource(\n self,\n resource: ManagedObject,\n watch_requests: List[WatchRequest],\n ):\n \"\"\"Create a WatchedResource and initialize it's filters\n\n Args:\n resource: ManagedObject\n The resource being watched\n watch_requests: List[WatchRequest]\n The list of requests that apply to this resource\n\n \"\"\"\n # update the watched resources dict\n if resource.uid in self.watched_resources:\n return\n\n # Setup filter dict with global filters\n filter_dict = {None: FilterManager(get_configured_filter(), resource)}\n for request in watch_requests:\n filter_dict[request.requester.get_named_id()] = FilterManager(\n request.filters, resource\n )\n\n # Add watched resource to mapping\n self.watched_resources[resource.uid] = WatchedResource(\n gvk=ResourceId.from_resource(resource), filters=filter_dict\n )\n\n def _clean_event(self, event: KubeWatchEvent):\n \"\"\"Call this function after processing every event to clean any leftover resources\n\n Args:\n event: KubeWatchEvent\n The kube event to clean up\n \"\"\"\n if event.type == KubeEventType.DELETED:\n self.watched_resources.pop(event.resource.uid, None)\n\n ## Filter Functions ###################################################\n\n def _check_filters(\n self,\n watch_requests: List[WatchRequest],\n resource: ManagedObject,\n event: KubeEventType,\n ) -> List[WatchRequest]:\n \"\"\"Check a resource and event against both global and request specific filters\n\n Args:\n watch_requests: List[WatchRequest]\n List of watch requests whose filters should be checked\n resource: ManagedObject\n The resource being filtered\n event: KubeEventType\n THe event type being filtered\n\n Returns:\n successful_requests: 
List[WatchRequest]\n The list of requests that passed the filter\n\n \"\"\"\n\n if resource.uid not in self.watched_resources:\n return []\n\n # If the default watched resource filter fails then no need to\n # check any watch requests\n watched_resource = self.watched_resources[resource.uid]\n if not watched_resource.filters[None].update_and_test(resource, event):\n return []\n\n output_requests = []\n\n # Check the watch requests for any of their filters\n for request in watch_requests:\n requester_id = request.requester.get_named_id()\n\n # If this is the first time this watched resource has seen this request then\n # initialize the filters\n if requester_id not in watched_resource.filters:\n watched_resource.filters[requester_id] = FilterManager(\n request.filters, resource\n )\n\n if not watched_resource.filters[requester_id].update_and_test(\n resource, event\n ):\n continue\n\n output_requests.append(request)\n\n return output_requests" }, { "identifier": "ReconcileRequestType", "path": "oper8/watch_manager/python_watch_manager/utils/types.py", "snippet": "class ReconcileRequestType(Enum):\n \"\"\"Enum to expand the possible KubeEventTypes to include PythonWatchManager\n specific events\"\"\"\n\n # Used for events that are a requeue of an object\n REQUEUED = \"REQUEUED\"\n\n # Used for periodic reconcile events\n PERIODIC = \"PERIODIC\"\n\n # Used for when an event is a dependent resource of a controller\n DEPENDENT = \"DEPENDENT\"\n\n # Used as a sentinel to alert threads to stop\n STOPPED = \"STOPPED\"" }, { "identifier": "ResourceId", "path": "oper8/watch_manager/python_watch_manager/utils/types.py", "snippet": "class ResourceId:\n \"\"\"Class containing the information needed to identify a resource\"\"\"\n\n api_version: str\n kind: str\n name: str = None\n namespace: str = None\n\n # Id properties\n\n @cached_property\n def global_id(self) -> str:\n \"\"\"Get the global_id for a resource in the form kind.version.group\"\"\"\n group_version = self.api_version.split(\"/\")\n return \".\".join([self.kind, *reversed(group_version)])\n\n @cached_property\n def namespaced_id(self) -> str:\n \"\"\"Get the namespace specific id for a resource\"\"\"\n return f\"{self.namespace}.{self.global_id}\"\n\n # Helper Accessor functions\n def get_id(self) -> str:\n \"\"\"Get the requisite id for a resource\"\"\"\n return self.namespaced_id if self.namespace else self.global_id\n\n def get_named_id(self) -> str:\n \"\"\"Get a named id for a resouce\"\"\"\n return f\"{self.name}.{self.get_id()}\"\n\n def get_resource(self) -> dict:\n \"\"\"Get a resource template from this id\"\"\"\n return {\n \"kind\": self.kind,\n \"apiVersion\": self.api_version,\n \"metadata\": {\"name\": self.name, \"namespace\": self.namespace},\n }\n\n # Helper Creation Functions\n @classmethod\n def from_resource(cls, resource: Union[ManagedObject, dict]) -> \"ResourceId\":\n \"\"\"Create a resource id from an existing resource\"\"\"\n metadata = resource.get(\"metadata\", {})\n return cls(\n api_version=resource.get(\"apiVersion\"),\n kind=resource.get(\"kind\"),\n namespace=metadata.get(\"namespace\"),\n name=metadata.get(\"name\"),\n )\n\n @classmethod\n def from_owner_ref(cls, owner_ref: dict, namespace: str = None) -> \"ResourceId\":\n \"\"\"Create a resource id from an ownerRef\"\"\"\n return cls(\n api_version=owner_ref.get(\"apiVersion\"),\n kind=owner_ref.get(\"kind\"),\n namespace=namespace,\n name=owner_ref.get(\"name\"),\n )\n\n @classmethod\n def from_controller(\n cls, controller: Type[CONTROLLER_TYPE], 
namespace: str = None\n ) -> \"ResourceId\":\n \"\"\"Get a Controller's target as a resource id\"\"\"\n return cls(\n api_version=f\"{controller.group}/{controller.version}\",\n kind=controller.kind,\n namespace=namespace,\n )" }, { "identifier": "WatchRequest", "path": "oper8/watch_manager/python_watch_manager/utils/types.py", "snippet": "class WatchRequest:\n \"\"\"A class for requesting a watch of a particular object. It contains information around the\n watched object, who requested the watch, the controller type to be reconciled, and any filters\n to be applied to just this request\"\"\"\n\n watched: ResourceId\n requester: ResourceId\n\n # Watch request must have either type or info\n controller_type: Type[CONTROLLER_TYPE] = None\n controller_info: ClassInfo = None\n\n # Don't compare filters when checking equality as we\n # assume they're the same if they have the same controller\n filters: List[Type[FILTER_TYPE]] = field(default_factory=list, compare=False)\n filters_info: List[Type[ClassInfo]] = field(default_factory=list, compare=False)\n\n def __hash__(self) -> int:\n return hash(\n (\n self.watched,\n self.requester,\n self.controller_type if self.controller_type else self.controller_info,\n )\n )" } ]
import time
import pytest
from oper8.deploy_manager.dry_run_deploy_manager import DryRunDeployManager
from oper8.deploy_manager.kube_event import KubeEventType
from oper8.test_helpers.helpers import library_config
from oper8.test_helpers.pwm_helpers import (
    DisabledLeadershipManager,
    MockedReconcileThread,
    clear_caches,
    make_ownerref,
    make_resource,
)
from oper8.watch_manager.python_watch_manager.filters.filters import DisableFilter
from oper8.watch_manager.python_watch_manager.threads.watch import WatchThread
from oper8.watch_manager.python_watch_manager.utils.types import (
    ReconcileRequestType,
    ResourceId,
    WatchRequest,
)
11,704
watch_thread.start_thread() request = WatchRequest( # Set watched and requester to the same watched=request_resource_id, requester=request_resource_id, ) watch_thread.request_watch(request) dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) dm.deploy([make_resource(name="second_obj")]) dm.disable([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert mocked_reconcile_thread.get_request().type == KubeEventType.ADDED assert mocked_reconcile_thread.get_request().type == KubeEventType.MODIFIED assert mocked_reconcile_thread.get_request().type == KubeEventType.ADDED assert mocked_reconcile_thread.get_request().type == KubeEventType.DELETED @pytest.mark.timeout(5) def test_watch_thread_global_watch_two_owners(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() owner_object = make_resource(kind="OwnerKind", name="owner", spec={"test": "value"}) owner_2_object = make_resource( kind="OwnerKind", name="owner2", spec={"test": "value"} ) watched_object = make_resource( spec={"test": "value"}, owner_refs=[make_ownerref(owner_object), make_ownerref(owner_2_object)], ) watched_object_id = ResourceId.from_resource(watched_object) dm.deploy([owner_object]) dm.deploy([owner_2_object]) with library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) request = WatchRequest( # Set watched and requester to the same watched=watched_object_id, requester=ResourceId( api_version=owner_object.get("apiVersion"), kind=owner_object.get("kind"), ), ) watch_thread.request_watch(request) watch_thread.start_thread() dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(1.5) watch_thread.stop_thread() add_events = [ mocked_reconcile_thread.get_request(), mocked_reconcile_thread.get_request(), ] assert "owner" in [event.resource.name for event in add_events] assert "owner2" in [event.resource.name for event in add_events] assert add_events[0].type == ReconcileRequestType.DEPENDENT assert add_events[1].type == ReconcileRequestType.DEPENDENT modified_events = [ mocked_reconcile_thread.get_request(), mocked_reconcile_thread.get_request(), ] assert "owner" in [event.resource.name for event in modified_events] assert "owner2" in [event.resource.name for event in modified_events] assert modified_events[0].type == ReconcileRequestType.DEPENDENT assert modified_events[1].type == ReconcileRequestType.DEPENDENT @pytest.mark.timeout(5) def test_watch_thread_no_watch(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) with library_config(python_watch_manager={"filter": DisableFilter}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) watch_thread.start_thread() dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert mocked_reconcile_thread.requests.empty() @pytest.mark.timeout(5) def test_watch_thread_not_leader(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) watched_object_id = ResourceId.from_resource(watched_object) with library_config(python_watch_manager={"filter": None}): 
watch_thread = WatchThread(
""" Tests for the WatchThread """ # Standard # Third Party # Local ## Helpers ##################################################################### @pytest.mark.timeout(5) def test_watch_thread_happy_path(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) watched_object_id = ResourceId.from_resource(watched_object) with library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) watch_thread.start_thread() request = WatchRequest( # Set watched and requester to the same watched=watched_object_id, requester=watched_object_id, ) watch_thread.request_watch(request) dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert mocked_reconcile_thread.get_request().type == KubeEventType.ADDED assert mocked_reconcile_thread.get_request().type == KubeEventType.MODIFIED @pytest.mark.timeout(5) def test_watch_thread_filtered(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) watched_object_id = ResourceId.from_resource(watched_object) with library_config(python_watch_manager={"filter": DisableFilter}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) watch_thread.start_thread() request = WatchRequest( # Set watched and requester to the same watched=watched_object_id, requester=watched_object_id, ) watch_thread.request_watch(request) dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert mocked_reconcile_thread.requests.empty() @pytest.mark.timeout(5) def test_watch_thread_deleted(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) watched_object_id = ResourceId.from_resource(watched_object) with library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) watch_thread.start_thread() request = WatchRequest( # Set watched and requester to the same watched=watched_object_id, requester=watched_object_id, ) watch_thread.request_watch(request) dm.deploy([watched_object]) dm.disable([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert mocked_reconcile_thread.get_request().type == KubeEventType.ADDED assert mocked_reconcile_thread.get_request().type == KubeEventType.DELETED @pytest.mark.timeout(5) def test_watch_thread_owner_watch(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() owner_object = make_resource( kind="DifferentKind", name="owner", spec={"test": "value"} ) owner_object_id = ResourceId.from_resource(owner_object) watched_object = make_resource( spec={"test": "value"}, owner_refs=[make_ownerref(owner_object)] ) watched_object_id = ResourceId.from_resource(watched_object) # Deploy owner before watch has started dm.deploy([owner_object]) with library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", 
deploy_manager=dm, ) request = WatchRequest( # Set watched and requester to the same watched=watched_object_id, requester=owner_object_id, ) watch_thread.request_watch(request) watch_thread.start_thread() dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert ( mocked_reconcile_thread.get_request().type == ReconcileRequestType.DEPENDENT ) assert ( mocked_reconcile_thread.get_request().type == ReconcileRequestType.DEPENDENT ) @pytest.mark.timeout(5) def test_watch_thread_global_watch(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() owner_object = make_resource( kind="DifferentKind", name="owner", spec={"test": "value"} ) watched_object = make_resource( spec={"test": "value"}, owner_refs=[make_ownerref(owner_object)] ) watched_object_id = ResourceId.from_resource(watched_object) dm.deploy([owner_object]) with library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) watch_thread.start_thread() request = WatchRequest( # Set watched and requester to the same watched=watched_object_id, requester=ResourceId( api_version=owner_object.get("apiVersion"), kind=owner_object.get("kind"), ), ) watch_thread.request_watch(request) dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(3) watch_thread.stop_thread() assert ( mocked_reconcile_thread.get_request().type == ReconcileRequestType.DEPENDENT ) assert ( mocked_reconcile_thread.get_request().type == ReconcileRequestType.DEPENDENT ) @pytest.mark.timeout(5) def test_watch_thread_all_events(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) request_resource_id = ResourceId( api_version=watched_object.get("apiVersion"), kind=watched_object.get("kind") ) with library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) watch_thread.start_thread() request = WatchRequest( # Set watched and requester to the same watched=request_resource_id, requester=request_resource_id, ) watch_thread.request_watch(request) dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) dm.deploy([make_resource(name="second_obj")]) dm.disable([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert mocked_reconcile_thread.get_request().type == KubeEventType.ADDED assert mocked_reconcile_thread.get_request().type == KubeEventType.MODIFIED assert mocked_reconcile_thread.get_request().type == KubeEventType.ADDED assert mocked_reconcile_thread.get_request().type == KubeEventType.DELETED @pytest.mark.timeout(5) def test_watch_thread_global_watch_two_owners(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() owner_object = make_resource(kind="OwnerKind", name="owner", spec={"test": "value"}) owner_2_object = make_resource( kind="OwnerKind", name="owner2", spec={"test": "value"} ) watched_object = make_resource( spec={"test": "value"}, owner_refs=[make_ownerref(owner_object), make_ownerref(owner_2_object)], ) watched_object_id = ResourceId.from_resource(watched_object) dm.deploy([owner_object]) dm.deploy([owner_2_object]) with 
library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) request = WatchRequest( # Set watched and requester to the same watched=watched_object_id, requester=ResourceId( api_version=owner_object.get("apiVersion"), kind=owner_object.get("kind"), ), ) watch_thread.request_watch(request) watch_thread.start_thread() dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(1.5) watch_thread.stop_thread() add_events = [ mocked_reconcile_thread.get_request(), mocked_reconcile_thread.get_request(), ] assert "owner" in [event.resource.name for event in add_events] assert "owner2" in [event.resource.name for event in add_events] assert add_events[0].type == ReconcileRequestType.DEPENDENT assert add_events[1].type == ReconcileRequestType.DEPENDENT modified_events = [ mocked_reconcile_thread.get_request(), mocked_reconcile_thread.get_request(), ] assert "owner" in [event.resource.name for event in modified_events] assert "owner2" in [event.resource.name for event in modified_events] assert modified_events[0].type == ReconcileRequestType.DEPENDENT assert modified_events[1].type == ReconcileRequestType.DEPENDENT @pytest.mark.timeout(5) def test_watch_thread_no_watch(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) with library_config(python_watch_manager={"filter": DisableFilter}): watch_thread = WatchThread( reconcile_thread=mocked_reconcile_thread, kind="Foo", api_version="foo.bar.com/v1", namespace="test", deploy_manager=dm, ) watch_thread.start_thread() dm.deploy([watched_object]) watched_object["spec"] = {"test": "updated"} dm.deploy([watched_object]) time.sleep(1.5) watch_thread.stop_thread() assert mocked_reconcile_thread.requests.empty() @pytest.mark.timeout(5) def test_watch_thread_not_leader(): dm = DryRunDeployManager() mocked_reconcile_thread = MockedReconcileThread() watched_object = make_resource(spec={"test": "value"}) watched_object_id = ResourceId.from_resource(watched_object) with library_config(python_watch_manager={"filter": None}): watch_thread = WatchThread(
leadership_manager=DisabledLeadershipManager(),
3
2023-11-15 16:43:29+00:00
16k
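The watch-thread tests above all follow the same pattern: deploy, update, and disable resources through a dry-run deploy manager, then assert on the order of events handed to the reconcile thread (ADDED, then MODIFIED, then DELETED, or DEPENDENT requests when an owner is the requester). The sketch below models only that event-ordering idea with a plain dict-backed store and callbacks; it is a toy illustration, not the oper8 WatchThread/DryRunDeployManager API, and every name in it is hypothetical.

# Toy model (not the oper8 API): a dict-backed store that emits
# ADDED / MODIFIED / DELETED events in the same order the tests assert on.
from enum import Enum
from typing import Callable, Dict, List, Tuple


class EventType(Enum):
    ADDED = "ADDED"
    MODIFIED = "MODIFIED"
    DELETED = "DELETED"


class ToyDryRunStore:
    """Minimal stand-in for a dry-run deploy manager with watch callbacks."""

    def __init__(self) -> None:
        self._objects: Dict[Tuple[str, str, str], dict] = {}
        self._watchers: List[Callable[[EventType, dict], None]] = []

    def watch(self, callback: Callable[[EventType, dict], None]) -> None:
        # Register a callback that receives every subsequent event
        self._watchers.append(callback)

    def _key(self, obj: dict) -> Tuple[str, str, str]:
        meta = obj.get("metadata", {})
        return (obj.get("kind", ""), meta.get("namespace", ""), meta.get("name", ""))

    def deploy(self, obj: dict) -> None:
        # First deploy of a key is ADDED, any later deploy is MODIFIED
        key = self._key(obj)
        event = EventType.MODIFIED if key in self._objects else EventType.ADDED
        self._objects[key] = obj
        for cb in self._watchers:
            cb(event, obj)

    def disable(self, obj: dict) -> None:
        # Removing a key emits DELETED
        obj = self._objects.pop(self._key(obj), obj)
        for cb in self._watchers:
            cb(EventType.DELETED, obj)


if __name__ == "__main__":
    events = []
    store = ToyDryRunStore()
    store.watch(lambda etype, obj: events.append(etype))

    foo = {"kind": "Foo", "metadata": {"name": "foo", "namespace": "test"}, "spec": {"test": "value"}}
    store.deploy(foo)                 # ADDED
    foo["spec"] = {"test": "updated"}
    store.deploy(foo)                 # MODIFIED
    store.disable(foo)                # DELETED
    assert [e.value for e in events] == ["ADDED", "MODIFIED", "DELETED"]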
Jisencc/yolov5_dual_weighting
utils/segment/dataloaders.py
[ { "identifier": "augment_hsv", "path": "utils/augmentations.py", "snippet": "def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):\n # HSV color-space augmentation\n if hgain or sgain or vgain:\n r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains\n hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))\n dtype = im.dtype # uint8\n\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed" }, { "identifier": "copy_paste", "path": "utils/augmentations.py", "snippet": "def copy_paste(im, labels, segments, p=0.5):\n # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)\n n = len(segments)\n if p and n:\n h, w, c = im.shape # height, width, channels\n im_new = np.zeros(im.shape, np.uint8)\n for j in random.sample(range(n), k=round(p * n)):\n l, s = labels[j], segments[j]\n box = w - l[3], l[2], w - l[1], l[4]\n ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area\n if (ioa < 0.30).all(): # allow 30% obscuration of existing labels\n labels = np.concatenate((labels, [[l[0], *box]]), 0)\n segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))\n cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED)\n\n result = cv2.flip(im, 1) # augment segments (flip left-right)\n i = cv2.flip(im_new, 1).astype(bool)\n im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug\n\n return im, labels, segments" }, { "identifier": "letterbox", "path": "utils/augmentations.py", "snippet": "def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):\n # Resize and pad image while meeting stride-multiple constraints\n shape = im.shape[:2] # current shape [height, width]\n if isinstance(new_shape, int):\n new_shape = (new_shape, new_shape)\n\n # Scale ratio (new / old)\n r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])\n if not scaleup: # only scale down, do not scale up (for better val mAP)\n r = min(r, 1.0)\n\n # Compute padding\n ratio = r, r # width, height ratios\n new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))\n dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding\n if auto: # minimum rectangle\n dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding\n elif scaleFill: # stretch\n dw, dh = 0.0, 0.0\n new_unpad = (new_shape[1], new_shape[0])\n ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios\n\n dw /= 2 # divide padding into 2 sides\n dh /= 2\n\n if shape[::-1] != new_unpad: # resize\n im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)\n top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))\n left, right = int(round(dw - 0.1)), int(round(dw + 0.1))\n im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border\n return im, ratio, (dw, dh)" }, { "identifier": "InfiniteDataLoader", "path": "utils/dataloaders.py", "snippet": "class InfiniteDataLoader(dataloader.DataLoader):\n \"\"\" Dataloader that reuses workers\n\n Uses same syntax as vanilla DataLoader\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n object.__setattr__(self, 'batch_sampler', 
_RepeatSampler(self.batch_sampler))\n self.iterator = super().__iter__()\n\n def __len__(self):\n return len(self.batch_sampler.sampler)\n\n def __iter__(self):\n for _ in range(len(self)):\n yield next(self.iterator)" }, { "identifier": "LoadImagesAndLabels", "path": "utils/dataloaders.py", "snippet": "class LoadImagesAndLabels(Dataset):\n # YOLOv5 train_loader/val_loader, loads images and labels for training and validation\n cache_version = 0.6 # dataset labels *.cache version\n rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]\n\n def __init__(self,\n path,\n img_size=640,\n batch_size=16,\n augment=False,\n hyp=None,\n rect=False,\n image_weights=False,\n cache_images=False,\n single_cls=False,\n stride=32,\n pad=0.0,\n min_items=0,\n prefix=''):\n self.img_size = img_size\n self.augment = augment\n self.hyp = hyp\n self.image_weights = image_weights\n self.rect = False if image_weights else rect\n self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)\n self.mosaic_border = [-img_size // 2, -img_size // 2]\n self.stride = stride\n self.path = path\n self.albumentations = Albumentations(size=img_size) if augment else None\n\n try:\n f = [] # image files\n for p in path if isinstance(path, list) else [path]:\n p = Path(p) # os-agnostic\n if p.is_dir(): # dir\n f += glob.glob(str(p / '**' / '*.*'), recursive=True)\n # f = list(p.rglob('*.*')) # pathlib\n elif p.is_file(): # file\n with open(p) as t:\n t = t.read().strip().splitlines()\n parent = str(p.parent) + os.sep\n f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path\n # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib)\n else:\n raise FileNotFoundError(f'{prefix}{p} does not exist')\n self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS)\n # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib\n assert self.im_files, f'{prefix}No images found'\n except Exception as e:\n raise Exception(f'{prefix}Error loading data from {path}: {e}\\n{HELP_URL}') from e\n\n # Check cache\n self.label_files = img2label_paths(self.im_files) # labels\n cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')\n try:\n cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict\n assert cache['version'] == self.cache_version # matches current version\n assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash\n except Exception:\n cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops\n\n # Display cache\n nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total\n if exists and LOCAL_RANK in {-1, 0}:\n d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'\n tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results\n if cache['msgs']:\n LOGGER.info('\\n'.join(cache['msgs'])) # display warnings\n assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}'\n\n # Read cache\n [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items\n labels, shapes, self.segments = zip(*cache.values())\n nl = len(np.concatenate(labels, 0)) # number of labels\n assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}'\n self.labels = list(labels)\n self.shapes = np.array(shapes)\n self.im_files = list(cache.keys()) # update\n self.label_files = img2label_paths(cache.keys()) # update\n\n # Filter images\n if min_items:\n include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int)\n LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset')\n self.im_files = [self.im_files[i] for i in include]\n self.label_files = [self.label_files[i] for i in include]\n self.labels = [self.labels[i] for i in include]\n self.segments = [self.segments[i] for i in include]\n self.shapes = self.shapes[include] # wh\n\n # Create indices\n n = len(self.shapes) # number of images\n bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index\n nb = bi[-1] + 1 # number of batches\n self.batch = bi # batch index of image\n self.n = n\n self.indices = range(n)\n\n # Update labels\n include_class = [] # filter labels to include only these classes (optional)\n self.segments = list(self.segments)\n include_class_array = np.array(include_class).reshape(1, -1)\n for i, (label, segment) in enumerate(zip(self.labels, self.segments)):\n if include_class:\n j = (label[:, 0:1] == include_class_array).any(1)\n self.labels[i] = label[j]\n if segment:\n self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem]\n if single_cls: # single-class training, merge all classes into 0\n self.labels[i][:, 0] = 0\n\n # Rectangular Training\n if self.rect:\n # Sort by aspect ratio\n s = self.shapes # wh\n ar = s[:, 1] / s[:, 0] # aspect ratio\n irect = ar.argsort()\n self.im_files = [self.im_files[i] for i in irect]\n self.label_files = [self.label_files[i] for i in irect]\n self.labels = [self.labels[i] for i in irect]\n self.segments = [self.segments[i] for i in irect]\n self.shapes = s[irect] # wh\n ar = ar[irect]\n\n # Set training image shapes\n shapes = [[1, 1]] * nb\n for i in range(nb):\n ari = ar[bi == i]\n mini, maxi = ari.min(), ari.max()\n if maxi < 1:\n shapes[i] = [maxi, 1]\n elif mini > 1:\n shapes[i] = [1, 1 / mini]\n\n self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride\n\n # Cache images into RAM/disk for faster training\n if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix):\n cache_images = False\n self.ims = [None] * n\n self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files]\n if cache_images:\n b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes\n self.im_hw0, self.im_hw = [None] * n, [None] * n\n fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image\n results = ThreadPool(NUM_THREADS).imap(fcn, range(n))\n pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0)\n for i, x in pbar:\n if cache_images == 'disk':\n b += self.npy_files[i].stat().st_size\n else: # 'ram'\n self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i)\n b += self.ims[i].nbytes\n pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})'\n pbar.close()\n\n def check_cache_ram(self, safety_margin=0.1, prefix=''):\n # Check image caching requirements vs available memory\n b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes\n n = min(self.n, 30) # extrapolate from 30 random images\n for _ in range(n):\n im = cv2.imread(random.choice(self.im_files)) # sample image\n ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio\n b += im.nbytes * ratio ** 2\n mem_required = b 
* self.n / n # GB required to cache dataset into RAM\n mem = psutil.virtual_memory()\n cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question\n if not cache:\n LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, '\n f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, '\n f\"{'caching images ✅' if cache else 'not caching images ⚠️'}\")\n return cache\n\n def cache_labels(self, path=Path('./labels.cache'), prefix=''):\n # Cache dataset labels, check images and read shapes\n x = {} # dict\n nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages\n desc = f'{prefix}Scanning {path.parent / path.stem}...'\n with Pool(NUM_THREADS) as pool:\n pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))),\n desc=desc,\n total=len(self.im_files),\n bar_format=TQDM_BAR_FORMAT)\n for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar:\n nm += nm_f\n nf += nf_f\n ne += ne_f\n nc += nc_f\n if im_file:\n x[im_file] = [lb, shape, segments]\n if msg:\n msgs.append(msg)\n pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'\n\n pbar.close()\n if msgs:\n LOGGER.info('\\n'.join(msgs))\n if nf == 0:\n LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')\n x['hash'] = get_hash(self.label_files + self.im_files)\n x['results'] = nf, nm, ne, nc, len(self.im_files)\n x['msgs'] = msgs # warnings\n x['version'] = self.cache_version # cache version\n try:\n np.save(path, x) # save cache for next time\n path.with_suffix('.cache.npy').rename(path) # remove .npy suffix\n LOGGER.info(f'{prefix}New cache created: {path}')\n except Exception as e:\n LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable\n return x\n\n def __len__(self):\n return len(self.im_files)\n\n # def __iter__(self):\n # self.count = -1\n # print('ran dataset iter')\n # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)\n # return self\n\n def __getitem__(self, index):\n index = self.indices[index] # linear, shuffled, or image_weights\n\n hyp = self.hyp\n mosaic = self.mosaic and random.random() < hyp['mosaic']\n if mosaic:\n # Load mosaic\n img, labels = self.load_mosaic(index)\n shapes = None\n\n # MixUp augmentation\n if random.random() < hyp['mixup']:\n img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1)))\n\n else:\n # Load image\n img, (h0, w0), (h, w) = self.load_image(index)\n\n # Letterbox\n shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape\n img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)\n shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling\n\n labels = self.labels[index].copy()\n if labels.size: # normalized xywh to pixel xyxy format\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])\n\n if self.augment:\n img, labels = random_perspective(img,\n labels,\n degrees=hyp['degrees'],\n translate=hyp['translate'],\n scale=hyp['scale'],\n shear=hyp['shear'],\n perspective=hyp['perspective'])\n\n nl = len(labels) # number of labels\n if nl:\n labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3)\n\n if self.augment:\n # Albumentations\n img, labels = self.albumentations(img, labels)\n nl = len(labels) # update after albumentations\n\n # HSV color-space\n 
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])\n\n # Flip up-down\n if random.random() < hyp['flipud']:\n img = np.flipud(img)\n if nl:\n labels[:, 2] = 1 - labels[:, 2]\n\n # Flip left-right\n if random.random() < hyp['fliplr']:\n img = np.fliplr(img)\n if nl:\n labels[:, 1] = 1 - labels[:, 1]\n\n # Cutouts\n # labels = cutout(img, labels, p=0.5)\n # nl = len(labels) # update after cutout\n\n labels_out = torch.zeros((nl, 6))\n if nl:\n labels_out[:, 1:] = torch.from_numpy(labels)\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return torch.from_numpy(img), labels_out, self.im_files[index], shapes\n\n def load_image(self, i):\n # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw)\n im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i],\n if im is None: # not cached in RAM\n if fn.exists(): # load npy\n im = np.load(fn)\n else: # read image\n im = cv2.imread(f) # BGR\n assert im is not None, f'Image Not Found {f}'\n h0, w0 = im.shape[:2] # orig hw\n r = self.img_size / max(h0, w0) # ratio\n if r != 1: # if sizes are not equal\n interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA\n im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp)\n return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized\n return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized\n\n def cache_images_to_disk(self, i):\n # Saves an image as an *.npy file for faster loading\n f = self.npy_files[i]\n if not f.exists():\n np.save(f.as_posix(), cv2.imread(self.im_files[i]))\n\n def load_mosaic(self, index):\n # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic\n labels4, segments4 = [], []\n s = self.img_size\n yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y\n indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices\n random.shuffle(indices)\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = self.load_image(index)\n\n # place img in img4\n if i == 0: # top left\n img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)\n elif i == 1: # top right\n x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc\n x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h\n elif i == 2: # bottom left\n x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)\n elif i == 3: # bottom right\n x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)\n x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)\n\n img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]\n padw = x1a - x1b\n padh = y1a - y1b\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padw, padh) for x in segments]\n labels4.append(labels)\n segments4.extend(segments)\n\n # Concat/clip labels\n labels4 = np.concatenate(labels4, 0)\n for x in (labels4[:, 1:], *segments4):\n np.clip(x, 0, 2 * s, out=x) # clip when 
using random_perspective()\n # img4, labels4 = replicate(img4, labels4) # replicate\n\n # Augment\n img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste'])\n img4, labels4 = random_perspective(img4,\n labels4,\n segments4,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n border=self.mosaic_border) # border to remove\n\n return img4, labels4\n\n def load_mosaic9(self, index):\n # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic\n labels9, segments9 = [], []\n s = self.img_size\n indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices\n random.shuffle(indices)\n hp, wp = -1, -1 # height, width previous\n for i, index in enumerate(indices):\n # Load image\n img, _, (h, w) = self.load_image(index)\n\n # place img in img9\n if i == 0: # center\n img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles\n h0, w0 = h, w\n c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates\n elif i == 1: # top\n c = s, s - h, s + w, s\n elif i == 2: # top right\n c = s + wp, s - h, s + wp + w, s\n elif i == 3: # right\n c = s + w0, s, s + w0 + w, s + h\n elif i == 4: # bottom right\n c = s + w0, s + hp, s + w0 + w, s + hp + h\n elif i == 5: # bottom\n c = s + w0 - w, s + h0, s + w0, s + h0 + h\n elif i == 6: # bottom left\n c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h\n elif i == 7: # left\n c = s - w, s + h0 - h, s, s + h0\n elif i == 8: # top left\n c = s - w, s + h0 - hp - h, s, s + h0 - hp\n\n padx, pady = c[:2]\n x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords\n\n # Labels\n labels, segments = self.labels[index].copy(), self.segments[index].copy()\n if labels.size:\n labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format\n segments = [xyn2xy(x, w, h, padx, pady) for x in segments]\n labels9.append(labels)\n segments9.extend(segments)\n\n # Image\n img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax]\n hp, wp = h, w # height, width previous\n\n # Offset\n yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y\n img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]\n\n # Concat/clip labels\n labels9 = np.concatenate(labels9, 0)\n labels9[:, [1, 3]] -= xc\n labels9[:, [2, 4]] -= yc\n c = np.array([xc, yc]) # centers\n segments9 = [x - c for x in segments9]\n\n for x in (labels9[:, 1:], *segments9):\n np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective()\n # img9, labels9 = replicate(img9, labels9) # replicate\n\n # Augment\n img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste'])\n img9, labels9 = random_perspective(img9,\n labels9,\n segments9,\n degrees=self.hyp['degrees'],\n translate=self.hyp['translate'],\n scale=self.hyp['scale'],\n shear=self.hyp['shear'],\n perspective=self.hyp['perspective'],\n border=self.mosaic_border) # border to remove\n\n return img9, labels9\n\n @staticmethod\n def collate_fn(batch):\n im, label, path, shapes = zip(*batch) # transposed\n for i, lb in enumerate(label):\n lb[:, 0] = i # add target image index for build_targets()\n return torch.stack(im, 0), torch.cat(label, 0), path, shapes\n\n @staticmethod\n def collate_fn4(batch):\n im, label, path, shapes = zip(*batch) # transposed\n n = len(shapes) // 4\n im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]\n\n ho = 
torch.tensor([[0.0, 0, 0, 1, 0, 0]])\n wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]])\n s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale\n for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW\n i *= 4\n if random.random() < 0.5:\n im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear',\n align_corners=False)[0].type(im[i].type())\n lb = label[i]\n else:\n im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2)\n lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s\n im4.append(im1)\n label4.append(lb)\n\n for i, lb in enumerate(label4):\n lb[:, 0] = i # add target image index for build_targets()\n\n return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4" }, { "identifier": "seed_worker", "path": "utils/dataloaders.py", "snippet": "def seed_worker(worker_id):\n # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader\n worker_seed = torch.initial_seed() % 2 ** 32\n np.random.seed(worker_seed)\n random.seed(worker_seed)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "xyn2xy", "path": "utils/general.py", "snippet": "def xyn2xy(x, w=640, h=640, padw=0, padh=0):\n # Convert normalized segments into pixel segments, shape (n,2)\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[..., 0] = w * x[..., 0] + padw # top left x\n y[..., 1] = h * x[..., 1] + padh # top left y\n return y" }, { "identifier": "xywhn2xyxy", "path": "utils/general.py", "snippet": "def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\n # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x\n y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y\n y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x\n y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y\n return y" }, { "identifier": "xyxy2xywhn", "path": "utils/general.py", "snippet": "def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right\n if clip:\n clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center\n y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center\n y[..., 2] = (x[..., 2] - x[..., 0]) / w # width\n y[..., 3] = (x[..., 3] - x[..., 1]) / h # height\n return y" }, { "identifier": "torch_distributed_zero_first", "path": "utils/torch_utils.py", "snippet": "@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n # Decorator to make all processes in distributed training wait for each local_master to do something\n if local_rank not in [-1, 0]:\n dist.barrier(device_ids=[local_rank])\n yield\n if local_rank == 0:\n dist.barrier(device_ids=[0])" }, { "identifier": "mixup", "path": "utils/segment/augmentations.py", "snippet": "def mixup(im, labels, segments, im2, labels2, segments2):\n # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf\n r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0\n im = (im * r + im2 * (1 - r)).astype(np.uint8)\n labels = np.concatenate((labels, 
labels2), 0)\n segments = np.concatenate((segments, segments2), 0)\n return im, labels, segments" }, { "identifier": "random_perspective", "path": "utils/segment/augmentations.py", "snippet": "def random_perspective(im,\n targets=(),\n segments=(),\n degrees=10,\n translate=.1,\n scale=.1,\n shear=10,\n perspective=0.0,\n border=(0, 0)):\n # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))\n # targets = [cls, xyxy]\n\n height = im.shape[0] + border[0] * 2 # shape(h,w,c)\n width = im.shape[1] + border[1] * 2\n\n # Center\n C = np.eye(3)\n C[0, 2] = -im.shape[1] / 2 # x translation (pixels)\n C[1, 2] = -im.shape[0] / 2 # y translation (pixels)\n\n # Perspective\n P = np.eye(3)\n P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)\n P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)\n\n # Rotation and Scale\n R = np.eye(3)\n a = random.uniform(-degrees, degrees)\n # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations\n s = random.uniform(1 - scale, 1 + scale)\n # s = 2 ** random.uniform(-scale, scale)\n R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)\n\n # Shear\n S = np.eye(3)\n S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)\n S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)\n\n # Translation\n T = np.eye(3)\n T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels)\n T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels)\n\n # Combined rotation matrix\n M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT\n if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed\n if perspective:\n im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))\n else: # affine\n im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))\n\n # Visualize\n # import matplotlib.pyplot as plt\n # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()\n # ax[0].imshow(im[:, :, ::-1]) # base\n # ax[1].imshow(im2[:, :, ::-1]) # warped\n\n # Transform label coordinates\n n = len(targets)\n new_segments = []\n if n:\n new = np.zeros((n, 4))\n segments = resample_segments(segments) # upsample\n for i, segment in enumerate(segments):\n xy = np.ones((len(segment), 3))\n xy[:, :2] = segment\n xy = xy @ M.T # transform\n xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine\n\n # clip\n new[i] = segment2box(xy, width, height)\n new_segments.append(xy)\n\n # filter candidates\n i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01)\n targets = targets[i]\n targets[:, 1:5] = new[i]\n new_segments = np.array(new_segments)[i]\n\n return im, targets, new_segments" } ]
import os
import random

import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader, distributed

from ..augmentations import augment_hsv, copy_paste, letterbox
from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker
from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn
from ..torch_utils import torch_distributed_zero_first
from .augmentations import mixup, random_perspective
10,983
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Dataloaders
"""

RANK = int(os.getenv('RANK', -1))


def create_dataloader(path,
                      imgsz,
                      batch_size,
                      stride,
                      single_cls=False,
                      hyp=None,
                      augment=False,
                      cache=False,
                      pad=0.0,
                      rect=False,
                      rank=-1,
                      workers=8,
                      image_weights=False,
                      quad=False,
                      prefix='',
                      shuffle=False,
                      mask_downsample_ratio=1,
                      overlap_mask=False,
                      seed=0):
    if rect and shuffle:
        LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
        shuffle = False
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = LoadImagesAndLabelsAndMasks(
            path,
            imgsz,
            batch_size,
            augment=augment,  # augmentation
            hyp=hyp,  # hyperparameters
            rect=rect,  # rectangular batches
            cache_images=cache,
            single_cls=single_cls,
            stride=int(stride),
            pad=pad,
            image_weights=image_weights,
            prefix=prefix,
            downsample_ratio=mask_downsample_ratio,
            overlap=overlap_mask)
    batch_size = min(batch_size, len(dataset))
    nd = torch.cuda.device_count()  # number of CUDA devices
    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + seed + RANK)
    return loader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle and sampler is None,
        num_workers=nw,
        sampler=sampler,
        pin_memory=True,
        collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,
        worker_init_fn=seed_worker,
        generator=generator,
    ), dataset


class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels):  # for training/testing

    def __init__(
        self,
        path,
        img_size=640,
        batch_size=16,
        augment=False,
        hyp=None,
        rect=False,
        image_weights=False,
        cache_images=False,
        single_cls=False,
        stride=32,
        pad=0,
        min_items=0,
        prefix='',
        downsample_ratio=1,
        overlap=False,
    ):
        super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls,
                         stride, pad, min_items, prefix)
        self.downsample_ratio = downsample_ratio
        self.overlap = overlap

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        masks = []
        if mosaic:
            # Load mosaic
            img, labels, segments = self.load_mosaic(index)
            shapes = None

            # MixUp augmentation
            if random.random() < hyp['mixup']:
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Dataloaders
"""

RANK = int(os.getenv('RANK', -1))


def create_dataloader(path,
                      imgsz,
                      batch_size,
                      stride,
                      single_cls=False,
                      hyp=None,
                      augment=False,
                      cache=False,
                      pad=0.0,
                      rect=False,
                      rank=-1,
                      workers=8,
                      image_weights=False,
                      quad=False,
                      prefix='',
                      shuffle=False,
                      mask_downsample_ratio=1,
                      overlap_mask=False,
                      seed=0):
    if rect and shuffle:
        LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')
        shuffle = False
    with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
        dataset = LoadImagesAndLabelsAndMasks(
            path,
            imgsz,
            batch_size,
            augment=augment,  # augmentation
            hyp=hyp,  # hyperparameters
            rect=rect,  # rectangular batches
            cache_images=cache,
            single_cls=single_cls,
            stride=int(stride),
            pad=pad,
            image_weights=image_weights,
            prefix=prefix,
            downsample_ratio=mask_downsample_ratio,
            overlap=overlap_mask)
    batch_size = min(batch_size, len(dataset))
    nd = torch.cuda.device_count()  # number of CUDA devices
    nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
    loader = DataLoader if image_weights else InfiniteDataLoader  # only DataLoader allows for attribute updates
    generator = torch.Generator()
    generator.manual_seed(6148914691236517205 + seed + RANK)
    return loader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle and sampler is None,
        num_workers=nw,
        sampler=sampler,
        pin_memory=True,
        collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,
        worker_init_fn=seed_worker,
        generator=generator,
    ), dataset


class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels):  # for training/testing

    def __init__(
        self,
        path,
        img_size=640,
        batch_size=16,
        augment=False,
        hyp=None,
        rect=False,
        image_weights=False,
        cache_images=False,
        single_cls=False,
        stride=32,
        pad=0,
        min_items=0,
        prefix='',
        downsample_ratio=1,
        overlap=False,
    ):
        super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls,
                         stride, pad, min_items, prefix)
        self.downsample_ratio = downsample_ratio
        self.overlap = overlap

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        masks = []
        if mosaic:
            # Load mosaic
            img, labels, segments = self.load_mosaic(index)
            shapes = None

            # MixUp augmentation
            if random.random() < hyp['mixup']:
img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1)))
11
2023-11-12 13:28:26+00:00
16k
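The context snippets for this record document YOLOv5's box-format helpers (xywhn2xyxy and xyxy2xywhn), which the cropped __getitem__ code relies on when letterboxing and augmenting labels. Below is a standalone NumPy sketch of those two conversions and their round trip, written from the formulas shown in the snippets rather than imported from the repository; treat it as an illustration, not the repo's implementation.

# Standalone sketch of the two box-format conversions documented above.
import numpy as np


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # normalized [x_center, y_center, width, height] -> pixel [x1, y1, x2, y2]
    y = np.copy(x)
    y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw  # top-left x
    y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh  # top-left y
    y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw  # bottom-right x
    y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh  # bottom-right y
    return y


def xyxy2xywhn(x, w=640, h=640):
    # pixel [x1, y1, x2, y2] -> normalized [x_center, y_center, width, height]
    y = np.copy(x)
    y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w  # x center
    y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h  # y center
    y[..., 2] = (x[..., 2] - x[..., 0]) / w        # width
    y[..., 3] = (x[..., 3] - x[..., 1]) / h        # height
    return y


boxes_n = np.array([[0.5, 0.5, 0.25, 0.5]])                      # one centered box, normalized
boxes_px = xywhn2xyxy(boxes_n, w=640, h=480)                     # [[240. 120. 400. 360.]]
assert np.allclose(xyxy2xywhn(boxes_px, w=640, h=480), boxes_n)  # round trip recovers the input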
RAIVNLab/MatFormer-OLMo
olmo/train.py
[ { "identifier": "PathOrStr", "path": "olmo/aliases.py", "snippet": "" }, { "identifier": "CheckpointType", "path": "olmo/config.py", "snippet": "class CheckpointType(StrEnum):\n sharded = \"sharded\"\n unsharded = \"unsharded\"" }, { "identifier": "SpeedMonitorConfig", "path": "olmo/config.py", "snippet": "class SpeedMonitorConfig(BaseConfig):\n window_size: int = 100\n gpu_flops_available: Optional[Union[float, int]] = None" }, { "identifier": "TrainConfig", "path": "olmo/config.py", "snippet": "class TrainConfig(BaseConfig):\n \"\"\"\n OLMo training configuration.\n \"\"\"\n\n run_name: Optional[str] = None\n \"\"\"\n The name of the run.\n \"\"\"\n\n seed: int = 6198\n \"\"\"\n Used to seed all initial RNG states.\n \"\"\"\n\n dry_run: bool = False\n \"\"\"\n If ``True``, don't actually train.\n \"\"\"\n\n model: ModelConfig = field(default_factory=ModelConfig)\n \"\"\"\n OLMo Model configuration.\n \"\"\"\n\n optimizer: OptimizerConfig = field(default_factory=OptimizerConfig)\n \"\"\"\n Optimizer configuration.\n \"\"\"\n\n scheduler: SchedulerConfig = field(default_factory=SchedulerConfig)\n \"\"\"\n Learning rate scheduler configuration.\n \"\"\"\n\n restore_base_learning_rate: bool = True\n \"\"\"\n Set to ``False`` if you want to restart with the base learning rate from the config, not the checkpoint.\n \"\"\"\n\n data: DataConfig = field(default_factory=DataConfig)\n \"\"\"\n Training data configuration.\n \"\"\"\n\n restore_dataloader: bool = True\n \"\"\"\n When restarting, restore the data loader to where it left off.\n If you restarting in order to train on a different dataset, set this to ``False``.\n \"\"\"\n\n fast_forward_batches: Optional[int] = None\n \"\"\"\n When restarting, use this to fast-forward the dataloader beyond the last checkpoint.\n This can be useful when restarting due to a loss spike in order to skip the data that\n corresponded to the spike.\n \"\"\"\n\n evaluators: List[EvaluatorConfig] = field(default_factory=list)\n \"\"\"\n Evaluation configurations.\n \"\"\"\n\n eval_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to run evaluations.\n \"\"\"\n\n tokenizer: TokenizerConfig = field(default_factory=TokenizerConfig)\n \"\"\"\n Tokenizer configuration.\n \"\"\"\n\n save_folder: str = \"./\"\n \"\"\"\n The directory to save checkpoints to.\n \"\"\"\n\n remote_save_folder: Optional[str] = None\n \"\"\"\n A folder in a cloud bucket to upload saved checkpoints to.\n \"\"\"\n\n save_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to save training state checkpoints that can be used for restarts.\n \"\"\"\n\n save_interval_unsharded: Optional[int] = None\n \"\"\"\n How often (if at all) to save the unsharded state to a single file.\n For large models it can be costly to save these, so it usually makes sense to save\n these less often than regular (sharded) training checkpoints.\n \"\"\"\n\n matformer_factor: int = 1\n\n save_num_checkpoints_to_keep: int = -1\n \"\"\"\n How many checkpoints to keep.\n \"\"\"\n\n save_num_unsharded_checkpoints_to_keep: int = -1\n \"\"\"\n How many unsharded checkpoints to keep.\n \"\"\"\n\n save_overwrite: bool = False\n \"\"\"\n If ``True``, overwrite any conflicting checkpoint files.\n \"\"\"\n\n force_save_unsharded: bool = False\n \"\"\"\n Save an unsharded checkpoint before training (even during a dry run).\n Use this option with `--load-path={PATH}` and `--dry_run` to convert a sharded\n checkpoint into an unsharded checkpoint.\n \"\"\"\n\n load_path: Optional[str] = None\n \"\"\"\n The 
path to a (sharded) training checkpoint to restore/resume from.\n \"\"\"\n\n max_duration: int = 10000\n \"\"\"\n Maximum number of batches to train for.\n \"\"\"\n\n global_train_batch_size: int = 512\n \"\"\"\n The effective global batch size.\n \"\"\"\n\n device_train_batch_size: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``global_train_batch_size // world_size``.\n \"\"\"\n\n device_train_microbatch_size: int = 16\n \"\"\"\n The number of instances passed to the model in a single forward-backward pass. You should set\n this as large as you can based on available GPU memory.\n \"\"\"\n\n device_eval_batch_size: int = 16\n \"\"\"\n The number of evaluation instances passed to the model in a single forward pass on each device.\n \"\"\"\n\n eval_subset_num_batches: int = -1\n \"\"\"\n The number of batches to use for downstream evaluation from each dataset.\n \"\"\"\n\n eval_on_load: bool = False\n \"\"\"\n When resuming from a checkpoint, run the evaluation loop right away.\n \"\"\"\n\n device_train_grad_accum: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``device_train_batch_size // device_train_microbatch_size``.\n \"\"\"\n\n max_grad_norm: Optional[float] = None\n \"\"\"\n Clip gradients to this value if set.\n \"\"\"\n\n precision: Optional[str] = None\n \"\"\"\n Precision to train with (e.g. \"amp_bf16\", \"amp_fp16\", or \"fp32\").\n \"\"\"\n\n wandb: Optional[WandbConfig] = None\n \"\"\"\n Weights & Biases configuration.\n \"\"\"\n\n speed_monitor: SpeedMonitorConfig = field(default_factory=SpeedMonitorConfig)\n \"\"\"\n Speed monitor configuration.\n \"\"\"\n\n console_log_interval: int = 1\n \"\"\"\n How often to log to the console.\n \"\"\"\n\n compile: Optional[CompilerConfig] = None\n \"\"\"\n Settings for compiling the model with ``torch.compile()``.\n \"\"\"\n\n activation_checkpointing: bool = False\n \"\"\"\n Use activation checkpointing on transformer blocks.\n \"\"\"\n\n fsdp: FSDPConfig = field(default_factory=FSDPConfig)\n \"\"\"\n Fully sharded data parallel settings.\n \"\"\"\n\n softmax_auxiliary_loss: bool = False\n \"\"\"\n If ``True``, we add the auxiliary loss function from PaLM that encourages the softmax\n normalizing term to be close to 0.\n \"\"\"\n\n time_limit: Optional[float] = 60 * 60 * 119.5\n \"\"\"\n The maximum amount of time to train for before saving a checkpoint and ending early.\n On LUMI we have 48 hours max per job, so we default to just under 48 hours to give us time\n to write out a final checkpoint.\n \"\"\"\n\n early_stopping_factor: Optional[float] = None\n\n save_data_indices: bool = True\n \"\"\"\n Save training data indices from each batch for each worker.\n \"\"\"\n\n @property\n def autocast_precision(self) -> torch.dtype:\n if self.precision == \"amp_bf16\":\n return torch.bfloat16\n elif self.precision == \"amp_fp16\":\n return torch.float16\n elif self.precision == \"fp32\":\n return torch.float32\n else:\n raise ValueError(f\"Unexpected precision type '{self.precision}'\")" }, { "identifier": "IterableDataset", "path": "olmo/data/iterable_dataset.py", "snippet": "class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]):\n \"\"\"\n Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence\n as an IterableDataset that can be deterministically restarted at any point by setting `start_index`,\n which should be a multiple of your global batch size.\n Similarly `max_examples`, 
if set, should be a multiple of global batch size.\n \"\"\"\n\n def __init__(\n self,\n dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]],\n *,\n seed: int = 0,\n start_index: int = 0,\n max_examples: Optional[int] = None,\n shuffle: bool = True,\n drop_last: bool = False,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n work_dir: Optional[PathOrStr] = None,\n ):\n self.dataset = dataset\n self.seed = seed\n self.start_index = start_index\n self.max_examples = max_examples\n self.shuffle = shuffle\n self.drop_last = drop_last\n self.rank = rank if rank is not None else get_global_rank()\n self.world_size = world_size if world_size is not None else get_world_size()\n # If the dataset length is evenly divisible by # of replicas, then there\n # is no need to drop any data, since the dataset will be split equally.\n if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type]\n # Split to nearest available length that is evenly divisible by world size.\n # This is to ensure each rank receives the same amount of data.\n num_samples = math.ceil(\n (len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type]\n )\n else:\n num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type]\n self.total_size = num_samples * self.world_size\n self.global_indices_file: Optional[Path] = None\n if work_dir is not None:\n self.global_indices_file = Path(work_dir) / \"global_indices.npy\"\n if self.rank == 0:\n log.info(\"Saving global data order indices...\")\n self.global_indices_file.parent.mkdir(parents=True, exist_ok=True)\n global_indices = self._build_global_indices()\n global_indices_mmap = np.memmap(\n self.global_indices_file, dtype=np.uint64, mode=\"w+\", shape=(len(global_indices),)\n )\n global_indices_mmap[:] = global_indices\n global_indices_mmap.flush()\n del global_indices_mmap\n log.info(\"Global data order indices saved to '%s'\", self.global_indices_file)\n barrier()\n\n def _build_global_indices(self) -> List[int]:\n if self.shuffle:\n # Deterministically shuffle based on epoch and seed\n # Torch built-in randomness is not very random, so we use numpy.\n rng = np.random.Generator(np.random.PCG64(seed=self.seed))\n indices = np.arange(len(self.dataset))\n rng.shuffle(indices)\n indices = list(indices)\n else:\n indices = list(range(len(self.dataset))) # type: ignore[arg-type]\n\n if not self.drop_last:\n # Add extra samples to make it evenly divisible\n padding_size = self.total_size - len(indices)\n if padding_size <= len(indices):\n indices += indices[:padding_size]\n else:\n indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]\n else:\n # Remove tail of data to make it evenly divisible.\n indices = indices[: self.total_size]\n assert len(indices) == self.total_size\n return indices\n\n def get_global_indices(self) -> Sequence[int]:\n if self.global_indices_file is not None:\n return np.memmap(self.global_indices_file, mode=\"r\", dtype=np.uint64) # type: ignore\n else:\n return self._build_global_indices()\n\n def __iter__(self) -> Iterator[Dict[str, Any]]:\n indices = self.get_global_indices()\n\n # Truncate to max_examples.\n if self.max_examples is not None:\n assert self.max_examples % self.world_size == 0\n indices = indices[: self.max_examples]\n\n # Start at the specified index.\n if self.start_index > 0:\n assert self.start_index % self.world_size == 0\n indices = indices[self.start_index :]\n\n # Slice indices by rank to avoid 
duplicates.\n indices = indices[self.rank : self.total_size : self.world_size]\n\n # Lastly, slice the indices by data loader worker rank to avoid duplicates.\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n indices = indices[worker_info.id :: worker_info.num_workers]\n\n # Convert to a list at this point so we don't have to rely on memory-mapping.\n if isinstance(indices, np.memmap):\n indices_list = indices.tolist() # type: ignore\n else:\n indices_list = indices\n del indices\n\n return (self._get_dataset_item(int(idx)) for idx in indices_list)\n\n def _get_dataset_item(self, idx: int) -> Dict[str, Any]:\n item = self.dataset[idx]\n if isinstance(item, dict):\n return dict(**item, index=idx)\n else:\n return {\"input_ids\": item, \"index\": idx}" }, { "identifier": "Evaluator", "path": "olmo/eval/evaluator.py", "snippet": "class Evaluator:\n label: str\n type: EvaluatorType\n eval_loader: DataLoader\n eval_metric: Union[Metric, Dict[str, Metric]]\n subset_num_batches: Optional[int] = None\n\n def reset_metrics(self) -> None:\n if isinstance(self.eval_metric, Metric):\n self.eval_metric.reset()\n else:\n for metric in self.eval_metric.values():\n metric.reset()\n\n def compute_metrics(self) -> Dict[str, float]:\n if self.type == EvaluatorType.downstream:\n assert isinstance(self.eval_metric, ICLMetric)\n return {\n f\"eval/downstream/{self.label}_{self.eval_metric.metric_type}\": self.eval_metric.compute().item(),\n }\n elif self.type == EvaluatorType.lm:\n # Metric(s) = cross entropy loss\n metrics: Dict[str, Metric]\n if isinstance(self.eval_metric, Metric):\n metrics = {self.label: self.eval_metric}\n else:\n metrics = self.eval_metric\n out = {}\n for label in sorted(metrics.keys()):\n metric = metrics[label]\n assert isinstance(metric, MeanMetric)\n if metric.weight.item() == 0.0: # type: ignore\n # In this case we probably haven't called '.update()' on this metric yet,\n # so we do so here with dummy values. 
Since we pass 0.0 in for weight this won't\n # affect the final value.\n # This can happen when the evaluator contains multiple tasks/datasets and we didn't\n # get to this one within the current evaluation loop.\n metric.update(0.0, 0.0)\n loss = metric.compute()\n if loss.isnan().item():\n # This can happen when the evaluator contains multiple tasks/datasets and we didn't\n # get to this one within the current evaluation loop.\n continue\n else:\n out[f\"eval/{label}/CrossEntropyLoss\"] = loss.item()\n out[f\"eval/{label}/Perplexity\"] = (2**(loss)).item()\n return out\n else:\n raise ValueError(f\"Unexpected evaluator type '{self.type}'\")\n\n def update_metrics(\n self,\n batch: Dict[str, Any],\n ce_loss: torch.Tensor,\n logits: torch.Tensor,\n matformer_factor = 1\n ) -> None:\n if self.type == EvaluatorType.downstream:\n assert isinstance(self.eval_metric, ICLMetric)\n self.eval_metric.update(batch, logits) # type: ignore\n elif self.type == EvaluatorType.lm:\n # Metric(s) = cross entropy loss\n for metadata, instance_loss in zip(batch[\"metadata\"], ce_loss):\n if isinstance(self.eval_metric, dict):\n metric = self.eval_metric[metadata[\"label\"]]\n else:\n metric = self.eval_metric\n metric.update(instance_loss)\n else:\n raise ValueError(f\"Unexpected evaluator type '{self.type}'\")" }, { "identifier": "OlmoConfigurationError", "path": "olmo/exceptions.py", "snippet": "class OlmoConfigurationError(OlmoError):\n \"\"\"\n An error with a configuration file.\n \"\"\"" }, { "identifier": "Olmo", "path": "olmo/model.py", "snippet": "class Olmo(nn.Module):\n def __init__(self, config: ModelConfig, init_params: bool = True):\n super().__init__()\n self.config = config\n\n # Validate config.\n if self.config.alibi and self.config.flash_attention:\n raise OlmoConfigurationError(\"ALiBi is currently not supported with FlashAttention\")\n\n if self.config.alibi and self.config.rope:\n raise OlmoConfigurationError(\"ALiBi and RoPE are mutually exclusive\")\n\n if self.config.embedding_size is not None and self.config.embedding_size != self.config.vocab_size:\n if self.config.embedding_size < self.config.vocab_size:\n raise OlmoConfigurationError(\"embedding size should be at least as big as vocab size\")\n elif self.config.embedding_size % 128 != 0:\n import warnings\n\n warnings.warn(\n \"Embedding size is not a multiple of 128! 
This could hurt throughput performance.\", UserWarning\n )\n\n torch.backends.cuda.enable_flash_sdp(self.config.flash_attention)\n torch.backends.cuda.enable_mem_efficient_sdp(False) # this is super slow so make sure torch won't use it\n\n self.transformer = nn.ModuleDict(\n dict(\n wte=nn.Embedding(\n config.embedding_size or config.vocab_size, config.d_model, device=config.init_device\n ),\n emb_drop=nn.Dropout(config.embedding_dropout),\n blocks=nn.ModuleList([OlmoBlock.build(config) for _ in range(config.n_layers)]),\n ln_f=LayerNorm.build(config),\n )\n )\n if not (self.config.alibi or self.config.rope):\n self.transformer.update(\n {\"wpe\": nn.Embedding(config.max_sequence_length, config.d_model, device=config.init_device)}\n )\n if init_params and self.config.init_device != \"meta\":\n self.apply(self.param_init_fn)\n self.__num_fwd_flops: Optional[int] = None\n\n # Attention bias cache.\n # We could cache these as buffers, but we've run into various issues doing that with FSDP.\n # In general it appears the way FSDP handles buffers is not well-defined.\n # It doesn't shard them but apparently it does synchronize them across processes, which we want to avoid\n # since (A) it isn't necessary, and (B) we have `-inf` in these biases which might get turned into\n # NaNs when they're synchronized due to casting or some other issue.\n self.__bias_cache: Dict[str, Optional[torch.FloatTensor]] = {\n \"causal_attention_bias\": None,\n \"alibi_attention_bias\": None,\n }\n if self.config.alibi:\n # Warm up cache.\n self.causal_attention_bias\n self.alibi_attention_bias\n\n @property\n def device(self) -> torch.device:\n device: torch.device = self.transformer.wte.weight.device # type: ignore\n if device.type == \"meta\":\n if self.config.init_device is not None and self.config.init_device != \"meta\":\n return torch.device(self.config.init_device)\n else:\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n return device\n\n @property\n def causal_attention_bias(self) -> torch.FloatTensor:\n causal_bias = self.__bias_cache[\"causal_attention_bias\"]\n if causal_bias is None:\n causal_bias = causal_attention_bias(self.config, self.device)\n self.__bias_cache[\"causal_attention_bias\"] = causal_bias\n elif causal_bias.device != self.device: # in case model was moved to different device\n causal_bias = causal_bias.to(device=self.device)\n self.__bias_cache[\"causal_attention_bias\"] = causal_bias # type: ignore\n return causal_bias # type: ignore\n\n @property\n def alibi_attention_bias(self) -> torch.FloatTensor:\n alibi_bias = self.__bias_cache[\"alibi_attention_bias\"]\n if alibi_bias is None:\n alibi_bias = alibi_attention_bias(self.config, self.device)\n self.__bias_cache[\"alibi_attention_bias\"] = alibi_bias\n elif alibi_bias.device != self.device: # in case model was moved to different device\n alibi_bias = alibi_bias.to(device=self.device)\n self.__bias_cache[\"alibi_attention_bias\"] = alibi_bias # type: ignore\n return alibi_bias # type: ignore\n\n def forward(\n self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n attention_bias: Optional[torch.Tensor] = None,\n past_key_values: Optional[Sequence[Tuple[torch.Tensor, torch.Tensor]]] = None,\n use_cache: bool = False,\n last_logits_only: bool = False,\n ) -> OlmoOutput:\n \"\"\"\n :param input_ids: A tensor of shape `(batch_size, seq_len)`.\n :param attention_mask: A tensor of shape `(batch_size, seq_len)` that indicates\n which input IDs are masked. 
A `1` value in the mask means that\n the corresponding input ID should *not* be ignored. A `0` means\n that the corresponding input ID is masked.\n\n This has the same meaning as the `attention_mask` in HuggingFace's `transformers`\n library.\n :param attention_bias: A tensor of shape `(batch_size, 1, seq_len, seq_len)`,\n `(1, 1, seq_len, seq_len)`, or `(seq_len, seq_len)`. This is used\n to introduce causal or other biases.\n\n If the tensor is a bool or byte tensor, a `True` or `1` at `attention_bias[:, :, i, j]`\n indicates that the i-th element in the sequence is allowed to attend to the j-th\n element in the sequence.\n\n If the tensor is a float tensor, it will just be added to the attention\n scores before the softmax.\n\n The default is causal, which corresponds to a lower-diagonal byte matrix of ones.\n :param past_key_values: Pre-computed keys and values for each attention block.\n Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n :param use_cache: If `True`, return key and value tensors for each block.\n :param last_logits_only: If `True`, only compute the logits for the last token of each sequence.\n This can speed up decoding when you only care about the next token.\n \"\"\"\n if past_key_values:\n assert len(past_key_values) == self.config.n_layers\n\n batch_size, seq_len = input_ids.size()\n assert seq_len <= self.config.max_sequence_length, (\n f\"Cannot forward input with seq_len={seq_len}, \"\n f\"this model only supports seq_len<={self.config.max_sequence_length}\"\n )\n\n # Get embeddings of input.\n # shape: (batch_size, seq_len, d_model)\n x = self.transformer.wte(input_ids) # type: ignore\n\n if not (self.config.alibi or self.config.rope):\n # Get positional embeddings.\n if past_key_values is None:\n past_length = 0\n else:\n past_length = past_key_values[0][0].size(-2)\n # shape: (1, seq_len)\n pos = torch.arange(\n past_length, past_length + seq_len, dtype=torch.long, device=input_ids.device\n ).unsqueeze(0)\n # shape: (1, seq_len, d_model)\n pos_emb = self.transformer.wpe(pos) # type: ignore\n x = pos_emb + x\n\n # Add input + positional embeddings and apply dropout.\n # shape: (batch_size, seq_len, d_model)\n x = self.transformer.emb_drop(x) # type: ignore\n\n # Transform the attention mask into what the blocks expect.\n if attention_mask is not None:\n # shape: (batch_size, 1, 1, seq_len)\n attention_mask = attention_mask.to(dtype=x.dtype).view(batch_size, -1)[:, None, None, :]\n attention_mask = (1.0 - attention_mask) * torch.finfo(attention_mask.dtype).min\n attention_mask.masked_fill_(attention_mask == 1.0, float(\"-inf\"))\n\n # Merge attention mask with attention bias.\n if (\n attention_bias is not None\n or attention_mask is not None\n or self.config.alibi\n # NOTE (epwalsh): we need to initialize the attn bias in order for attn to work properly\n # with key+value cache. 
Otherwise `F.scaled_dot_product_attention()` doesn't seem to compute\n # scores correctly.\n or past_key_values is not None\n ):\n if attention_bias is None and self.config.alibi:\n attention_bias = self.causal_attention_bias + self.alibi_attention_bias\n elif attention_bias is None:\n attention_bias = self.causal_attention_bias\n elif attention_bias.dtype in (torch.int8, torch.bool):\n attention_bias = attention_bias.to(dtype=x.dtype)\n attention_bias.masked_fill_(attention_bias == 0.0, float(\"-inf\"))\n\n # Transform to the right shape and data type.\n mask_len = seq_len\n if attention_mask is not None:\n mask_len = attention_mask.shape[-1]\n elif past_key_values is not None:\n mask_len = past_key_values[0][0].shape[-2] + input_ids.shape[-1]\n attention_bias = attention_bias[:, :, :mask_len, :mask_len].to(x.dtype)\n\n # Add in the masking bias.\n if attention_mask is not None:\n attention_bias = attention_bias + attention_mask\n\n attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None\n\n # Apply blocks one-by-one.\n for block, layer_past in zip(\n self.transformer.blocks, # type: ignore\n past_key_values or [None] * self.config.n_layers, # type: ignore\n ):\n # shape: (batch_size, seq_len, d_model)\n x, cache = block(x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache)\n if attn_key_values is not None:\n assert cache is not None\n attn_key_values.append(cache)\n\n if last_logits_only:\n # shape: (batch_size, 1, d_model)\n x = x[:, -1, :].unsqueeze(1)\n\n # Apply final layer norm.\n # shape: (batch_size, seq_len or 1, d_model)\n x = self.transformer.ln_f(x) # type: ignore\n\n # Get logits.\n # shape: (batch_size, seq_len or 1, vocab_size)\n logits = F.linear(x, self.transformer.wte.weight, None) # type: ignore\n\n return OlmoOutput(logits=logits, attn_key_values=attn_key_values) # type: ignore[arg-type]\n\n def fsdp_wrap_fn(self, module, recurse: bool = True, nonwrapped_numel: int = 0):\n del recurse, nonwrapped_numel\n return isinstance(module, OlmoBlock)\n\n def activation_checkpointing_fn(self, module):\n return isinstance(module, OlmoBlock)\n\n def reset_parameters(self):\n self.apply(self.param_init_fn)\n\n def param_init_fn(self, module):\n from functools import partial\n\n init_fn = partial(nn.init.normal_, mean=0.0, std=self.config.init_std)\n\n def fused_init_fn(module):\n # Parameter initialization is often based on the parameters shape.\n # If a layer is fused, initialization should be based on the shapes\n # of the original tensor instead of the shape of the fused tensor.\n # Layers which are fused should have the _fused attribute defined.\n # The first element of _fused is the dimension along which the tensor is fused.\n # This is followed by an iterable of split indices.\n _fused = getattr(module, \"_fused\", None)\n if _fused is None:\n raise RuntimeError(\"Internal logic error\")\n\n dim, splits = _fused\n splits = (0, *splits, module.weight.size(dim))\n for s, e in zip(splits[:-1], splits[1:]):\n slice_indices = [slice(None)] * module.weight.ndim\n slice_indices[dim] = slice(s, e)\n init_fn(module.weight[slice_indices])\n\n # Linear\n if isinstance(module, nn.Linear):\n if hasattr(module, \"_fused\"):\n fused_init_fn(module)\n else:\n init_fn(module.weight)\n\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n\n if getattr(module, \"_is_residual\", False):\n with torch.no_grad():\n module.weight.div_(math.sqrt(2 * self.config.n_layers))\n\n if module.bias is not None:\n 
nn.init.zeros_(module.bias)\n\n # Embedding\n if isinstance(module, nn.Embedding):\n init_fn(module.weight)\n\n # LayerNorm\n if isinstance(module, (nn.LayerNorm, LayerNorm, RMSLayerNorm)):\n torch.nn.init.ones_(module.weight)\n torch.nn.init.zeros_(module.bias)\n\n def num_params(self, include_embedding: bool = True) -> int:\n \"\"\"\n Get the total number of parameters.\n \"\"\"\n params = (np for np in self.named_parameters())\n if not include_embedding:\n params = filter( # type: ignore\n lambda np: \".wte.\" not in np[0] and \".wpe.\" not in np[0],\n params,\n )\n return sum(p.numel() for _, p in params)\n\n @property\n def num_fwd_flops(self):\n if self.__num_fwd_flops:\n return self.__num_fwd_flops\n n_params = self.num_params()\n # the number of parameters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.config.max_sequence_length\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = (\n self.config.n_layers * 2 * 2 * (self.config.d_model * (self.config.max_sequence_length**2))\n )\n self.__num_fwd_flops = params_flops_per_seq + attn_flops_per_seq\n return self.__num_fwd_flops\n\n def generate(\n self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n attention_bias: Optional[torch.Tensor] = None,\n max_steps: int = 10,\n beam_size: int = 1,\n per_node_beam_size: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n min_steps: Optional[int] = None,\n final_sequence_scorer: Optional[FinalSequenceScorer] = None,\n constraints: Optional[List[Constraint]] = None,\n ) -> OlmoGenerateOutput:\n \"\"\"\n Generate token IDs using beam search.\n\n Note that by default ``beam_size`` is set to 1, which is greedy decoding.\n\n :param input_ids: A tensor of shape `(batch_size, seq_len)`.\n :param attention_mask: A optional tensor of shape `(batch_size, seq_len)`, the same\n as for the forward method.\n :param attention_bias: A tensor of shape\n `(batch_size, 1, seq_len + tokens_to_generate, seq_len + tokens_to_generate)`,\n the same as for the forward method except only one shape is excepted here.\n\n For an explanation of the other arguments, see the :class:`BeamSearch` class.\n \"\"\"\n beam_search = BeamSearch(\n self.config.eos_token_id,\n max_steps=max_steps,\n beam_size=beam_size,\n per_node_beam_size=per_node_beam_size,\n sampler=sampler,\n min_steps=min_steps,\n final_sequence_scorer=final_sequence_scorer,\n constraints=constraints,\n )\n\n # Validate inputs.\n batch_size, seq_len = input_ids.shape\n if attention_mask is not None:\n assert attention_mask.shape == (batch_size, seq_len)\n if attention_bias is not None:\n assert len(attention_bias.shape) == 4\n assert attention_bias.shape[:2] == (batch_size, 1)\n assert (\n seq_len + beam_search.max_steps\n <= attention_bias.shape[2]\n == attention_bias.shape[3]\n <= self.config.max_sequence_length\n )\n\n tokens_generated = 0\n\n def flatten_past_key_values(\n past_key_values: List[Tuple[torch.Tensor, torch.Tensor]]\n ) -> Dict[str, torch.Tensor]:\n out = {}\n for i, (key, value) in enumerate(past_key_values):\n out[f\"past_key_{i}\"] = key\n out[f\"past_value_{i}\"] = value\n return out\n\n def unflatten_past_key_values(\n past_key_values: Dict[str, torch.Tensor]\n ) -> List[Tuple[torch.Tensor, torch.Tensor]]:\n out = []\n for i in 
range(self.config.n_layers):\n past_key = past_key_values[f\"past_key_{i}\"]\n past_value = past_key_values[f\"past_value_{i}\"]\n out.append((past_key, past_value))\n return out\n\n def step(\n last_predictions: torch.Tensor, state: dict[str, torch.Tensor]\n ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:\n nonlocal tokens_generated\n\n attention_mask = state.get(\"attention_mask\")\n attention_bias = state.get(\"attention_bias\")\n\n if tokens_generated > 0:\n past_key_values = unflatten_past_key_values(state)\n input_ids = last_predictions.unsqueeze(1)\n if attention_mask is not None:\n group_size = input_ids.shape[0]\n attention_mask = torch.cat((attention_mask, attention_mask.new_ones((group_size, 1))), dim=-1)\n else:\n past_key_values = None\n input_ids = state[\"input_ids\"]\n\n tokens_generated += 1\n\n # Run forward pass of model to get logits, then normalize to get log probs.\n output = self(\n input_ids,\n attention_mask=attention_mask,\n attention_bias=attention_bias,\n past_key_values=past_key_values,\n use_cache=True,\n last_logits_only=True,\n )\n log_probs = F.log_softmax(output.logits[:, -1, :], dim=-1)\n\n # Create new state.\n state = flatten_past_key_values(output.attn_key_values)\n if attention_mask is not None:\n state[\"attention_mask\"] = attention_mask\n if attention_bias is not None:\n state[\"attention_bias\"] = attention_bias\n\n return log_probs, state\n\n initial_preds = input_ids.new_zeros((batch_size,)) # This is arbitrary, we won't use this.\n state: dict[str, torch.Tensor] = {\"input_ids\": input_ids}\n if attention_mask is not None:\n state[\"attention_mask\"] = attention_mask\n if attention_bias is not None:\n state[\"attention_bias\"] = attention_bias\n with torch.no_grad():\n token_ids, scores = beam_search.search(initial_preds, state, step)\n\n return OlmoGenerateOutput(\n token_ids=token_ids, # type: ignore[arg-type]\n scores=scores, # type: ignore[arg-type]\n )\n\n @classmethod\n def from_checkpoint(cls, checkpoint_dir: PathOrStr, device: str = \"cpu\") -> Olmo:\n \"\"\"\n Load an OLMo model from a checkpoint.\n \"\"\"\n from cached_path import cached_path\n\n # Load config.\n config_path = cached_path(os.path.join(checkpoint_dir, \"config.yaml\"))\n model_config = ModelConfig.load(config_path, key=\"model\", validate_paths=False)\n\n # Initialize model (always on CPU to start with so we don't run out of GPU memory).\n model_config.init_device = \"cpu\"\n model = Olmo(model_config)\n model.config.init_device = device\n\n # Load state dict directly to target device.\n state_dict_path = cached_path(os.path.join(checkpoint_dir, \"model.pt\"))\n state_dict = torch.load(state_dict_path, map_location=\"cpu\")\n model.load_state_dict(model._make_state_dict_compatible(state_dict))\n\n return model.to(torch.device(device)).eval()\n\n def _make_state_dict_compatible(self, state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n # For backwards compatibility prior to fixing https://github.com/allenai/LLM/issues/222\n if self.config.block_type == BlockType.sequential:\n for block_idx in range(self.config.n_layers):\n norm_w_key = f\"transformer.blocks.{block_idx}.norm.weight\"\n norm_b_key = f\"transformer.blocks.{block_idx}.norm.bias\"\n if norm_w_key in state_dict:\n norm_w = state_dict.pop(norm_w_key)\n state_dict[f\"transformer.blocks.{block_idx}.attn_norm.weight\"] = norm_w\n state_dict[f\"transformer.blocks.{block_idx}.ff_norm.weight\"] = norm_w.clone()\n if norm_b_key in state_dict:\n norm_b = state_dict.pop(norm_b_key)\n 
state_dict[f\"transformer.blocks.{block_idx}.attn_norm.bias\"] = norm_b\n state_dict[f\"transformer.blocks.{block_idx}.ff_norm.bias\"] = norm_b.clone()\n return state_dict" }, { "identifier": "MatformerManager", "path": "olmo/model.py", "snippet": "class MatformerManager:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.current_factor = 1\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance" }, { "identifier": "set_new_base_lr", "path": "olmo/optim.py", "snippet": "def set_new_base_lr(\n optim: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler.LRScheduler, new_base_lr: float\n):\n \"\"\"\n Set a new base learning rate in the optimizer and scheduler.\n \"\"\"\n # Hack scheduler state to start with the new base LR.\n if isinstance(scheduler, torch.optim.lr_scheduler.SequentialLR):\n # Update 'base_lr' for all sub-schedulers.\n for sched in scheduler._schedulers: # type: ignore\n sched.base_lrs = [new_base_lr] * len(sched.base_lrs)\n\n # Update '_last_lr' for current sub-scheduler.\n current_sched = scheduler._schedulers[bisect_right(scheduler._milestones, scheduler.last_epoch)] # type: ignore\n if hasattr(current_sched, \"_get_closed_form_lr\"):\n current_sched._last_lr = current_sched._get_closed_form_lr()\n elif isinstance(current_sched, torch.optim.lr_scheduler.LambdaLR):\n current_sched._last_lr = current_sched.get_lr() # type: ignore\n else:\n raise NotImplementedError\n scheduler._last_lr = current_sched.get_last_lr() # type: ignore\n else:\n raise NotImplementedError\n\n # Update LR in optimizer.\n for param_group, new_lr in zip(optim.param_groups, scheduler.get_last_lr()):\n param_group[\"lr\"] = new_lr\n param_group[\"initial_lr\"] = new_base_lr" }, { "identifier": "barrier", "path": "olmo/util.py", "snippet": "def barrier() -> None:\n if dist.is_available() and dist.is_initialized():\n dist.barrier()" }, { "identifier": "get_global_rank", "path": "olmo/util.py", "snippet": "def get_global_rank() -> int:\n return int(os.environ.get(\"RANK\") or dist.get_rank())" }, { "identifier": "get_world_size", "path": "olmo/util.py", "snippet": "def get_world_size() -> int:\n if dist.is_available() and dist.is_initialized():\n return dist.get_world_size()\n else:\n return 1" }, { "identifier": "move_to_device", "path": "olmo/util.py", "snippet": "def move_to_device(o: T, device: torch.device) -> T:\n if isinstance(o, torch.Tensor):\n return o.to(device) # type: ignore[return-value]\n elif isinstance(o, dict):\n return {k: move_to_device(v, device) for k, v in o.items()} # type: ignore[return-value]\n elif isinstance(o, list):\n return [move_to_device(x, device) for x in o] # type: ignore[return-value]\n elif isinstance(o, tuple):\n return tuple((move_to_device(x, device) for x in o)) # type: ignore[return-value]\n else:\n return o" }, { "identifier": "peak_gpu_memory", "path": "olmo/util.py", "snippet": "def peak_gpu_memory(reset: bool = False) -> Optional[float]:\n \"\"\"\n Get the peak GPU memory usage in MB across all ranks.\n Only rank 0 will get the final result.\n \"\"\"\n if not torch.cuda.is_available():\n return None\n\n device = torch.device(\"cuda\")\n peak_mb = torch.cuda.max_memory_allocated(device) / 1000000\n if dist.is_available() and dist.is_initialized():\n peak_mb_tensor = torch.tensor(peak_mb, device=device)\n dist.reduce(peak_mb_tensor, 0, dist.ReduceOp.MAX)\n peak_mb = 
peak_mb_tensor.item()\n\n if reset:\n # Reset peak stats.\n torch.cuda.reset_max_memory_allocated(device)\n\n return peak_mb" }, { "identifier": "resource_path", "path": "olmo/util.py", "snippet": "def resource_path(folder: PathOrStr, fname: str) -> PathOrStr:\n if is_url(folder):\n from cached_path import cached_path\n\n return cached_path(f\"{folder}/{fname}\")\n else:\n return Path(folder) / fname" }, { "identifier": "syncronize_flag", "path": "olmo/util.py", "snippet": "def syncronize_flag(flag: bool, device: torch.device) -> bool:\n if dist.is_available() and dist.is_initialized():\n flag_tensor = torch.tensor(flag, device=device)\n dist.broadcast(flag_tensor, 0)\n return flag_tensor.item() # type: ignore\n else:\n return flag" }, { "identifier": "upload", "path": "olmo/util.py", "snippet": "def upload(source: PathOrStr, target: str, save_overwrite: bool = False):\n \"\"\"Upload source file to a target location on GCS or S3.\"\"\"\n from urllib.parse import urlparse\n\n source = Path(source)\n assert source.is_file()\n parsed = urlparse(target)\n if parsed.scheme == \"gs\":\n _gcs_upload(source, parsed.netloc, parsed.path, save_overwrite=save_overwrite)\n elif parsed.scheme == \"s3\":\n _s3_upload(source, parsed.netloc, parsed.path, save_overwrite=save_overwrite)\n else:\n raise NotImplementedError(f\"Upload not implemented for '{parsed.scheme}' scheme\")" }, { "identifier": "wait_on", "path": "olmo/util.py", "snippet": "def wait_on(condition: Callable[[], bool], description: str, timeout: float = 10.0):\n \"\"\"Wait on the condition function to return True.\"\"\"\n start_time = time.monotonic()\n while not condition():\n time.sleep(0.5)\n if time.monotonic() - start_time > timeout:\n raise TimeoutError(f\"{description} timed out\")" } ]
import logging import math import random import shutil import time import numpy as np import torch import torch.nn.functional as F import wandb from collections import deque from dataclasses import dataclass, field from itertools import islice from pathlib import Path from typing import Any, Deque, Dict, List, Optional, TextIO, Tuple from packaging import version from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.distributed.fsdp.api import ( FullOptimStateDictConfig, ShardedOptimStateDictConfig, ShardedStateDictConfig, ) from torch.utils.data import DataLoader from torchmetrics import MeanMetric from .aliases import PathOrStr from .config import CheckpointType, SpeedMonitorConfig, TrainConfig from .data import IterableDataset from .eval import Evaluator from .exceptions import OlmoConfigurationError from .model import Olmo, MatformerManager from .optim import set_new_base_lr from .util import ( barrier, get_global_rank, get_world_size, move_to_device, peak_gpu_memory, resource_path, syncronize_flag, upload, wait_on, )
11,117
from __future__ import annotations __all__ = ["SpeedMonitor", "LRMonitor", "Trainer"] log = logging.getLogger(__name__) @dataclass class SpeedMonitor: cfg: SpeedMonitorConfig start_times: Deque[float] = field(default_factory=lambda: deque([])) global_total_tokens: int = 0 device_interval_tokens: Deque[int] = field(default_factory=lambda: deque([])) def batch_start(self, global_total_tokens: int, device_batch_num_tokens: int, record: bool = True) -> None: self.global_total_tokens = global_total_tokens if record: if len(self.start_times) >= self.cfg.window_size: self.start_times.popleft() self.device_interval_tokens.popleft() self.start_times.append(time.monotonic()) self.device_interval_tokens.append(device_batch_num_tokens) def reset(self) -> None: self.start_times.clear() self.device_interval_tokens.clear() def check(self) -> Dict[str, float]: metrics: Dict[str, float] = {"throughput/total_tokens": self.global_total_tokens} if self.start_times: interval_seconds = time.monotonic() - self.start_times[0] interval_batches = len(self.start_times) interval_tokens = sum(self.device_interval_tokens) metrics["throughput/device/tokens_per_second"] = interval_tokens / interval_seconds metrics["throughput/device/batches_per_second"] = interval_batches / interval_seconds return metrics @dataclass class LRMonitor: optim: torch.optim.Optimizer def check(self) -> Dict[str, float]: lrs = [group["lr"] for group in self.optim.param_groups] return {f"optim/learning_rate_group{idx}": lr for idx, lr in enumerate(lrs)} @dataclass class Trainer: cfg: TrainConfig
from __future__ import annotations __all__ = ["SpeedMonitor", "LRMonitor", "Trainer"] log = logging.getLogger(__name__) @dataclass class SpeedMonitor: cfg: SpeedMonitorConfig start_times: Deque[float] = field(default_factory=lambda: deque([])) global_total_tokens: int = 0 device_interval_tokens: Deque[int] = field(default_factory=lambda: deque([])) def batch_start(self, global_total_tokens: int, device_batch_num_tokens: int, record: bool = True) -> None: self.global_total_tokens = global_total_tokens if record: if len(self.start_times) >= self.cfg.window_size: self.start_times.popleft() self.device_interval_tokens.popleft() self.start_times.append(time.monotonic()) self.device_interval_tokens.append(device_batch_num_tokens) def reset(self) -> None: self.start_times.clear() self.device_interval_tokens.clear() def check(self) -> Dict[str, float]: metrics: Dict[str, float] = {"throughput/total_tokens": self.global_total_tokens} if self.start_times: interval_seconds = time.monotonic() - self.start_times[0] interval_batches = len(self.start_times) interval_tokens = sum(self.device_interval_tokens) metrics["throughput/device/tokens_per_second"] = interval_tokens / interval_seconds metrics["throughput/device/batches_per_second"] = interval_batches / interval_seconds return metrics @dataclass class LRMonitor: optim: torch.optim.Optimizer def check(self) -> Dict[str, float]: lrs = [group["lr"] for group in self.optim.param_groups] return {f"optim/learning_rate_group{idx}": lr for idx, lr in enumerate(lrs)} @dataclass class Trainer: cfg: TrainConfig
model: Olmo
7
2023-11-14 02:24:07+00:00
16k
1in-oos/ccplus
caringcaribou/modules/uds.py
[ { "identifier": "auto_blacklist", "path": "caringcaribou/utils/can_actions.py", "snippet": "def auto_blacklist(bus, duration, classifier_function, print_results):\n \"\"\"Listens for false positives on the CAN bus and generates an arbitration ID blacklist.\n\n Finds all can.Message <msg> on 'bus' where 'classifier_function(msg)' evaluates to True.\n Terminates after 'duration' seconds and returns a set of all matching arbitration IDs.\n Prints progress, time countdown and list of results if 'print_results' is True.\n\n :param bus: CAN bus instance\n :param duration: duration in seconds\n :param classifier_function: function which, when called upon a can.Message instance,\n returns a bool indicating if it should be blacklisted\n :param print_results: whether progress and results should be printed to stdout\n :type bus: can.Bus\n :type duration: float\n :type classifier_function: function\n :type print_results: bool\n :return set of matching arbitration IDs to blacklist\n :rtype set(int)\n \"\"\"\n if print_results:\n print(\"Scanning for arbitration IDs to blacklist\")\n blacklist = set()\n start_time = time.time()\n end_time = start_time + duration\n while time.time() < end_time:\n if print_results:\n time_left = end_time - time.time()\n num_matches = len(blacklist)\n print(\"\\r{0:> 5.1f} seconds left, {1} found\".format(time_left, num_matches), end=\"\")\n stdout.flush()\n # Receive message\n msg = bus.recv(0.1)\n if msg is None:\n continue\n # Classify\n if classifier_function(msg):\n # Add to blacklist\n blacklist.add(msg.arbitration_id)\n if print_results:\n num_matches = len(blacklist)\n print(\"\\r 0.0 seconds left, {0} found\".format(num_matches), end=\"\")\n if len(blacklist) > 0:\n print(\"\\n Detected IDs: {0}\".format(\" \".join(sorted(list(map(hex, blacklist))))))\n else:\n print()\n return blacklist" }, { "identifier": "list_to_hex_str", "path": "caringcaribou/utils/common.py", "snippet": "def list_to_hex_str(data, delimiter=\"\"):\n \"\"\"Returns a hex string representation of the int values\n in 'data', separated with 'delimiter' between each byte\n\n Example:\n list_to_hex_str([10, 100, 200]) -> 0a.64.c8\n list_to_hex_str([0x07, 0xff, 0x6c], \"\") -> 07ff6c\n :param data: iterable of values\n :param delimiter: separator between values in output\n :type data: [int]\n :type delimiter: str\n :return: hex string representation of data\n :rtype str\n \"\"\"\n data_string = delimiter.join([\"{0:02x}\".format(i) for i in data])\n return data_string" }, { "identifier": "parse_int_dec_or_hex", "path": "caringcaribou/utils/common.py", "snippet": "def parse_int_dec_or_hex(value):\n \"\"\"Parses an integer on base 10 (decimal) or 16 (hex with \"0x\" prefix)\n\n Examples:\n parse_int_dec_or_hex(\"1234\") -> 1234\n parse_int_dec_or_hex(\"0xa7\") -> 167\n\n :param value: the value to parse\n :type value: str\n :rtype int\n \"\"\"\n return int(value, 0)" }, { "identifier": "ARBITRATION_ID_MAX", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MAX = 0x7FF" }, { "identifier": "ARBITRATION_ID_MAX_EXTENDED", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MAX_EXTENDED = 0x18DAFFF1" }, { "identifier": "ARBITRATION_ID_MIN_EXTENDED", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MIN_EXTENDED = 0x18DA00F1" }, { "identifier": "ARBITRATION_ID_MIN", "path": "caringcaribou/utils/constants.py", "snippet": "ARBITRATION_ID_MIN = 0x700" }, { "identifier": "IsoTp", "path": "caringcaribou/utils/iso15765_2.py", "snippet": "class 
IsoTp:\n \"\"\"\n Implementation of ISO-15765-2, also known as ISO-TP. This is a multi-frame messaging protocol\n over CAN, which allows message payloads of up to 4095 bytes.\n \"\"\"\n\n MAX_SF_LENGTH = 7\n MAX_FF_LENGTH = 6\n MAX_CF_LENGTH = 7\n\n SF_PCI_LENGTH = 1\n CF_PCI_LENGTH = 1\n FF_PCI_LENGTH = 2\n FC_PCI_LENGTH = 3\n\n FC_FS_CTS = 0\n FC_FS_WAIT = 1\n FC_FS_OVFLW = 2\n\n SF_FRAME_ID = 0\n FF_FRAME_ID = 1\n CF_FRAME_ID = 2\n FC_FRAME_ID = 3\n\n N_BS_TIMEOUT = 1.5\n\n MAX_FRAME_LENGTH = 8\n MAX_MESSAGE_LENGTH = 4095\n\n def __init__(self, arb_id_request, arb_id_response, bus=None, padding_value=0x00):\n # Setting default bus to None rather than the actual bus prevents a CanError when\n # called with a virtual CAN bus, while the OS is lacking a working CAN interface\n if bus is None:\n self.bus = can.Bus(DEFAULT_INTERFACE)\n else:\n self.bus = bus\n self.arb_id_request = arb_id_request\n self.arb_id_response = arb_id_response\n # Controls optional padding of SF messages and the last CF frame in multi-frame messages\n # Disabled padding is _not_ part of ISO-15765-2, but might prove useful for testing against some targets\n self.padding_value = padding_value\n if padding_value is None:\n self.padding_enabled = False\n else:\n self.padding_enabled = True\n if not isinstance(padding_value, int):\n raise TypeError(\"IsoTp: padding must be an integer or None, received '{0}'\".format(padding_value))\n if not 0x00 <= padding_value <= 0xFF:\n raise ValueError(\"IsoTp: padding must be in range 0x00-0xFF (0-255), got '{0}'\".format(padding_value))\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.bus.shutdown()\n\n def _set_filters(self, filters):\n \"\"\"\n Sets filters for the CAN bus - description can be found at\n https://python-can.readthedocs.io/en/stable/bus.html#can.BusABC.set_filters\n\n :param filters: dict specifying \"can_id\", \"can_mask\" and (optional) \"extended\" flag\n :return: None\n \"\"\"\n self.bus.set_filters(filters)\n\n def set_filter_single_arbitration_id(self, arbitration_id):\n \"\"\"Set a filter to only receive incoming messages on 'arbitration_id'\"\"\"\n arbitration_id_filter = [{\"can_id\": arbitration_id, \"can_mask\": ARBITRATION_ID_MAX_EXTENDED}]\n self._set_filters(arbitration_id_filter)\n\n def clear_filters(self):\n \"\"\"Remove arbitration ID filters\"\"\"\n self._set_filters(None)\n\n def send_message(self, data, arbitration_id, force_extended=False):\n \"\"\"\n Transmits a message using 'arbitration_id' and 'data' on 'self.bus'\n\n :param data: Data to send\n :param arbitration_id: Arbitration ID to use\n :param force_extended: Force extended arbitration ID\n :return: None\n \"\"\"\n is_extended = force_extended or arbitration_id > ARBITRATION_ID_MAX\n msg = can.Message(arbitration_id=arbitration_id, data=data, is_extended_id=is_extended)\n self.bus.send(msg)\n\n def decode_sf(self, frame):\n \"\"\"\n Decodes a singe frame (SF) message\n\n :param frame: Frame to decode\n :return: Tuple of single frame data length (SF_DL) and data if valid,\n Tuple of None, None otherwise\n \"\"\"\n if len(frame) >= self.SF_PCI_LENGTH:\n sf_dl = frame[0] & 0xF\n data = frame[1:]\n return sf_dl, list(data)\n else:\n return None, None\n\n def decode_ff(self, frame):\n \"\"\"\n Decodes a first frame (FF) message\n\n :param frame: Frame to decode\n :return: Tuple of first frame data length (FF_DL) and data if valid,\n Tuple of None, None otherwise\n \"\"\"\n if len(frame) >= self.FF_PCI_LENGTH:\n ff_dl = ((frame[0] & 0xF) << 8) 
| frame[1]\n data = frame[2:]\n return ff_dl, list(data)\n else:\n return None, None\n\n def decode_cf(self, frame):\n \"\"\"\n Decodes a consecutive frame (CF) message\n\n :param frame: Frame to decode\n :return: Tuple of sequence number (SN) and data if valid,\n Tuple of None, None otherwise\n \"\"\"\n if len(frame) >= self.CF_PCI_LENGTH:\n sn = frame[0] & 0xF\n data = frame[1:]\n return sn, list(data)\n else:\n return None, None\n\n def decode_fc(self, frame):\n \"\"\"\n Decodes a flow control (FC) frame\n\n :param frame: Frame to decode\n :return: Tuple of values flow status (FS), block size (BS) and separation time minimum (STmin) if valid,\n Tuple of None, None, None otherwise\n \"\"\"\n if len(frame) >= self.FC_PCI_LENGTH:\n fs = frame[0] & 0xF\n block_size = frame[1]\n st_min = frame[2]\n return fs, block_size, st_min\n else:\n return None, None, None\n\n def encode_fc(self, flow_status, block_size, st_min):\n \"\"\"\n Encodes a flow control (FC) message\n\n :param flow_status: Flow status (FS)\n :param block_size: Block size (BS)\n :param st_min: Separation time minimum (STmin)\n :return: Encoded data for the flow control message\n \"\"\"\n return [(self.FC_FRAME_ID << 4) | flow_status, block_size, st_min, 0, 0, 0, 0, 0]\n\n def send_request(self, message):\n \"\"\"\n Wrapper for sending 'message' as a request\n\n :param message: The message to send\n :return: None\n \"\"\"\n frames = self.get_frames_from_message(message, padding_value=self.padding_value)\n self.transmit(frames, self.arb_id_request, self.arb_id_response)\n\n def send_response(self, message):\n \"\"\"\n Wrapper for sending 'message' as a response\n\n :param message: The message to send\n :return: None\n \"\"\"\n frames = self.get_frames_from_message(message, padding_value=self.padding_value)\n self.transmit(frames, self.arb_id_response, self.arb_id_request)\n\n def indication(self, wait_window=None, trim_padding=True, first_frame_only=False):\n \"\"\"\n Receives an ISO-15765-2 message (one or more frames) and returns its content.\n\n :param wait_window: Max time (in seconds) to wait before timeout\n :param trim_padding: If True, removes message padding bytes from the received message\n :param first_frame_only: If True, return first frame only (simulating overflow behavior for multi-frame message)\n :return: A list of received data bytes if successful, None otherwise\n \"\"\"\n message = []\n\n if wait_window is None:\n wait_window = self.N_BS_TIMEOUT\n start_time = datetime.datetime.now()\n end_time = start_time + datetime.timedelta(seconds=wait_window)\n sn = 0\n message_length = 0\n\n while True:\n # Timeout check\n current_time = datetime.datetime.now()\n if current_time >= end_time:\n # Timeout\n return None\n # Receive frame\n msg = self.bus.recv(wait_window)\n if msg is not None:\n if msg.arbitration_id == self.arb_id_request:\n flow_control_arbitration_id = self.arb_id_response\n elif msg.arbitration_id == self.arb_id_response:\n flow_control_arbitration_id = self.arb_id_request\n else:\n # Unknown arbitration ID - ignore message\n continue\n frame = msg.data\n if len(frame) > 0:\n frame_type = (frame[0] >> 4) & 0xF\n if frame_type == self.SF_FRAME_ID:\n # Single frame (SF)\n dl, message = self.decode_sf(frame)\n if trim_padding:\n # Trim padding, in case the data exceeds single frame data length (SF_DL)\n message = message[:dl]\n break\n elif frame_type == self.FF_FRAME_ID:\n # First frame (FF) of a multi-frame message\n message_length, message = self.decode_ff(frame)\n if first_frame_only:\n # This is a 
hack to make it possible to only retrieve the first frame of a multi-frame\n # response, by telling the sender to stop sending data due to overflow\n ovflw_frame = self.encode_fc(self.FC_FS_OVFLW, 0, 0)\n # Respond with overflow (OVFLW) message\n self.send_message(ovflw_frame, flow_control_arbitration_id)\n # Return the first frame only\n break\n fc_frame = self.encode_fc(self.FC_FS_CTS, 0, 0)\n sn = 0\n # Respond with flow control (FC) message\n self.send_message(fc_frame, flow_control_arbitration_id)\n elif frame_type == self.CF_FRAME_ID:\n # Consecutive frame (CF)\n new_sn, data = self.decode_cf(frame)\n if (sn + 1) % 16 == new_sn:\n sn = new_sn\n message += data\n if len(message) >= message_length:\n # Last frame received\n if trim_padding:\n # Trim padding of last frame, which may exceed first frame data length (FF_DL)\n message = message[:message_length]\n # Stop listening for more frames\n break\n else:\n pass\n else:\n # Invalid frame type\n return None\n return list(message)\n\n def transmit(self, frames, arbitration_id, arbitration_id_flow_control):\n \"\"\"\n Transmits 'frames' in order on the bus, according to ISO-15765-2\n\n :param frames: List of frames (which are in turn lists of values) to send\n :param arbitration_id: The arbitration ID used for sending\n :param arbitration_id_flow_control: The arbitration ID used for receiving flow control (FC)\n :return: None\n \"\"\"\n if len(frames) == 0:\n # No data to send\n return None\n elif len(frames) == 1:\n # Single frame\n self.send_message(frames[0], arbitration_id)\n elif len(frames) > 1:\n # Multiple frames\n frame_index = 0\n # Send first frame (FF)\n self.send_message(frames[frame_index], arbitration_id)\n number_of_frames_left_to_send = len(frames) - 1\n number_of_frames_left_to_send_in_block = 0\n frame_index += 1\n st_min = 0\n while number_of_frames_left_to_send > 0:\n receiver_is_ready = False\n while not receiver_is_ready:\n # Wait for receiver to send flow control (FC)\n msg = self.bus.recv(self.N_BS_TIMEOUT)\n if msg is None:\n # Quit on timeout\n return None\n # Verify that msg uses the expected arbitration ID\n elif msg.arbitration_id != arbitration_id_flow_control:\n continue\n fc_frame = msg.data\n\n # Decode Flow Status (FS) from FC message\n fs, block_size, st_min = self.decode_fc(fc_frame)\n if fs == self.FC_FS_WAIT:\n # Flow status (FS) wait (WT)\n continue\n elif fs == self.FC_FS_CTS:\n # Continue to send (CTS)\n receiver_is_ready = True\n number_of_frames_left_to_send_in_block = block_size\n\n if number_of_frames_left_to_send < number_of_frames_left_to_send_in_block or block_size == 0:\n number_of_frames_left_to_send_in_block = number_of_frames_left_to_send\n # If STmin is specified in microseconds (0xF1-0xF9) or using reserved ranges (0x80-0xF0 and\n # 0xFA-0xFF), round up to one millisecond\n if st_min > 0x7F:\n st_min = 1\n elif fs == self.FC_FS_OVFLW:\n # Overflow - abort transmission\n return None\n else:\n # Timeout - did not receive a CTS message in time\n return None\n while number_of_frames_left_to_send_in_block > 0:\n # Send more frames, until it is time to wait for flow control (FC) again\n self.send_message(frames[frame_index], arbitration_id)\n frame_index += 1\n number_of_frames_left_to_send_in_block -= 1\n number_of_frames_left_to_send -= 1\n if number_of_frames_left_to_send_in_block > 0:\n time.sleep(st_min / 1000)\n\n @staticmethod\n def get_frames_from_message(message, padding_value=0x00):\n \"\"\"\n Returns a copy of 'message' split into frames,\n :param message: Message to split\n 
:param padding_value: Integer value used to pad messages, or None to disable padding (not part of ISO-15765-3)\n :return: List of frames\n \"\"\"\n if padding_value is None:\n padding_enabled = False\n padding_value = 0x00\n else:\n padding_enabled = True\n\n frame_list = []\n message_length = len(message)\n if message_length > IsoTp.MAX_MESSAGE_LENGTH:\n error_msg = \"Message too long for ISO-TP. Max allowed length is {0} bytes, received {1} bytes\".format(\n IsoTp.MAX_MESSAGE_LENGTH, message_length)\n raise ValueError(error_msg)\n if message_length <= IsoTp.MAX_SF_LENGTH:\n # Single frame (SF) message\n if padding_enabled:\n frame = [padding_value] * IsoTp.MAX_FRAME_LENGTH\n else:\n frame = [padding_value] * (message_length + 1)\n frame[0] = (IsoTp.SF_FRAME_ID << 4) | message_length\n for i in range(0, message_length):\n frame[1 + i] = message[i]\n frame_list.append(frame)\n else:\n # Multiple frame message\n bytes_left_to_copy = message_length\n # Create first frame (FF)\n frame = [padding_value] * IsoTp.MAX_FRAME_LENGTH\n frame[0] = (IsoTp.FF_FRAME_ID << 4) | (message_length >> 8)\n frame[1] = message_length & 0xFF\n for i in range(0, IsoTp.MAX_FF_LENGTH):\n frame[2 + i] = message[i]\n frame_list.append(frame)\n # Create consecutive frames (CF)\n bytes_copied = IsoTp.MAX_FF_LENGTH\n bytes_left_to_copy -= bytes_copied\n sn = 0\n while bytes_left_to_copy > 0:\n sn = (sn + 1) % 16\n if not padding_enabled and bytes_left_to_copy < 7:\n # Skip padding on last CF\n frame = [padding_value] * (bytes_left_to_copy + 1)\n else:\n frame = [padding_value] * IsoTp.MAX_FRAME_LENGTH\n frame[0] = (IsoTp.CF_FRAME_ID << 4) | sn\n # Fill current CF\n bytes_to_copy_to_current_cf = min(IsoTp.MAX_CF_LENGTH, bytes_left_to_copy)\n for i in range(bytes_to_copy_to_current_cf):\n frame[1 + i] = message[bytes_copied]\n bytes_left_to_copy = bytes_left_to_copy - 1\n bytes_copied = bytes_copied + 1\n frame_list.append(frame)\n return frame_list" }, { "identifier": "Constants", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n # the SI values. 
The NR_SI value is not used as a SI value in order to\n # make A_Data coding and decoding easier.\"\n NR_SI = 0x7F" }, { "identifier": "Iso14229_1", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Iso14229_1(object):\n P3_CLIENT = 5\n\n def __init__(self, tp):\n self.tp = tp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n @staticmethod\n def get_service_response_id(request_id):\n \"\"\"\n Returns the service response ID for the given request ID\n\n :param request_id: Request service ID\n :return: Corresponding response service ID\n \"\"\"\n return request_id + 0x40\n\n @staticmethod\n def get_service_request_id(response_id):\n \"\"\"\n Returns the service request ID for the given response ID\n\n :param response_id: Response service ID\n :return: Corresponding request service ID\n \"\"\"\n return response_id - 0x40\n\n def send_request(self, data):\n \"\"\"\n Sends a request message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_request(data)\n\n def send_response(self, data):\n \"\"\"\n Sends a response message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_response(data)\n\n def receive_response(self, wait_window):\n \"\"\"\n Attempts to receive a response through the underlying TP layer\n\n :param wait_window: Minimum time (in seconds) to wait before timeout\n :return: The received response if successful,\n None otherwise\n \"\"\"\n start_time = time.process_time()\n while True:\n current_time = time.process_time()\n if (current_time - start_time) > wait_window:\n return None\n\n response = self.tp.indication(wait_window)\n NRC = NegativeResponseCodes\n NRC_RCRRP = NRC.REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\n if response is not None and len(response) >= 3:\n if (response[0] == Constants.NR_SI and\n response[2] == NRC_RCRRP):\n continue\n break\n return response\n\n @staticmethod\n def is_positive_response(response):\n \"\"\"\n Returns a bool indicating whether 'response' is positive\n\n :param response: ISO-14229-1 response data\n :return: False if response is a NEGATIVE_RESPONSE,\n True otherwise\n \"\"\"\n if (response is not None and\n len(response) > 0 and\n response[0] != Constants.NR_SI):\n return True\n return False\n\n def read_data_by_identifier(self, identifier):\n \"\"\"\n Sends a \"read data by identifier\" request for 'identifier'\n\n :param identifier: Data identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n response = []\n num_dids = len(identifier)\n if num_dids > 0:\n request = [0] * ((num_dids * 2) + 1)\n request[0] = ServiceID.READ_DATA_BY_IDENTIFIER\n for i in range(0, num_dids):\n request[i * 2 + 1] = (identifier[i] >> 8) & 0xFF\n request[i * 2 + 2] = identifier[i] & 0xFF\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n return response\n\n def read_memory_by_address(self, address_and_length_format,\n memory_address, memory_size):\n \"\"\"\n Sends a \"read memory by address\" request for 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n 
request[0] = ServiceID.READ_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_memory_by_address(self, address_and_length_format,\n memory_address, memory_size, data):\n \"\"\"\n Sends a \"write memory by address\" request to write 'data' to\n 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :param data: The data to write to 'memory_address'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.WRITE_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_data_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"write data by identifier\" request to write 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data to write to 'identifier'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.WRITE_DATA_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def input_output_control_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"input output control by identifier\" request for 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def dynamically_define_data_identifier(self, identifier,\n sub_function, sub_function_arg):\n \"\"\"\n Sends a \"dynamically define data identifier\" request for\n 'identifier'\n\n :param identifier: DDDID to set\n :param sub_function: Sub function\n :param sub_function_arg: Sub function arguments\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (identifier is None or\n sub_function is None or\n sub_function_arg is None):\n return None\n\n request = [0] * (1 + 1 + 2 + len(sub_function_arg) * 4)\n request[0] = ServiceID.DYNAMICALLY_DEFINE_DATA_IDENTIFIER\n request[1] = sub_function\n request[2] = (identifier >> 8) & 0xFF\n request[3] = identifier & 0xFF\n\n offset = 4\n for did in sub_function_arg:\n request[offset + 0] = 
(did.sourceDataIdentifier >> 8) & 0xFF\n request[offset + 1] = did.sourceDataIdentifier & 0xFF\n request[offset + 2] = did.positionInSourceDataRecord\n request[offset + 3] = did.memorySize\n offset += 4\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def diagnostic_session_control(self, session_type):\n \"\"\"\n Sends a \"DiagnosticSessionControl\" request for specified session\n type\n\n :param session_type: Indicates which kind of session should be\n requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n request[1] = session_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def ecu_reset(self, reset_type):\n \"\"\"\n Sends an \"ECU reset\" request for specified reset type\n\n :param reset_type: Indicates which kind of reset should be requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.ECU_RESET\n request[1] = reset_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_request_seed(self, level, data_record=None):\n \"\"\"\n Sends a Security Access \"Request seed\" message for 'level'\n\n :param level: Security Access Type level to send request seed for\n :param data_record: Optional data to transmit when requesting seed,\n e.g. client identification\n :return: Response data (containing seed) if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n if data_record:\n for data_record in data_record:\n request.append(data_record)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_send_key(self, level, key):\n \"\"\"\n Sends a Security Access \"Send key\" message with 'key' for 'level'\n\n :param level: Security Access Type level to send key for\n :param key: Key to transmit\n :return: Response data if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n for key_byte in key:\n request.append(key_byte)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def read_data_by_periodic_identifier(self, transmission_mode,\n identifier):\n \"\"\"\n Sends a \"read data by periodic identifier\" request for 'identifier'\n\n :param transmission_mode: Transmission mode\n :param identifier: Identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (transmission_mode is None or\n identifier is None or\n len(identifier) == 0):\n return None\n\n request = [0] * (2 + len(identifier))\n request[0] = ServiceID.READ_DATA_BY_PERIODIC_IDENTIFIER\n request[1] = transmission_mode\n\n for i in range(0, len(identifier)):\n request[2 + i] = identifier[i]\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response" }, { "identifier": "NegativeResponseCodes", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class NegativeResponseCodes(object):\n \"\"\"\n ISO-14229-1 negative response codes\n \"\"\"\n POSITIVE_RESPONSE = 0x00\n # 0x01-0x0F ISO SAE Reserved\n GENERAL_REJECT = 0x10\n SERVICE_NOT_SUPPORTED = 0x11\n SUB_FUNCTION_NOT_SUPPORTED = 0x12\n INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT = 0x13\n RESPONSE_TOO_LONG = 0x14\n # 0x15-0x20 
ISO SAE Reserved\n BUSY_REPEAT_REQUEST = 0x21\n CONDITIONS_NOT_CORRECT = 0x22\n # 0x23 ISO SAE Reserved\n REQUEST_SEQUENCE_ERROR = 0x24\n NO_RESPONSE_FROM_SUBNET_COMPONENT = 0x25\n FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION = 0x26\n # 0x27-0x30 ISO SAE Reserved\n REQUEST_OUT_OF_RANGE = 0x31\n # 0x32 ISO SAE Reserved\n SECURITY_ACCESS_DENIED = 0x33\n # 0x34 ISO SAE Reserved\n INVALID_KEY = 0x35\n EXCEEDED_NUMBER_OF_ATTEMPTS = 0x36\n REQUIRED_TIME_DELAY_NOT_EXPIRED = 0x37\n # 0x38-0x4F Reserved by extended data link security document\n # 0x50-0x6F ISO SAE Reserved\n UPLOAD_DOWNLOAD_NOT_ACCEPTED = 0x70\n TRANSFER_DATA_SUSPENDED = 0x71\n GENERAL_PROGRAMMING_FAILURE = 0x72\n WRONG_BLOCK_SEQUENCE_COUNTER = 0x73\n # 0x74-0x77 ISO SAE Reserved\n REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING = 0x78\n # 0x79-0x7D ISO SAE Reserved\n SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7E\n SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7F\n # 0x80 ISO SAE Reserved\n RPM_TOO_HIGH = 0x81\n RPM_TOO_LOW = 0x82\n ENGINE_IS_RUNNING = 0x83\n ENGINE_IS_NOT_RUNNING = 0x84\n ENGINE_RUN_TIME_TOO_LOW = 0x85\n TEMPERATURE_TOO_HIGH = 0x86\n TEMPERATURE_TOO_LOW = 0x87\n VEHICLE_SPEED_TOO_HIGH = 0x88\n VEHICLE_SPEED_TOO_LOW = 0x89\n THROTTLE_PEDAL_TOO_HIGH = 0x8A\n THROTTLE_PEDAL_TOO_LOW = 0x8B\n TRANSMISSION_RANGE_NOT_IN_NEUTRAL = 0x8C\n TRANSMISSION_RANGE_NOT_IN_GEAR = 0x8D\n # 0x8E ISO SAE Reserved\n BRAKE_SWITCHES_NOT_CLOSED = 0x8F\n SHIFT_LEVER_NOT_IN_PARK = 0x90\n TORQUE_CONVERTER_CLUTCH_LOCKED = 0x91\n VOLTAGE_TOO_HIGH = 0x92\n VOLTAGE_TOO_LOW = 0x93\n # 0x94-0xEF Reserved for specific conditions not correct\n # 0xF0-0xFE Vehicle manufacturer specific conditions not correct\n # 0xFF ISO SAE Reserved" }, { "identifier": "Services", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Services(object):\n \"\"\"Class structure containing service specific constants, sub-function\n parameters and functions\"\"\"\n\n class DiagnosticSessionControl(BaseService):\n\n service_id = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n\n class DiagnosticSessionType(object):\n # 0x00 ISO SAE Reserved\n DEFAULT_SESSION = 0x01\n PROGRAMMING_SESSION = 0x02\n EXTENDED_DIAGNOSTIC_SESSION = 0x03\n SAFETY_SYSTEM_DIAGNOSTIC_SESSION = 0x04\n # 0x05-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n VEHICLE_MANUFACTURER_SESSION_MIN = 0x40\n VEHICLE_MANUFACTURER_SESSION_MAX = 0x5F\n # 0x60-0x7E System supplier specific\n SYSTEM_SUPPLIER_SESSION_MIN = 0x60\n SYSTEM_SUPPLIER_SESSION_MAX = 0x7E\n # 0x7F ISO SAE Reserved\n\n class EcuReset(BaseService):\n\n service_id = ServiceID.ECU_RESET\n\n class ResetType(object):\n # 0x00 ISO SAE Reserved\n HARD_RESET = 0x01\n KEY_OFF_ON_RESET = 0x02\n SOFT_RESET = 0x03\n ENABLE_RAPID_POWER_SHUTDOWN = 0x04\n DISABLE_RAPID_POWER_SHUTDOWN = 0x05\n # 0x06-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n # 0x60-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n class SecurityAccess(BaseService):\n\n service_id = ServiceID.SECURITY_ACCESS\n\n class RequestSeedOrSendKey(object):\n \"\"\"\n These are lined up so that value X \"request seed level N\" has\n a matching \"send key level N\" at value X+1.\n\n 0x01 is Request seed level 0x01\n 0x02 is Send key level 0x01\n 0x03 is Request seed level 0x02\n 0x04 is Send key level 0x02\n (...)\n 0x41 is Request seed level 0x21\n 0x42 is Send key level 0x21\n\n The security levels numbering is arbitrary and does not imply\n any relationship between the levels.\n \"\"\"\n\n # 0x00 ISO SAE Reserved\n # 0x01-0x42 Vehicle 
manufacturer specific request\n # seed/send key pairs\n # 0x43-0X5E ISO SAE Reserved\n ISO_26021_2_VALUES = 0x5F\n ISO_26021_2_SEND_KEY = 0x60\n # 0x61-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n __REQUEST_SEED_MIN = 0x01\n __REQUEST_SEED_MAX = 0x41\n __SEND_KEY_MIN = 0x02\n __SEND_KEY_MAX = 0x42\n\n def is_valid_request_seed_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid request seed\n value and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__REQUEST_SEED_MIN\n <= value <= self.__REQUEST_SEED_MAX)\n is_odd = value % 2 == 1\n return valid_interval and is_odd\n\n def is_valid_send_key_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid send key value\n and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__SEND_KEY_MIN\n <= value <= self.__SEND_KEY_MAX)\n is_even = value % 2 == 0\n return valid_interval and is_even\n\n @staticmethod\n def get_send_key_for_request_seed(seed):\n return seed + 1\n\n class TesterPresent(BaseService):\n\n service_id = ServiceID.TESTER_PRESENT" }, { "identifier": "ServiceID", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class ServiceID(object):\n \"\"\"\n ISO-14229-1 service ID definitions\n \"\"\"\n DIAGNOSTIC_SESSION_CONTROL = 0x10\n ECU_RESET = 0x11\n CLEAR_DIAGNOSTIC_INFORMATION = 0x14\n READ_DTC_INFORMATION = 0x19\n READ_DATA_BY_IDENTIFIER = 0x22\n READ_MEMORY_BY_ADDRESS = 0x23\n READ_SCALING_DATA_BY_IDENTIFIER = 0x24\n SECURITY_ACCESS = 0x27\n COMMUNICATION_CONTROL = 0x28\n READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2A\n DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2C\n WRITE_DATA_BY_IDENTIFIER = 0x2E\n INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2F\n ROUTINE_CONTROL = 0x31\n REQUEST_DOWNLOAD = 0x34\n REQUEST_UPLOAD = 0x35\n TRANSFER_DATA = 0x36\n REQUEST_TRANSFER_EXIT = 0x37\n REQUEST_FILE_TRANSFER = 0x38\n WRITE_MEMORY_BY_ADDRESS = 0x3D\n TESTER_PRESENT = 0x3E\n ACCESS_TIMING_PARAMETER = 0x83\n SECURED_DATA_TRANSMISSION = 0x84\n CONTROL_DTC_SETTING = 0x85\n RESPONSE_ON_EVENT = 0x86\n LINK_CONTROL = 0x87" } ]
from caringcaribou.utils.can_actions import auto_blacklist from caringcaribou.utils.common import list_to_hex_str, parse_int_dec_or_hex from caringcaribou.utils.constants import ARBITRATION_ID_MAX, ARBITRATION_ID_MAX_EXTENDED,ARBITRATION_ID_MIN_EXTENDED from caringcaribou.utils.constants import ARBITRATION_ID_MIN from caringcaribou.utils.iso15765_2 import IsoTp from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, Services, ServiceID from sys import stdout, version_info, stderr import argparse import datetime import time
12,841
Returns the first response received from 'arb_id_response' within 'timeout' seconds or None otherwise. :param arb_id_request: arbitration ID for requests :param arb_id_response: arbitration ID for responses :param level: vehicle manufacturer specific access level to send key for :param key: key to transmit :param timeout: seconds to wait for response before timeout, or None for default UDS timeout :type arb_id_request: int :type arb_id_response: int :type level: int :type key: [int] :type timeout: float or None :return: list of response byte values on success, None otherwise :rtype [int] or None """ # Sanity checks if (not Services.SecurityAccess.RequestSeedOrSendKey() .is_valid_send_key_level(level)): raise ValueError("Invalid send key level") if isinstance(timeout, float) and timeout < 0.0: raise ValueError("Timeout value ({0}) cannot be negative" .format(timeout)) with IsoTp(arb_id_request=arb_id_request, arb_id_response=arb_id_response) as tp: # Setup filter for incoming messages tp.set_filter_single_arbitration_id(arb_id_response) with Iso14229_1(tp) as uds: # Set timeout if timeout is not None: uds.P3_CLIENT = timeout response = uds.security_access_send_key(level=level, key=key) return response def __dump_dids_wrapper(args): """Wrapper used to initiate data identifier dump""" arb_id_request = args.src arb_id_response = args.dst timeout = args.timeout min_did = args.min_did max_did = args.max_did print_results = True dump_dids(arb_id_request, arb_id_response, timeout, min_did, max_did, print_results) def __auto_wrapper(args): """Wrapper used to initiate automated UDS scan""" E=args.E min_id = args.min max_id = args.max blacklist = args.blacklist auto_blacklist_duration = args.autoblacklist delay = args.delay verify = not args.skipverify print_results = True timeout = args.timeout min_did = args.min_did max_did = args.max_did try: arb_id_pairs = uds_discovery(E, min_id, max_id, blacklist, auto_blacklist_duration, delay, verify, print_results) print("\n") if len(arb_id_pairs) == 0: # No UDS discovered print("\nDiagnostics service could not be found.") else: # Print result table print("\nIdentified diagnostics:\n") table_line = "+------------+------------+" print(table_line) print("| CLIENT ID | SERVER ID |") print(table_line) for (client_id, server_id) in arb_id_pairs: print("| 0x{0:08x} | 0x{1:08x} |" .format(client_id, server_id)) print(table_line) print("\n") # Enumerate each pair for (client_id, server_id) in arb_id_pairs: args.src = client_id args.dst = server_id # Print Client/Server result table print("\nTarget Diagnostic IDs:\n") table_line = "+------------+------------+" print(table_line) print("| CLIENT ID | SERVER ID |") print(table_line) print("| 0x{0:08x} | 0x{1:08x} |" .format(client_id, server_id)) print(table_line) print("\nEnumerating Services:\n") found_services = service_discovery(client_id, server_id, timeout) found_subservices = [] print("\nIdentified services:\n") # Print available services result table for service_id in found_services: service_name = UDS_SERVICE_NAMES.get(service_id, "Unknown service") print("Supported service 0x{0:02x}: {1}" .format(service_id, service_name)) print("\n") dump_dids(client_id, server_id, timeout, min_did, max_did, print_results)
''' module_template.py This file contains a template for a simple CaringCaribou module. The module's entry point is the 'module_main' function. Steps to add this module to CaringCaribou and run it: 1. Copy this template into the `caringcaribou/modules` directory: $ cp module_template.py my_module.py 2. In `setup.py`, add an entry under `caringcaribou.modules`, referencing your new module like: `my_module = caringcaribou.modules.my_module` 3. Run: `setup.py install` 4. Verify that the module is available, it should be listed in the output of `cc.py -h` 5. Run the following command to run module and show usage instructions: $ cc.py my_module -h ''' from __future__ import print_function # Handle large ranges efficiently in both python 2 and 3 if version_info[0] == 2: range = xrange UDS_SERVICE_NAMES = { 0x10: "DIAGNOSTIC_SESSION_CONTROL", 0x11: "ECU_RESET", 0x14: "CLEAR_DIAGNOSTIC_INFORMATION", 0x19: "READ_DTC_INFORMATION", 0x20: "RETURN_TO_NORMAL", 0x22: "READ_DATA_BY_IDENTIFIER", 0x23: "READ_MEMORY_BY_ADDRESS", 0x24: "READ_SCALING_DATA_BY_IDENTIFIER", 0x27: "SECURITY_ACCESS", 0x28: "COMMUNICATION_CONTROL", 0x2A: "READ_DATA_BY_PERIODIC_IDENTIFIER", 0x2C: "DYNAMICALLY_DEFINE_DATA_IDENTIFIER", 0x2D: "DEFINE_PID_BY_MEMORY_ADDRESS", 0x2E: "WRITE_DATA_BY_IDENTIFIER", 0x2F: "INPUT_OUTPUT_CONTROL_BY_IDENTIFIER", 0x31: "ROUTINE_CONTROL", 0x34: "REQUEST_DOWNLOAD", 0x35: "REQUEST_UPLOAD", 0x36: "TRANSFER_DATA", 0x37: "REQUEST_TRANSFER_EXIT", 0x38: "REQUEST_FILE_TRANSFER", 0x3D: "WRITE_MEMORY_BY_ADDRESS", 0x3E: "TESTER_PRESENT", 0x7F: "NEGATIVE_RESPONSE", 0x83: "ACCESS_TIMING_PARAMETER", 0x84: "SECURED_DATA_TRANSMISSION", 0x85: "CONTROL_DTC_SETTING", 0x86: "RESPONSE_ON_EVENT", 0x87: "LINK_CONTROL" } NRC_NAMES = { 0x00: "POSITIVE_RESPONSE", 0x10: "GENERAL_REJECT", 0x11: "SERVICE_NOT_SUPPORTED", 0x12: "SUB_FUNCTION_NOT_SUPPORTED", 0x13: "INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT", 0x14: "RESPONSE_TOO_LONG", 0x21: "BUSY_REPEAT_REQUEST", 0x22: "CONDITIONS_NOT_CORRECT", 0x24: "REQUEST_SEQUENCE_ERROR", 0x25: "NO_RESPONSE_FROM_SUBNET_COMPONENT", 0x26: "FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION", 0x31: "REQUEST_OUT_OF_RANGE", 0x33: "SECURITY_ACCESS_DENIED", 0x35: "INVALID_KEY", 0x36: "EXCEEDED_NUMBER_OF_ATTEMPTS", 0x37: "REQUIRED_TIME_DELAY_NOT_EXPIRED", 0x70: "UPLOAD_DOWNLOAD_NOT_ACCEPTED", 0x71: "TRANSFER_DATA_SUSPENDED", 0x72: "GENERAL_PROGRAMMING_FAILURE", 0x73: "WRONG_BLOCK_SEQUENCE_COUNTER", 0x78: "REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING", 0x7E: "SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION", 0x7F: "SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION", 0x81: "RPM_TOO_HIGH", 0x82: "RPM_TOO_LOW", 0x83: "ENGINE_IS_RUNNING", 0x84: "ENGINE_IS_NOT_RUNNING", 0x85: "ENGINE_RUN_TIME_TOO_LOW", 0x86: "TEMPERATURE_TOO_HIGH", 0x87: "TEMPERATURE_TOO_LOW", 0x88: "VEHICLE_SPEED_TOO_HIGH", 0x89: "VEHICLE_SPEED_TOO_LOW", 0x8A: "THROTTLE_PEDAL_TOO_HIGH", 0x8B: "THROTTLE_PEDAL_TOO_LOW", 0x8C: "TRANSMISSION_RANGE_NOT_IN_NEUTRAL", 0x8D: "TRANSMISSION_RANGE_NOT_IN_GEAR", 0x8F: "BRAKE_SWITCHES_NOT_CLOSED", 0x90: "SHIFT_LEVER_NOT_IN_PARK", 0x91: "TORQUE_CONVERTER_CLUTCH_LOCKED", 0x92: "VOLTAGE_TOO_HIGH", 0x93: "VOLTAGE_TOO_LOW" } DELAY_DISCOVERY = 0.01 DELAY_TESTER_PRESENT = 0.5 DELAY_SECSEED_RESET = 0.01 TIMEOUT_SERVICES = 0.2 TIMEOUT_SUBSERVICES = 0.02 # Max number of arbitration IDs to backtrack during verification VERIFICATION_BACKTRACK = 5 # Extra time in seconds to wait for responses during verification VERIFICATION_EXTRA_DELAY = 0.5 BYTE_MIN = 0x00 BYTE_MAX = 0xFF DUMP_DID_MIN = 0x0000 DUMP_DID_MAX = 0xFFFF DUMP_DID_TIMEOUT 
= 0.2 def uds_discovery(E, min_id, max_id, blacklist_args, auto_blacklist_duration, delay, verify, print_results=True): """Scans for diagnostics support by brute forcing session control messages to different arbitration IDs. Returns a list of all (client_arb_id, server_arb_id) pairs found. :param min_id: start arbitration ID value :param max_id: end arbitration ID value :param blacklist_args: blacklist for arbitration ID values :param auto_blacklist_duration: seconds to scan for interfering arbitration IDs to blacklist automatically :param delay: delay between each message :param verify: whether found arbitration IDs should be verified :param print_results: whether results should be printed to stdout :type min_id: int :type max_id: int :type blacklist_args: [int] :type auto_blacklist_duration: float :type delay: float :type verify: bool :type print_results: bool :return: list of (client_arbitration_id, server_arbitration_id) pairs :rtype [(int, int)] """ # Set defaults #-E为扩展帧 if E: max_id = ARBITRATION_ID_MAX_EXTENDED min_id = ARBITRATION_ID_MIN_EXTENDED elif min_id is None: min_id = ARBITRATION_ID_MIN max_id = ARBITRATION_ID_MAX if auto_blacklist_duration is None: auto_blacklist_duration = 0 if blacklist_args is None: blacklist_args = [] # Sanity checks if max_id < min_id: raise ValueError("max_id must not be smaller than min_id -" " got min:0x{0:x}, max:0x{1:x}".format(min_id, max_id)) if auto_blacklist_duration < 0: raise ValueError("auto_blacklist_duration must not be smaller " "than 0, got {0}'".format(auto_blacklist_duration)) diagnostic_session_control = Services.DiagnosticSessionControl service_id = diagnostic_session_control.service_id sub_function = diagnostic_session_control.DiagnosticSessionType.DEFAULT_SESSION session_control_data = [service_id, sub_function] valid_session_control_responses = [0x50, 0x7F] def is_valid_response(message): return (len(message.data) >= 2 and message.data[1] in valid_session_control_responses) found_arbitration_ids = [] with IsoTp(None, None) as tp: blacklist = set(blacklist_args) # Perform automatic blacklist scan if auto_blacklist_duration > 0: auto_bl_arb_ids = auto_blacklist(tp.bus, auto_blacklist_duration, is_valid_response, print_results) blacklist |= auto_bl_arb_ids # Prepare session control frame sess_ctrl_frm = tp.get_frames_from_message(session_control_data) if E is None: temp = 1 else: temp = 0x100 send_arb_id = min_id - temp while send_arb_id < max_id: send_arb_id += temp if print_results: print("\rSending Diagnostic Session Control to 0x{0:04x}" .format(send_arb_id), end="") stdout.flush() # Send Diagnostic Session Control tp.transmit(sess_ctrl_frm, send_arb_id, None) end_time = time.time() + delay # Listen for response while time.time() < end_time: msg = tp.bus.recv(0) if msg is None: # No response received continue if msg.arbitration_id in blacklist: # Ignore blacklisted arbitration IDs continue if is_valid_response(msg): # Valid response if verify: # Verification - backtrack the latest IDs and # verify that the same response is received verified = False # Set filter to only receive messages for the # arbitration ID being verified tp.set_filter_single_arbitration_id(msg.arbitration_id) if print_results: print("\n Verifying potential response from " "0x{0:04x}".format(send_arb_id)) verify_id_range = range(send_arb_id, send_arb_id - VERIFICATION_BACKTRACK, -1) for verify_arb_id in verify_id_range: if print_results: print(" Resending 0x{0:0x}... 
" .format(verify_arb_id), end=" ") tp.transmit(sess_ctrl_frm, verify_arb_id, None) # Give some extra time for verification, in # case of slow responses verification_end_time = (time.time() + delay + VERIFICATION_EXTRA_DELAY) while time.time() < verification_end_time: verification_msg = tp.bus.recv(0) if verification_msg is None: continue if is_valid_response(verification_msg): # Verified verified = True # Update send ID - if server responds # slowly, initial value may be faulty. # Also ensures we resume searching on # the next arb ID after the actual # match, rather than the one after the # last potential match (which could lead # to false negatives if multiple servers # listen to adjacent arbitration IDs and # respond slowly) send_arb_id = verify_arb_id break if print_results: # Print result if verified: print("Success") else: print("No response") if verified: # Verification succeeded - stop checking break # Remove filter after verification tp.clear_filters() if not verified: # Verification failed - move on if print_results: print(" False match - skipping") continue if print_results: if not verify: # Blank line needed print() print("Found diagnostics server " "listening at 0x{0:04x}, " "response at 0x{1:04x}" .format(send_arb_id, msg.arbitration_id)) # Add found arbitration ID pair found_arb_id_pair = (send_arb_id, msg.arbitration_id) found_arbitration_ids.append(found_arb_id_pair) if print_results: print() return found_arbitration_ids def __uds_discovery_wrapper(args): """Wrapper used to initiate a UDS discovery scan""" E=args.E min_id = args.min max_id = args.max blacklist = args.blacklist auto_blacklist_duration = args.autoblacklist delay = args.delay verify = not args.skipverify print_results = True try: arb_id_pairs = uds_discovery(E, min_id, max_id, blacklist, auto_blacklist_duration, delay, verify, print_results) if len(arb_id_pairs) == 0: # No UDS discovered print("\nDiagnostics service could not be found.") else: # Print result table print("\nIdentified diagnostics:\n") table_line = "+------------+------------+" print(table_line) print("| CLIENT ID | SERVER ID |") print(table_line) for (client_id, server_id) in arb_id_pairs: print("| 0x{0:08x} | 0x{1:08x} |" .format(client_id, server_id)) print(table_line) except ValueError as e: print("Discovery failed: {0}".format(e)) def service_discovery(arb_id_request, arb_id_response, timeout, min_id=BYTE_MIN, max_id=BYTE_MAX, print_results=True): """Scans for supported UDS services on the specified arbitration ID. Returns a list of found service IDs. 
:param arb_id_request: arbitration ID for requests :param arb_id_response: arbitration ID for responses :param timeout: delay between each request sent :param min_id: first service ID to scan :param max_id: last service ID to scan :param print_results: whether progress should be printed to stdout :type arb_id_request: int :type arb_id_response: int :type timeout: float :type min_id: int :type max_id: int :type print_results: bool :return: list of supported service IDs :rtype [int] """ found_services = [] with IsoTp(arb_id_request=arb_id_request, arb_id_response=arb_id_response) as tp: # Setup filter for incoming messages tp.set_filter_single_arbitration_id(arb_id_response) # Send requests try: for service_id in range(min_id, max_id + 1): tp.send_request([service_id]) if print_results: print("\rProbing service 0x{0:02x} ({0}/{1}): found {2}" .format(service_id, max_id, len(found_services)), end="") stdout.flush() # Get response msg = tp.bus.recv(timeout) if msg is None: # No response received continue # Parse response if len(msg.data) > 3: # Since service ID is included in the response, mapping is correct even if response is delayed response_id = msg.data[1] response_service_id = msg.data[2] status = msg.data[3] if response_id != Constants.NR_SI: request_id = Iso14229_1.get_service_request_id(response_id) found_services.append(request_id) elif status != NegativeResponseCodes.SERVICE_NOT_SUPPORTED: # Any other response than "service not supported" counts found_services.append(response_service_id) if print_results: print("\nDone!\n") except KeyboardInterrupt: if print_results: print("\nInterrupted by user!\n") return found_services def __service_discovery_wrapper(args): """Wrapper used to initiate a service discovery scan""" arb_id_request = args.src arb_id_response = args.dst timeout = args.timeout # Probe services found_services = service_discovery(arb_id_request, arb_id_response, timeout) # Print results for service_id in found_services: service_name = UDS_SERVICE_NAMES.get(service_id, "Unknown service") print("Supported service 0x{0:02x}: {1}" .format(service_id, service_name)) def sub_discovery(arb_id_request, arb_id_response, diagnostic, service, timeout, print_results=True): """Scans for supported UDS Diagnostic Session Control subservices on the specified arbitration ID. Returns a list of found Diagnostic Session Control subservice IDs. 
:param arb_id_request: arbitration ID for requests :param arb_id_response: arbitration ID for responses :param timeout: delay between each request sent :param diagnostic: the diagnostic session control subfunction in which the target service is accessible :param service: the target service to be enumerated :param print_results: whether progress should be printed to stdout :type arb_id_request: int :type arb_id_response: int :type timeout: float :type diagnostic: int :type service: int :type print_results: bool :return: list of supported service IDs :rtype [int] """ found_subservices = [] subservice_status = [] try: for i in range(0, 256): if service != Services.DiagnosticSessionControl: extended_session(arb_id_request, arb_id_response, diagnostic) else: extended_session(arb_id_request, arb_id_response, 1) time.sleep(0.1) response = raw_send(arb_id_request, arb_id_response, service, i) service_name = UDS_SERVICE_NAMES.get(service, "Unknown service") print("\rProbing sub-service ID 0x{0:02x} for service {1} (0x{2:02x}).".format(i, service_name, service), end="") if response is None: # No response received continue # Parse response if len(response) >= 2: response_id = response[0] response_service_id = response[1] if len(response) >= 3: status = response[2] else: status = None if Iso14229_1.is_positive_response(response): found_subservices.append(i) subservice_status.append(0x00) elif response_id == Constants.NR_SI and response_service_id == service and status != NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED: # Any other response than "service not supported" counts found_subservices.append(i) subservice_status.append(response_service_id) time.sleep(timeout) except KeyboardInterrupt: if print_results: print("\nInterrupted by user!\n") return found_subservices, subservice_status def __sub_discovery_wrapper(args): """Wrapper used to initiate a subservice discovery scan""" arb_id_request = args.src arb_id_response = args.dst diagnostic = args.dsc service = args.service timeout = args.timeout # Probe subservices found_subservices, subservice_status = sub_discovery(arb_id_request, arb_id_response, diagnostic, service, timeout) service_name = UDS_SERVICE_NAMES.get(service, "Unknown service") # Print results if len(found_subservices) == 0: print("\nNo Sub-Services were discovered for service {0:02x} - {1}.\n".format(service, service_name, end=' ')) else: print("\nSub-Services Discovered for Service {0:02x} - {1}:\n".format(service, service_name, end=' ')) for subservice_id in found_subservices: nrc_description = NRC_NAMES.get(subservice_status[found_subservices.index(subservice_id)]) print("\n0x{0:02x} : {1}".format(subservice_id, nrc_description), end=' ') def raw_send(arb_id_request, arb_id_response, service, session_type): with IsoTp(arb_id_request=arb_id_request, arb_id_response=arb_id_response) as tp: # Setup filter for incoming messages request = [0] * 2 request[0] = service request[1] = session_type tp.set_filter_single_arbitration_id(arb_id_response) with Iso14229_1(tp) as uds: tp.send_request(request) response = uds.receive_response(Iso14229_1.P3_CLIENT) return response def tester_present(arb_id_request, delay, duration, suppress_positive_response): """Sends TesterPresent messages to 'arb_id_request'. Stops automatically after 'duration' seconds or runs forever if this is None. 
:param arb_id_request: arbitration ID for requests :param delay: seconds between each request :param duration: seconds before automatically stopping, or None to continue forever :param suppress_positive_response: whether positive responses should be suppressed :type arb_id_request: int :type delay: float :type duration: float or None :type suppress_positive_response: bool """ # SPR simply tells the recipient not to send a positive response to # each TesterPresent message if suppress_positive_response: sub_function = 0x80 else: sub_function = 0x00 # Calculate end timestamp if the TesterPresent should automatically # stop after a given duration auto_stop = duration is not None end_time = None if auto_stop: end_time = (datetime.datetime.now() + datetime.timedelta(seconds=duration)) service_id = Services.TesterPresent.service_id message_data = [service_id, sub_function] print("Sending TesterPresent to arbitration ID {0} (0x{0:02x})" .format(arb_id_request)) print("\nPress Ctrl+C to stop\n") with IsoTp(arb_id_request, None) as can_wrap: counter = 1 while True: can_wrap.send_request(message_data) print("\rCounter:", counter, end="") stdout.flush() time.sleep(delay) counter += 1 if auto_stop and datetime.datetime.now() >= end_time: break def __tester_present_wrapper(args): """Wrapper used to initiate a TesterPresent session""" arb_id_request = args.src delay = args.delay duration = args.duration suppress_positive_response = args.spr tester_present(arb_id_request, delay, duration, suppress_positive_response) def ecu_reset(arb_id_request, arb_id_response, reset_type, timeout): """Sends an ECU Reset message to 'arb_id_request'. Returns the first response received from 'arb_id_response' within 'timeout' seconds or None otherwise. :param arb_id_request: arbitration ID for requests :param arb_id_response: arbitration ID for responses :param reset_type: value corresponding to a reset type :param timeout: seconds to wait for response before timeout, or None for default UDS timeout :type arb_id_request: int :type arb_id_response int :type reset_type: int :type timeout: float or None :return: list of response byte values on success, None otherwise :rtype [int] or None """ # Sanity checks if not BYTE_MIN <= reset_type <= BYTE_MAX: raise ValueError("reset type must be within interval " "0x{0:02x}-0x{1:02x}" .format(BYTE_MIN, BYTE_MAX)) if isinstance(timeout, float) and timeout < 0.0: raise ValueError("timeout value ({0}) cannot be negative" .format(timeout)) with IsoTp(arb_id_request=arb_id_request, arb_id_response=arb_id_response) as tp: # Setup filter for incoming messages tp.set_filter_single_arbitration_id(arb_id_response) with Iso14229_1(tp) as uds: # Set timeout if timeout is not None: uds.P3_CLIENT = timeout response = uds.ecu_reset(reset_type=reset_type) return response def __ecu_reset_wrapper(args): """Wrapper used to initiate ECU Reset""" arb_id_request = args.src arb_id_response = args.dst reset_type = args.reset_type timeout = args.timeout print("Sending ECU reset, type 0x{0:02x} to arbitration ID {1} " "(0x{1:02x})".format(reset_type, arb_id_request)) try: response = ecu_reset(arb_id_request, arb_id_response, reset_type, timeout) except ValueError as e: print("ValueError: {0}".format(e)) return # Decode response if response is None: print("No response was received") else: response_length = len(response) if response_length == 0: # Empty response print("Received empty response") elif response_length == 1: # Invalid response length print("Received response [{0:02x}] (1 byte), expected at least " "2 
bytes".format(response[0], len(response))) elif Iso14229_1.is_positive_response(response): # Positive response handling response_service_id = response[0] subfunction = response[1] expected_response_id = \ Iso14229_1.get_service_response_id( Services.EcuReset.service_id) if (response_service_id == expected_response_id and subfunction == reset_type): # Positive response pos_msg = "Received positive response" if response_length > 2: # Additional data can be seconds left to reset # (powerDownTime) or manufacturer specific additional_data = list_to_hex_str(response[2:], ",") pos_msg += (" with additional data: [{0}]" .format(additional_data)) print(pos_msg) else: # Service and/or subfunction mismatch print("Response service ID 0x{0:02x} and subfunction " "0x{1:02x} do not match expected values 0x{2:02x} " "and 0x{3:02x}".format(response_service_id, subfunction, Services.EcuReset.service_id, reset_type)) else: # Negative response handling print_negative_response(response) def print_negative_response(response): """ Helper function for decoding and printing a negative response received from a UDS server. :param response: Response data after CAN-TP layer has been removed :type response: [int] :return: Nothing """ nrc = response[2] nrc_description = NRC_NAMES.get(nrc, "Unknown NRC value") print("Received negative response code (NRC) 0x{0:02x}: {1}" .format(nrc, nrc_description)) def __security_seed_wrapper(args): """Wrapper used to initiate security seed dump""" arb_id_request = args.src arb_id_response = args.dst reset_type = args.reset session_type = args.sess_type level = args.sec_level num_seeds = args.num reset_delay = args.delay seed_list = [] try: print("Security seed dump started. Press Ctrl+C to stop.\n") while num_seeds > len(seed_list) or num_seeds == 0: # Extended diagnostics response = extended_session(arb_id_request, arb_id_response, session_type) if not Iso14229_1.is_positive_response(response): print("Unable to enter extended session. Retrying...\n") continue # Request seed response = request_seed(arb_id_request, arb_id_response, level, None, None) if response is None: print("\nInvalid response") elif Iso14229_1.is_positive_response(response): seed_list.append(list_to_hex_str(response[2:])) print("Seed received: {}\t(Total captured: {})" .format(list_to_hex_str(response[2:]), len(seed_list)), end="\r") stdout.flush() else: print_negative_response(response) break if reset_type: ecu_reset(arb_id_request, arb_id_response, reset_type, None) time.sleep(reset_delay) except KeyboardInterrupt: print("Interrupted by user.") except ValueError as e: print(e) return if len(seed_list) > 0: print("\n") print("Security Access Seeds captured:") for seed in seed_list: print(seed) def extended_session(arb_id_request, arb_id_response, session_type): with IsoTp(arb_id_request=arb_id_request, arb_id_response=arb_id_response) as tp: # Setup filter for incoming messages tp.set_filter_single_arbitration_id(arb_id_response) with Iso14229_1(tp) as uds: response = uds.diagnostic_session_control(session_type) return response def request_seed(arb_id_request, arb_id_response, level, data_record, timeout): """Sends an Request seed message to 'arb_id_request'. Returns the first response received from 'arb_id_response' within 'timeout' seconds or None otherwise. 
:param arb_id_request: arbitration ID for requests :param arb_id_response: arbitration ID for responses :param level: vehicle manufacturer specific access level to request seed for :param data_record: optional vehicle manufacturer specific data to transmit when requesting seed :param timeout: seconds to wait for response before timeout, or None for default UDS timeout :type arb_id_request: int :type arb_id_response: int :type level: int :type data_record: [int] or None :type timeout: float or None :return: list of response byte values on success, None otherwise :rtype [int] or None """ # Sanity checks if (not Services.SecurityAccess.RequestSeedOrSendKey() .is_valid_request_seed_level(level)): raise ValueError("Invalid request seed level") if isinstance(timeout, float) and timeout < 0.0: raise ValueError("Timeout value ({0}) cannot be negative" .format(timeout)) with IsoTp(arb_id_request=arb_id_request, arb_id_response=arb_id_response) as tp: # Setup filter for incoming messages tp.set_filter_single_arbitration_id(arb_id_response) with Iso14229_1(tp) as uds: # Set timeout if timeout is not None: uds.P3_CLIENT = timeout response = uds.security_access_request_seed(level, data_record) return response def send_key(arb_id_request, arb_id_response, level, key, timeout): """ Sends a Send key message to 'arb_id_request'. Returns the first response received from 'arb_id_response' within 'timeout' seconds or None otherwise. :param arb_id_request: arbitration ID for requests :param arb_id_response: arbitration ID for responses :param level: vehicle manufacturer specific access level to send key for :param key: key to transmit :param timeout: seconds to wait for response before timeout, or None for default UDS timeout :type arb_id_request: int :type arb_id_response: int :type level: int :type key: [int] :type timeout: float or None :return: list of response byte values on success, None otherwise :rtype [int] or None """ # Sanity checks if (not Services.SecurityAccess.RequestSeedOrSendKey() .is_valid_send_key_level(level)): raise ValueError("Invalid send key level") if isinstance(timeout, float) and timeout < 0.0: raise ValueError("Timeout value ({0}) cannot be negative" .format(timeout)) with IsoTp(arb_id_request=arb_id_request, arb_id_response=arb_id_response) as tp: # Setup filter for incoming messages tp.set_filter_single_arbitration_id(arb_id_response) with Iso14229_1(tp) as uds: # Set timeout if timeout is not None: uds.P3_CLIENT = timeout response = uds.security_access_send_key(level=level, key=key) return response def __dump_dids_wrapper(args): """Wrapper used to initiate data identifier dump""" arb_id_request = args.src arb_id_response = args.dst timeout = args.timeout min_did = args.min_did max_did = args.max_did print_results = True dump_dids(arb_id_request, arb_id_response, timeout, min_did, max_did, print_results) def __auto_wrapper(args): """Wrapper used to initiate automated UDS scan""" E=args.E min_id = args.min max_id = args.max blacklist = args.blacklist auto_blacklist_duration = args.autoblacklist delay = args.delay verify = not args.skipverify print_results = True timeout = args.timeout min_did = args.min_did max_did = args.max_did try: arb_id_pairs = uds_discovery(E, min_id, max_id, blacklist, auto_blacklist_duration, delay, verify, print_results) print("\n") if len(arb_id_pairs) == 0: # No UDS discovered print("\nDiagnostics service could not be found.") else: # Print result table print("\nIdentified diagnostics:\n") table_line = "+------------+------------+" print(table_line) 
print("| CLIENT ID | SERVER ID |") print(table_line) for (client_id, server_id) in arb_id_pairs: print("| 0x{0:08x} | 0x{1:08x} |" .format(client_id, server_id)) print(table_line) print("\n") # Enumerate each pair for (client_id, server_id) in arb_id_pairs: args.src = client_id args.dst = server_id # Print Client/Server result table print("\nTarget Diagnostic IDs:\n") table_line = "+------------+------------+" print(table_line) print("| CLIENT ID | SERVER ID |") print(table_line) print("| 0x{0:08x} | 0x{1:08x} |" .format(client_id, server_id)) print(table_line) print("\nEnumerating Services:\n") found_services = service_discovery(client_id, server_id, timeout) found_subservices = [] print("\nIdentified services:\n") # Print available services result table for service_id in found_services: service_name = UDS_SERVICE_NAMES.get(service_id, "Unknown service") print("Supported service 0x{0:02x}: {1}" .format(service_id, service_name)) print("\n") dump_dids(client_id, server_id, timeout, min_did, max_did, print_results)
if ServiceID.DIAGNOSTIC_SESSION_CONTROL in found_services:
12
2023-11-13 05:05:46+00:00
16k
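For readers who want to reproduce the probing idea behind uds_discovery() in the record above without the caringcaribou IsoTp wrapper, a minimal sketch against python-can follows. The channel name "vcan0", the socketcan interface, and the 0x700-0x7FF probe range are assumptions for the example, not values taken from the module.

import can

def probe_arb_id(bus, arb_id, timeout=0.1):
    # Single ISO-TP frame: PCI 0x02 (two payload bytes), SID 0x10
    # (DiagnosticSessionControl), sub-function 0x01 (defaultSession).
    request = can.Message(arbitration_id=arb_id,
                          data=[0x02, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00],
                          is_extended_id=False)
    bus.send(request)
    response = bus.recv(timeout)
    if response is None or len(response.data) < 2:
        return None
    # 0x50 = positive response to 0x10, 0x7F = negative response;
    # either one indicates a listening diagnostics server.
    if response.data[1] in (0x50, 0x7F):
        return response.arbitration_id
    return None

with can.Bus(channel="vcan0", interface="socketcan") as bus:
    for arb_id in range(0x700, 0x800):
        server_id = probe_arb_id(bus, arb_id)
        if server_id is not None:
            print("request 0x{0:03x} -> response from 0x{1:03x}".format(arb_id, server_id))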
L1bra1/WeakMotion
train_WeakMotionNet.py
[ { "identifier": "WeakMotionNet", "path": "weak_model.py", "snippet": "class WeakMotionNet(nn.Module):\n def __init__(self, out_seq_len=1, FGBG_category_num=2, height_feat_size=13):\n super(WeakMotionNet, self).__init__()\n self.out_seq_len = out_seq_len\n\n self.motion_pred = MotionPrediction(seq_len=self.out_seq_len)\n self.FGBG_classify = FGBGEstimation(motion_category_num=FGBG_category_num)\n self.stpn = STPN(height_feat_size=height_feat_size)\n\n\n def forward(self, bevs):\n bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)\n\n # Backbone network\n x = self.stpn(bevs)\n\n # FG/BG segmentation head\n FGBG_class_pred = self.FGBG_classify(x)\n\n # Motion Displacement prediction\n disp = self.motion_pred(x)\n disp = disp.view(-1, 2, x.size(-2), x.size(-1))\n\n return disp, FGBG_class_pred" }, { "identifier": "DatasetSingleSeq_Stage2", "path": "data/weak_nuscenes_dataloader.py", "snippet": "class DatasetSingleSeq_Stage2(Dataset):\n \"\"\"\n Generate the nuScenes training dataset for Stage2\n\n Parameters\n ----------\n dataset_root : Path to input data root directory\n weakdata_root: Path to weak supervision data root directory\n FBdata_root: Path to FG/BG masks predicted by PreSegNet in Stage1\n split : [train/val/test]\n annotation_ratio: Desired FG/BG annotation ratio. Should be consistent with the ratio in Stage1\n num_points_seg: Desired number of points in the current frame. Will be used to train the FG/BG segmentation head\n num_points_motion: Desired number of FG points in the three frames. Will be used for Chamfer loss\n \"\"\"\n def __init__(self, dataset_root=None, weakdata_root=None, FBdata_root=None, split='train', future_frame_skip=0, voxel_size=(0.25, 0.25, 0.4),\n area_extents=np.array([[-32., 32.], [-32., 32.], [-3., 2.]]), dims=(256, 256, 13), num_category=5,\n annotation_ratio=1.0, num_points_seg = 30000, num_points_motion = 12000):\n\n if dataset_root is None:\n raise ValueError(\"The {} dataset root is None. 
Should specify its value.\".format(split))\n\n self.dataset_root = dataset_root\n print(\"data root:\", dataset_root)\n self.weakdata_root = weakdata_root\n self.FBdata_root = FBdata_root\n\n seq_dirs = []\n if split == 'train':\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d) + '/0.npy'\n seq_dirs.append(tmp_0)\n else:\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d) + '/0.npy'\n seq_dirs.append(tmp_0)\n\n self.seq_files = seq_dirs\n self.num_sample_seqs = len(self.seq_files)\n print(\"The number of {} sequences: {}\".format(split, self.num_sample_seqs))\n\n # For training, the size of dataset should be 17065 * 1; for validation: 1719; for testing: 4309\n if split == 'train' and self.num_sample_seqs != 17065:\n warnings.warn(\">> The size of training dataset is not 17065 * 2.\\n\")\n elif split == 'val' and self.num_sample_seqs != 1719:\n warnings.warn(\">> The size of validation dataset is not 1719.\\n\")\n elif split == 'test' and self.num_sample_seqs != 4309:\n warnings.warn('>> The size of test dataset is not 4309.\\n')\n\n self.split = split\n self.voxel_size = voxel_size\n self.area_extents = area_extents\n self.future_frame_skip = future_frame_skip\n self.dims = dims\n self.annotation_ratio = annotation_ratio\n self.num_points_seg = num_points_seg\n self.num_points_motion = num_points_motion\n self.num_category = num_category\n\n def __len__(self):\n return self.num_sample_seqs\n\n def sample_foreground_point(self, pc, FGBG_label, use_GT_label=True):\n pc, not_close = remove_close(pc, radius=1.0)\n pc, filter_idx = filter_pc(pc, extents=self.area_extents)\n\n if use_GT_label:\n FGBG_label = FGBG_label[not_close]\n FGBG_label = FGBG_label[filter_idx]\n FG_mask = FGBG_label == 2\n else:\n FG_mask = FGBG_label\n\n FG_point = pc[FG_mask]\n FG_point_num = FG_point.shape[0]\n\n if FG_point_num != 0:\n if FG_point_num >= self.num_points_motion:\n sample_idx = np.random.choice(FG_point_num, self.num_points_motion, replace=False)\n FG_point_num = self.num_points_motion\n else:\n sample_idx = np.concatenate((np.arange(FG_point_num),\n np.random.choice(FG_point_num, self.num_points_motion - FG_point_num, replace=True)), axis=-1)\n FG_point = FG_point[sample_idx]\n else:\n FG_point = np.zeros((self.num_points_motion, 3))\n\n return FG_point, FG_point_num\n\n def __getitem__(self, idx):\n seq_file = self.seq_files[idx]\n gt_data_handle = np.load(seq_file, allow_pickle=True)\n gt_dict = gt_data_handle.item()\n\n dims = gt_dict['3d_dimension']\n num_future_pcs = gt_dict['num_future_pcs']\n num_past_pcs = gt_dict['num_past_pcs']\n pixel_indices = gt_dict['pixel_indices']\n\n sparse_disp_field_gt = gt_dict['disp_field']\n all_disp_field_gt = np.zeros((num_future_pcs, dims[0], dims[1], 2), dtype=np.float32)\n all_disp_field_gt[:, pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_disp_field_gt[:]\n\n sparse_valid_pixel_maps = gt_dict['valid_pixel_map']\n all_valid_pixel_maps = np.zeros((num_future_pcs, dims[0], dims[1]), dtype=np.float32)\n all_valid_pixel_maps[:, pixel_indices[:, 0], pixel_indices[:, 1]] = sparse_valid_pixel_maps[:]\n\n sparse_pixel_cat_maps = gt_dict['pixel_cat_map']\n pixel_cat_map = np.zeros((dims[0], dims[1], self.num_category), dtype=np.float32)\n pixel_cat_map[pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_pixel_cat_maps[:]\n\n non_empty_map = np.zeros((dims[0], dims[1]), dtype=np.float32)\n non_empty_map[pixel_indices[:, 0], pixel_indices[:, 1]] = 1.0\n\n padded_voxel_points = list()\n for 
i in range(num_past_pcs):\n indices = gt_dict['voxel_indices_' + str(i)]\n curr_voxels = np.zeros(dims, dtype=np.bool)\n curr_voxels[indices[:, 0], indices[:, 1], indices[:, 2]] = 1\n padded_voxel_points.append(curr_voxels)\n padded_voxel_points = np.stack(padded_voxel_points, 0).astype(np.float32)\n\n # get weak supervision\n if self.split == 'train':\n scene_name = seq_file.split('/')[-2]\n weak_file_name = os.path.join(os.path.join(self.weakdata_root, scene_name), '0.npy')\n weak_data_handle = np.load(weak_file_name, allow_pickle=True)\n weak_dict = weak_data_handle.item()\n\n # get FG/BG annotations for the current frame,\n # this procedure is the same as the data preparation in Stage1\n # 0: past frame; 1: current frame; 2: future frame\n\n pc_seg = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_seg = weak_dict['points_label_1']\n FGBG_gt_mask_seg = convert_semantic_to_FGBG(label_seg[:, 0])\n sample_idx = weak_dict['sample_idx_1']\n\n selected_num = np.floor(self.annotation_ratio * len(sample_idx)).astype(np.int64)\n selected_sample_idx = sample_idx[:selected_num]\n\n annotation_mask = np.zeros(len(sample_idx), dtype=np.float32)\n annotation_mask[selected_sample_idx] = 1 # 0: point without annotation; 1: point with annotation\n FGBG_gt_mask_seg[annotation_mask == 0] = 3 # 1: Background; 2: Foreground; 3: Unlabelled\n\n pc_seg, not_close = remove_close(pc_seg, radius=1.0)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[not_close]\n pc_seg, filter_idx = filter_pc(pc_seg, extents=self.area_extents)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[filter_idx]\n\n curr_seg_num = pc_seg.shape[0]\n if curr_seg_num >= self.num_points_seg:\n pc_sample_idx = np.random.choice(curr_seg_num, self.num_points_seg, replace=False)\n curr_seg_num = self.num_points_seg\n else:\n pc_sample_idx = np.concatenate((np.arange(curr_seg_num),\n np.random.choice(curr_seg_num, self.num_points_seg - curr_seg_num, replace=True)), axis=-1)\n point_FGBG_gt_mask_seg = FGBG_gt_mask_seg[pc_sample_idx]\n pc_seg = pc_seg[pc_sample_idx]\n\n # get foreground points in three frames for chamfer loss\n if self.annotation_ratio ==1:\n # When using full annotations, we directly extract ground truth foreground points for chamfer loss\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n label_0 = weak_dict['points_label_0']\n FGBG_gt_mask_0 = convert_semantic_to_FGBG(label_0[:, 0]) # 1: Background; 2: Foreground\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, FGBG_gt_mask_0)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_1 = weak_dict['points_label_1']\n FGBG_gt_mask_1 = convert_semantic_to_FGBG(label_1[:, 0])\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, FGBG_gt_mask_1)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n label_2 = weak_dict['points_label_2']\n FGBG_gt_mask_2 = convert_semantic_to_FGBG(label_2[:, 0])\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, FGBG_gt_mask_2)\n else:\n # When using partial annotations, we extract foreground points predicted by PreSegNet for Chamfer loss\n pred_FGBG_file_name = os.path.join(self.FBdata_root, scene_name + '.npz')\n pred_FGBG_data = np.load(pred_FGBG_file_name)\n\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n pred_FGBG_0 = pred_FGBG_data['pred_0']\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, pred_FGBG_0, use_GT_label=False)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n pred_FGBG_1 = pred_FGBG_data['pred_1']\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, pred_FGBG_1, 
use_GT_label=False)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n pred_FGBG_2 = pred_FGBG_data['pred_2']\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, pred_FGBG_2, use_GT_label=False)\n\n else:\n pc_seg = np.zeros(1)\n point_FGBG_gt_mask_seg = np.zeros(1)\n curr_seg_num = np.zeros(1)\n FG_point_0 = np.zeros(1)\n FG_point_num_0 = np.zeros(1)\n FG_point_1 = np.zeros(1)\n FG_point_num_1 = np.zeros(1)\n FG_point_2 = np.zeros(1)\n FG_point_num_2 = np.zeros(1)\n\n return padded_voxel_points, all_disp_field_gt, pixel_cat_map, \\\n non_empty_map, all_valid_pixel_maps, num_future_pcs, \\\n pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \\\n FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2" }, { "identifier": "DatasetSingleSeq_Stage2", "path": "data/weak_waymo_dataloader.py", "snippet": "class DatasetSingleSeq_Stage2(Dataset):\n \"\"\"\n Generate the Waymo training dataset for Stage2\n\n Parameters\n ----------\n dataset_root : Path to input data root directory\n weakdata_root: Path to weak supervision data root directory\n FBdata_root: Path to FG/BG masks predicted by PreSegNet in Stage1\n split : [train/val]\n annotation_ratio: Desired FG/BG annotation ratio. Should be consistent with the ratio in Stage1\n num_points_seg: Desired number of points in the current frame. Will be used to train the FG/BG segmentation head\n num_points_motion: Desired number of FG points in the three frames. Will be used for Chamfer loss\n \"\"\"\n def __init__(self, dataset_root=None, weakdata_root=None, FBdata_root=None, split='train', future_frame_skip=0, voxel_size=(0.25, 0.25, 0.4),\n area_extents=np.array([[-32., 32.], [-32., 32.], [-1., 4.]]), dims=(256, 256, 13), num_category=5,\n annotation_ratio=1.0, num_points_seg = 40000, num_points_motion = 12000):\n\n if dataset_root is None:\n raise ValueError(\"The {} dataset root is None. 
Should specify its value.\".format(split))\n\n self.dataset_root = dataset_root\n print(\"data root:\", dataset_root)\n self.weakdata_root = weakdata_root\n self.FBdata_root = FBdata_root\n\n seq_dirs = []\n if split == 'train':\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d)\n seq_dirs.append(tmp_0)\n else:\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d)\n seq_dirs.append(tmp_0)\n\n self.seq_files = seq_dirs\n self.num_sample_seqs = len(self.seq_files)\n print(\"The number of {} sequences: {}\".format(split, self.num_sample_seqs))\n\n # For training, the size of dataset should be 14351; for validation/testing: 3634\n if split == 'train' and self.num_sample_seqs != 14351:\n warnings.warn(\">> The size of training dataset is not 14351.\\n\")\n elif split == 'val' and self.num_sample_seqs != 3634:\n warnings.warn(\">> The size of validation dataset is not 3634.\\n\")\n\n\n self.split = split\n self.voxel_size = voxel_size\n self.area_extents = area_extents\n self.future_frame_skip = future_frame_skip\n self.dims = dims\n self.annotation_ratio = annotation_ratio\n self.num_points_seg = num_points_seg\n self.num_points_motion = num_points_motion\n self.num_category = num_category\n\n def __len__(self):\n return self.num_sample_seqs\n\n def sample_foreground_point(self, pc, FGBG_label, use_GT_label=True):\n pc, not_close = remove_close(pc, radius=1.0)\n pc, filter_idx = filter_pc(pc, extents=self.area_extents)\n\n if use_GT_label:\n FGBG_label = FGBG_label[not_close]\n FGBG_label = FGBG_label[filter_idx]\n FG_mask = FGBG_label == 2\n else:\n FG_mask = FGBG_label\n\n FG_point = pc[FG_mask]\n FG_point_num = FG_point.shape[0]\n\n if FG_point_num != 0:\n if FG_point_num >= self.num_points_motion:\n sample_idx = np.random.choice(FG_point_num, self.num_points_motion, replace=False)\n FG_point_num = self.num_points_motion\n else:\n sample_idx = np.concatenate((np.arange(FG_point_num),\n np.random.choice(FG_point_num, self.num_points_motion - FG_point_num, replace=True)), axis=-1)\n FG_point = FG_point[sample_idx]\n else:\n FG_point = np.zeros((self.num_points_motion, 3))\n\n return FG_point, FG_point_num\n\n def __getitem__(self, idx):\n seq_file = self.seq_files[idx]\n gt_data_handle = np.load(seq_file, allow_pickle=True)\n gt_dict = gt_data_handle.item()\n\n dims = gt_dict['3d_dimension']\n num_future_pcs = gt_dict['num_future_pcs']\n num_past_pcs = gt_dict['num_past_pcs']\n pixel_indices = gt_dict['pixel_indices']\n\n sparse_disp_field_gt = gt_dict['disp_field']\n all_disp_field_gt = np.zeros((num_future_pcs, dims[0], dims[1], 2), dtype=np.float32)\n all_disp_field_gt[:, pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_disp_field_gt[:]\n\n sparse_valid_pixel_maps = gt_dict['valid_pixel_map']\n all_valid_pixel_maps = np.zeros((num_future_pcs, dims[0], dims[1]), dtype=np.float32)\n all_valid_pixel_maps[:, pixel_indices[:, 0], pixel_indices[:, 1]] = sparse_valid_pixel_maps[:]\n\n sparse_pixel_cat_maps = gt_dict['pixel_cat_map']\n pixel_cat_map = np.zeros((dims[0], dims[1], self.num_category), dtype=np.float32)\n pixel_cat_map[pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_pixel_cat_maps[:]\n\n non_empty_map = np.zeros((dims[0], dims[1]), dtype=np.float32)\n non_empty_map[pixel_indices[:, 0], pixel_indices[:, 1]] = 1.0\n\n padded_voxel_points = list()\n for i in range(num_past_pcs):\n indices = gt_dict['voxel_indices_' + str(i)]\n curr_voxels = np.zeros(dims, dtype=np.bool)\n curr_voxels[indices[:, 0], indices[:, 
1], indices[:, 2]] = 1\n padded_voxel_points.append(curr_voxels)\n padded_voxel_points = np.stack(padded_voxel_points, 0).astype(np.float32)\n\n # get weak supervision\n if self.split == 'train':\n scene_name = seq_file.split('/')[-1]\n weak_file_name = os.path.join(self.weakdata_root, scene_name)\n weak_data_handle = np.load(weak_file_name, allow_pickle=True)\n weak_dict = weak_data_handle.item()\n\n # get FG/BG annotations for the current frame,\n # this procedure is the same as the data preparation in Stage1\n # 0: past frame; 1: current frame; 2: future frame\n\n pc_seg = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_seg = weak_dict['points_label_1']\n FGBG_gt_mask_seg = convert_semantic_to_FGBG_waymo(label_seg[:, 0])\n sample_idx = weak_dict['sample_idx_1']\n\n selected_num = np.floor(self.annotation_ratio * len(sample_idx)).astype(np.int64)\n selected_sample_idx = sample_idx[:selected_num]\n\n annotation_mask = np.zeros(len(sample_idx), dtype=np.float32)\n annotation_mask[selected_sample_idx] = 1 # 0: point without annotation; 1: point with annotation\n FGBG_gt_mask_seg[annotation_mask == 0] = 3 # 1: Background; 2: Foreground; 3: Unlabelled\n\n pc_seg, not_close = remove_close(pc_seg, radius=1.0)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[not_close]\n pc_seg, filter_idx = filter_pc(pc_seg, extents=self.area_extents)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[filter_idx]\n\n curr_seg_num = pc_seg.shape[0]\n if curr_seg_num >= self.num_points_seg:\n pc_sample_idx = np.random.choice(curr_seg_num, self.num_points_seg, replace=False)\n curr_seg_num = self.num_points_seg\n else:\n pc_sample_idx = np.concatenate((np.arange(curr_seg_num),\n np.random.choice(curr_seg_num, self.num_points_seg - curr_seg_num, replace=True)), axis=-1)\n point_FGBG_gt_mask_seg = FGBG_gt_mask_seg[pc_sample_idx]\n pc_seg = pc_seg[pc_sample_idx]\n\n # get foreground points in three frames for chamfer loss\n if self.annotation_ratio ==1:\n # When using full annotations, we directly extract ground truth foreground points for chamfer loss\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n label_0 = weak_dict['points_label_0']\n FGBG_gt_mask_0 = convert_semantic_to_FGBG_waymo(label_0[:, 0]) # 1: Background; 2: Foreground\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, FGBG_gt_mask_0)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_1 = weak_dict['points_label_1']\n FGBG_gt_mask_1 = convert_semantic_to_FGBG_waymo(label_1[:, 0])\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, FGBG_gt_mask_1)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n label_2 = weak_dict['points_label_2']\n FGBG_gt_mask_2 = convert_semantic_to_FGBG_waymo(label_2[:, 0])\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, FGBG_gt_mask_2)\n else:\n # When using partial annotations, we extract foreground points predicted by PreSegNet for Chamfer loss\n pred_FGBG_file_name = os.path.join(self.FBdata_root, scene_name.split('.')[0] + '.npz')\n pred_FGBG_data = np.load(pred_FGBG_file_name)\n\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n pred_FGBG_0 = pred_FGBG_data['pred_0']\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, pred_FGBG_0, use_GT_label=False)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n pred_FGBG_1 = pred_FGBG_data['pred_1']\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, pred_FGBG_1, use_GT_label=False)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n pred_FGBG_2 = pred_FGBG_data['pred_2']\n FG_point_2, FG_point_num_2 = 
self.sample_foreground_point(pc_2, pred_FGBG_2, use_GT_label=False)\n\n else:\n pc_seg = np.zeros(1)\n point_FGBG_gt_mask_seg = np.zeros(1)\n curr_seg_num = np.zeros(1)\n FG_point_0 = np.zeros(1)\n FG_point_num_0 = np.zeros(1)\n FG_point_1 = np.zeros(1)\n FG_point_num_1 = np.zeros(1)\n FG_point_2 = np.zeros(1)\n FG_point_num_2 = np.zeros(1)\n\n return padded_voxel_points, all_disp_field_gt, pixel_cat_map, \\\n non_empty_map, all_valid_pixel_maps, num_future_pcs, \\\n pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \\\n FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2" }, { "identifier": "FGBG_seg_loss", "path": "loss_utils.py", "snippet": "def FGBG_seg_loss(FGBG_pred, point_FGBG_gt_mask, source_pc, source_num, voxel_size, area_extents):\n \"\"\"\n Foreground Background segmentation loss\n ----------\n\n Inputs:\n FGBG_pred: [B, 2, dim_0, dim_1], predicted Foreground/Background BEV map\n point_FGBG_gt_mask: [B, N], per-point Foreground/Background ground truth, (1: BG, 2: FG, 3: Unannotated)\n source_pc: [B, N, 3], point cloud in current frame\n source_num: [B], unrepeated point number in each sample\n voxel_size, area_extents: voxel size and range of area,\n \"\"\"\n\n batch_size = FGBG_pred.shape[0]\n device = FGBG_pred.device\n\n loss_FGBG_seg = torch.zeros((1), device=device, dtype=FGBG_pred.dtype)\n\n for batch_index in range(batch_size):\n\n # get current batch\n curr_source_num = source_num[batch_index]\n curr_source_pc_np = source_pc[batch_index, :curr_source_num, :].numpy()\n curr_point_FGBG_gt_mask = point_FGBG_gt_mask[batch_index, :curr_source_num].float().to(device) # 1: Background; 2: Foreground; 3: Unannotated\n curr_FGBG_pred = FGBG_pred[batch_index]\n\n # generate FGBG ground truth and weight for each point\n curr_point_BG_gt_mask = (curr_point_FGBG_gt_mask == 1).float().unsqueeze(0)\n curr_point_FG_gt_mask = (curr_point_FGBG_gt_mask == 2).float().unsqueeze(0)\n\n curr_point_FGBG_gt_map = torch.cat([curr_point_BG_gt_mask, curr_point_FG_gt_mask], 0).permute(1, 0)\n\n # weight assigned to different categories. 
0.005 for BG; 1.0 for FG; 0.0 for unlabelled\n curr_FGBG_weight_map = (curr_point_BG_gt_mask * 0.005 + curr_point_FG_gt_mask * 1.0).squeeze(0)\n curr_annotated_point_num = torch.sum((curr_point_FGBG_gt_mask != 3).float())\n\n # get FGBG prediction for each point\n curr_voxel_indices = gen_voxel_indices_for_pc(curr_source_pc_np, voxel_size, area_extents)\n curr_point_FGBG_pred = curr_FGBG_pred[:, curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]].permute(1, 0)\n\n # compute current loss\n curr_log_softmax_FGBG_pred = F.log_softmax(curr_point_FGBG_pred, dim=1)\n curr_loss_FGBG_pred = torch.sum(- curr_point_FGBG_gt_map * curr_log_softmax_FGBG_pred, dim=1) * curr_FGBG_weight_map\n curr_loss_FGBG_predd = torch.sum(curr_loss_FGBG_pred) / (curr_annotated_point_num + 1e-6)\n\n # accumulate loss\n loss_FGBG_seg = loss_FGBG_seg + curr_loss_FGBG_predd\n\n loss_FGBG_seg = loss_FGBG_seg / batch_size\n return loss_FGBG_seg" }, { "identifier": "CCD_loss", "path": "loss_utils.py", "snippet": "def CCD_loss(disp_pred, pc_0, pc_0_num, pc_1, pc_1_num, pc_2, pc_2_num, non_empty_map, voxel_size, area_extents,\n epoch, epoch_threshold=10, theta2=1):\n \"\"\"\n Consistency-aware Chamfer Distance loss\n ----------\n\n Inputs:\n disp_pred: [B, 2, dim_0, dim_1], predicted 2D displacement BEV map\n\n pc_0: [B, M, 3], predicted foreground points in the past frame (-0.5s)\n pc_0_num: [B], unrepeated foreground point number in each past frame\n\n pc_1: [B, M, 3], predicted foreground points in the current frame (0s)\n pc_1_num: [B], unrepeated foreground point number in each current frame\n\n pc_2: [B, M, 3], predicted foreground points in the future frame (+0.5s)\n pc_2_num: [B], unrepeated foreground point number in each future frame\n\n non_empty_map: [B, dim_0, dim_1] nonempty mask\n voxel_size, area_extents: voxel size and range of area,\n\n epoch: the number of current training epoch\n epoch_threshold: After epoch_threshold epochs, we start to reweight multi-frame Chamfer loss\n theta2: hyper-parameter in Gaussian kernel, used in Eq.(6)\n \"\"\"\n\n batch_size = disp_pred.shape[0]\n device = disp_pred.device\n loss_disp = torch.zeros((1), device=device, dtype=disp_pred.dtype)\n\n valid_sample_num = 0\n for batch_index in range(batch_size):\n\n # 0: past frame; 1: current frame; 2: future frame\n curr_pc_0_num = pc_0_num[batch_index]\n curr_pc_1_num = pc_1_num[batch_index]\n curr_pc_2_num = pc_2_num[batch_index]\n if (curr_pc_0_num > 0) and (curr_pc_1_num > 0) and (curr_pc_2_num > 0):\n valid_sample_num = valid_sample_num + 1\n curr_valid_map = non_empty_map[batch_index]\n\n # get source and target point clouds, predicted 2D BEV flow\n curr_pc_0_np = pc_0[batch_index, :curr_pc_0_num, :].numpy() # target pc, past frame\n curr_pc_1_np = pc_1[batch_index, :curr_pc_1_num, :].numpy() # current pc, source frame\n curr_pc_2_np = pc_2[batch_index, :curr_pc_2_num, :].numpy() # target pc, future frame\n curr_disp_pred = disp_pred[batch_index, :, :, :]\n\n # get predicted 3D flow for each point\n curr_voxel_indices = gen_voxel_indices_for_pc(curr_pc_1_np, voxel_size, area_extents)\n curr_point_disp_pred = curr_disp_pred[:, curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]].permute(1, 0)\n\n # get FG and BG map for the current frame, the map is estimated by the PreSegNet in Stage1\n curr_fg_map = torch.zeros_like(curr_valid_map)\n curr_fg_map[curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]] = 1\n curr_fg_map = curr_fg_map * curr_valid_map\n fg_voxel_num = torch.sum(curr_fg_map)\n\n curr_bg_map = (1 - curr_fg_map) * 
curr_valid_map\n bg_voxel_num = torch.sum(curr_bg_map)\n\n curr_pc_0 = torch.from_numpy(curr_pc_0_np).to(device).float()\n curr_pc_1 = torch.from_numpy(curr_pc_1_np).to(device).float()\n curr_pc_2 = torch.from_numpy(curr_pc_2_np).to(device).float()\n curr_point_3d_disp_pred = torch.cat([curr_point_disp_pred, torch.zeros_like(curr_point_disp_pred[:, 0:1])], -1)\n\n # compute confidence weights for the three point clouds\n if epoch > epoch_threshold:\n # After epoch_threshold epochs, we start to reweight multi-frame Chamfer loss\n weight_P, weight_C, weight_F = gen_confidence_weight(curr_pc_0, curr_pc_1, curr_pc_2, curr_point_3d_disp_pred, theta2=theta2)\n else:\n weight_P, weight_C, weight_F = None, None, None\n\n # Consistency-aware Chamfer Distance loss function for the foreground points\n # backward term (backward warped current frame, past frame)\n warped_source_pc_backward = curr_pc_1 - curr_point_3d_disp_pred\n fg_loss_backward = weighted_chamfer_loss(warped_source_pc_backward, curr_pc_0, weight_C, weight_P)\n\n # forward term (forward warped current frame, future frame)\n warped_source_pc_forward = curr_pc_1 + curr_point_3d_disp_pred\n fg_loss_forward = weighted_chamfer_loss(warped_source_pc_forward, curr_pc_2, weight_C, weight_F)\n\n fg_loss = (fg_loss_backward + fg_loss_forward) / 2.0\n\n # generate loss for the background points. Eq.(13)\n bg_gt = torch.zeros_like(curr_disp_pred) # background points are regarded as static\n bg_loss = torch.sum(torch.abs(curr_disp_pred * curr_bg_map.unsqueeze(0) - bg_gt * curr_bg_map.unsqueeze(0)), 0)\n bg_loss = torch.sum(bg_loss) / (torch.sum(curr_bg_map) + 1e-6)\n\n # combine the losses from the foreground and the background. Eq.(12)\n curr_loss = (fg_loss * fg_voxel_num + 0.005 * bg_loss * bg_voxel_num) \\\n / (fg_voxel_num + bg_voxel_num + 1e-6)\n\n loss_disp = loss_disp + curr_loss\n\n loss_disp = loss_disp / valid_sample_num\n return loss_disp" }, { "identifier": "evaluate_FGBG_prediction", "path": "evaluation_utils.py", "snippet": "def evaluate_FGBG_prediction(FGBG_pred, non_empty_map_numpy, pixel_cat_map_gt_numpy, overall_cls_gt, overall_cls_pred,\n datatype='nuScenes'):\n\n # Convert the category map\n max_prob = np.amax(pixel_cat_map_gt_numpy, axis=-1)\n filter_mask = max_prob == 1.0 # Note: some of the cell probabilities are soft probabilities\n pixel_cat_map_numpy = np.argmax(pixel_cat_map_gt_numpy,\n axis=-1) + 1 # category starts from 1 (background), etc\n\n # Convert category label to FG/BG label\n pixel_FGBG_map_numpy = pixel_cat_map_numpy.copy()\n if datatype == 'nuScenes':\n # 1: background or empty; 2: Vehicle; 3: Ped; 4: Bike; 5: Others\n pixel_FGBG_map_numpy[pixel_FGBG_map_numpy > 1] = 2\n elif datatype == 'Waymo':\n # 1: background or empty; 2: Vehicle; 3: Ped; 4: Cyclist; 5: Sign, regarded as background\n tmp = pixel_FGBG_map_numpy.copy()\n pixel_FGBG_map_numpy[tmp > 1] = 2\n pixel_FGBG_map_numpy[(tmp == 5)] = 1\n\n pixel_FGBG_map_numpy = (pixel_FGBG_map_numpy * non_empty_map_numpy * filter_mask).astype(\n np.int32) # 0: Empty; 1: Background; 2: Foreground\n\n FGBG_pred_numpy = FGBG_pred.cpu().numpy()\n FGBG_pred_numpy = np.transpose(FGBG_pred_numpy, (0, 2, 3, 1))\n FGBG_pred_numpy = np.argmax(FGBG_pred_numpy, axis=-1) + 1\n FGBG_pred_numpy = (FGBG_pred_numpy * non_empty_map_numpy * filter_mask).astype(np.int32)\n\n border = 8\n roi_mask = np.zeros_like(non_empty_map_numpy)\n roi_mask[:, border:-border, border:-border] = 1.0\n\n # For computing confusion matrix, in order to compute FG/BG classification accuracy for each 
category\n count_mask = non_empty_map_numpy * filter_mask * roi_mask\n idx_fg = np.where(count_mask > 0)\n\n overall_cls_gt.append(pixel_FGBG_map_numpy[idx_fg])\n overall_cls_pred.append(FGBG_pred_numpy[idx_fg])\n\n return overall_cls_gt, overall_cls_pred" }, { "identifier": "evaluate_motion_prediction", "path": "evaluation_utils.py", "snippet": "def evaluate_motion_prediction(disp_pred, FGBG_pred, all_disp_field_gt, all_valid_pixel_maps, future_steps,\n distance_intervals, selected_future_sweeps, cell_groups,\n use_FGBG_pred_masking=True, datatype='nuScenes'):\n\n pred_shape = disp_pred.size()\n disp_pred = disp_pred.view(all_disp_field_gt.size(0), -1, pred_shape[-3], pred_shape[-2], pred_shape[-1])\n disp_pred = disp_pred.contiguous()\n disp_pred = disp_pred.cpu().numpy()\n\n if use_FGBG_pred_masking:\n FGBG_pred_numpy = FGBG_pred.cpu().numpy()\n FGBG_pred_numpy = np.argmax(FGBG_pred_numpy, axis=1)\n mask = FGBG_pred_numpy == 0 # predicted Background mask\n\n # For those with very small movements, we consider them as static\n last_pred = disp_pred[:, -1, :, :, :]\n last_pred_norm = np.linalg.norm(last_pred, ord=2, axis=1) # out: (batch, h, w)\n thd_mask = last_pred_norm <= 0.2\n\n cat_weight_map = np.ones_like(FGBG_pred_numpy, dtype=np.float32)\n cat_weight_map[mask] = 0.0\n cat_weight_map[thd_mask] = 0.0\n cat_weight_map = cat_weight_map[:, np.newaxis, np.newaxis, ...] # (batch, 1, 1, h, w)\n\n disp_pred = disp_pred * cat_weight_map # small motion, static, background\n\n\n # Pre-processing\n all_disp_field_gt = all_disp_field_gt.numpy() # (bs, seq, h, w, channel)\n future_steps = future_steps.numpy()[0]\n\n valid_pixel_maps = all_valid_pixel_maps[:, -future_steps:, ...].contiguous()\n valid_pixel_maps = valid_pixel_maps.numpy()\n\n all_disp_field_gt = all_disp_field_gt[:, -future_steps:, ]\n all_disp_field_gt = np.transpose(all_disp_field_gt, (0, 1, 4, 2, 3))\n all_disp_field_gt_norm = np.linalg.norm(all_disp_field_gt, ord=2, axis=2)\n\n upper_thresh = 0.2\n if datatype == 'nuScenes':\n upper_bound = 1 / 20 * upper_thresh\n elif datatype == 'Waymo':\n upper_bound = 1 / 10 * upper_thresh\n\n static_cell_mask = all_disp_field_gt_norm <= upper_bound\n static_cell_mask = np.all(static_cell_mask, axis=1) # along the temporal axis\n moving_cell_mask = np.logical_not(static_cell_mask)\n\n for j, d in enumerate(distance_intervals):\n for slot, s in enumerate((selected_future_sweeps - 1)): # selected_future_sweeps: [4, 8, ...]\n curr_valid_pixel_map = valid_pixel_maps[:, s]\n\n if j == 0: # corresponds to static cells\n curr_mask = np.logical_and(curr_valid_pixel_map, static_cell_mask)\n else:\n # We use the displacement between keyframe and the last sample frame as metrics\n last_gt_norm = all_disp_field_gt_norm[:, -1]\n mask = np.logical_and(d[0] <= last_gt_norm, last_gt_norm < d[1])\n\n curr_mask = np.logical_and(curr_valid_pixel_map, mask)\n curr_mask = np.logical_and(curr_mask, moving_cell_mask)\n\n # we evaluate the performance for cells within the range [-30m, 30m] along both x, y dimensions.\n border = 8\n roi_mask = np.zeros_like(curr_mask, dtype=np.bool_)\n roi_mask[:, border:-border, border:-border] = True\n curr_mask = np.logical_and(curr_mask, roi_mask)\n\n cell_idx = np.where(curr_mask == True)\n\n gt = all_disp_field_gt[:, s]\n pred = disp_pred[:, -1, :, :, :]\n norm_error = np.linalg.norm(gt - pred, ord=2, axis=1)\n\n cell_groups[j][slot].append(norm_error[cell_idx])\n\n return cell_groups" } ]
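The CCD_loss snippet above delegates the actual point matching to weighted_chamfer_loss, which is not included in this context. As a rough, unweighted sketch of the underlying Chamfer term (an illustration under that assumption, not the repository's implementation):

import torch

def chamfer_distance(pc_a, pc_b):
    # pc_a: (N, 3) and pc_b: (M, 3) point clouds on the same device
    dist = torch.cdist(pc_a, pc_b)           # (N, M) pairwise Euclidean distances
    a_to_b = dist.min(dim=1).values.mean()   # each point in A to its nearest point in B
    b_to_a = dist.min(dim=0).values.mean()   # and vice versa
    return a_to_b + b_to_a

# The backward term of CCD_loss then compares the current frame warped by the
# predicted flow against the past frame, e.g.
#   fg_loss_backward = chamfer_distance(curr_pc_1 - curr_point_3d_disp_pred, curr_pc_0)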
import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F import numpy as np import time import sys import argparse import os from shutil import copytree, copy from weak_model import WeakMotionNet from data.weak_nuscenes_dataloader import DatasetSingleSeq_Stage2 from data.weak_waymo_dataloader import DatasetSingleSeq_Stage2 as DatasetSingleSeq_Stage2_waymo from sklearn.metrics import confusion_matrix from tqdm import tqdm from loss_utils import FGBG_seg_loss, CCD_loss from evaluation_utils import evaluate_FGBG_prediction, evaluate_motion_prediction
11577
def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def check_folder(folder_path): if not os.path.exists(folder_path): os.mkdir(folder_path) return folder_path out_seq_len = 1 # The number of future frames we are going to predict height_feat_size = 13 # The size along the height dimension parser = argparse.ArgumentParser() parser.add_argument('-md', '--motiondata', default='/path_to/nuScenes/input-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-wd', '--weakdata', default='/path_to/nuScenes/weak-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-FBd', '--FBdata', default='/path_to/nuScenes/FGBG-data/nuscenes_seg_0-01/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes']) parser.add_argument('-t', '--evaldata', default='/path_to/nuScenes/input-data/val/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training') parser.add_argument('--batch', default=8, type=int, help='Batch size') parser.add_argument('--nepoch', default=60, type=int, help='Number of epochs') parser.add_argument('--nworker', default=4, type=int, help='Number of workers') parser.add_argument('--log', default=True, action='store_true', help='Whether to log') parser.add_argument('--logpath', default='', help='The path to the output log file') parser.add_argument('--gpu', default='1') parser.add_argument('--annotation_ratio', default=0.01, type=float) args = parser.parse_args() print(args) num_epochs = args.nepoch need_log = args.log BATCH_SIZE = args.batch num_workers = args.nworker os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu datatype = args.datatype annotation_ratio = args.annotation_ratio def main(): start_epoch = 1 # Whether to log the training information if need_log: logger_root = args.logpath if args.logpath != '' else 'logs' time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") if args.resume == '': model_save_path = check_folder(logger_root) model_save_path = check_folder(os.path.join(model_save_path, 'Stage2')) model_save_path = check_folder(os.path.join(model_save_path, time_stamp)) log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "w") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[0:]))) saver.write(args.__repr__() + "\n\n") saver.flush() else: model_save_path = args.resume log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "a") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[1:]))) saver.write(args.__repr__() + "\n\n") saver.flush() # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) tmp = args.motiondata trainset_split = tmp.split('/')[-1] if 
tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': trainset = DatasetSingleSeq_Stage2(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': trainset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=num_workers) print("Training dataset size:", len(trainset)) tmp = args.evaldata evalset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': evalset = DatasetSingleSeq_Stage2(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': evalset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) evalloader = torch.utils.data.DataLoader(evalset, batch_size=1, shuffle=False, num_workers=num_workers) print("Training dataset size:", len(trainset))
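The training script above derives the dataset split name with "tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2]"; note that "is not ''" is an identity comparison against a literal (recent CPython emits a SyntaxWarning for it), and string comparisons should use "!=". A hedged equivalent using only the standard library (split_name_from_path is a hypothetical helper, not part of the repository):

import os

def split_name_from_path(path: str) -> str:
    # Last non-empty component of a path, e.g. '/data/train/' -> 'train'.
    # Same intent as the split('/')[-1] / [-2] pattern above, but relies on
    # normal string equality semantics instead of the 'is not' identity check.
    return os.path.basename(os.path.normpath(path))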
""" Train WeakMotionNet in Stage2 Some of the code are modified based on 'train_single_seq.py' in MotionNet. Reference: MotionNet (https://www.merl.com/research/?research=license-request&sw=MotionNet) """ class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def check_folder(folder_path): if not os.path.exists(folder_path): os.mkdir(folder_path) return folder_path out_seq_len = 1 # The number of future frames we are going to predict height_feat_size = 13 # The size along the height dimension parser = argparse.ArgumentParser() parser.add_argument('-md', '--motiondata', default='/path_to/nuScenes/input-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-wd', '--weakdata', default='/path_to/nuScenes/weak-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-FBd', '--FBdata', default='/path_to/nuScenes/FGBG-data/nuscenes_seg_0-01/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes']) parser.add_argument('-t', '--evaldata', default='/path_to/nuScenes/input-data/val/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training') parser.add_argument('--batch', default=8, type=int, help='Batch size') parser.add_argument('--nepoch', default=60, type=int, help='Number of epochs') parser.add_argument('--nworker', default=4, type=int, help='Number of workers') parser.add_argument('--log', default=True, action='store_true', help='Whether to log') parser.add_argument('--logpath', default='', help='The path to the output log file') parser.add_argument('--gpu', default='1') parser.add_argument('--annotation_ratio', default=0.01, type=float) args = parser.parse_args() print(args) num_epochs = args.nepoch need_log = args.log BATCH_SIZE = args.batch num_workers = args.nworker os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu datatype = args.datatype annotation_ratio = args.annotation_ratio def main(): start_epoch = 1 # Whether to log the training information if need_log: logger_root = args.logpath if args.logpath != '' else 'logs' time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") if args.resume == '': model_save_path = check_folder(logger_root) model_save_path = check_folder(os.path.join(model_save_path, 'Stage2')) model_save_path = check_folder(os.path.join(model_save_path, time_stamp)) log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "w") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[0:]))) saver.write(args.__repr__() + "\n\n") saver.flush() else: model_save_path = args.resume log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "a") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment 
saver.write("command line: {}\n".format(" ".join(sys.argv[1:]))) saver.write(args.__repr__() + "\n\n") saver.flush() # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) tmp = args.motiondata trainset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': trainset = DatasetSingleSeq_Stage2(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': trainset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.motiondata, weakdata_root=args.weakdata, FBdata_root=args.FBdata, split=trainset_split, annotation_ratio=annotation_ratio, voxel_size=voxel_size, area_extents=area_extents) trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=num_workers) print("Training dataset size:", len(trainset)) tmp = args.evaldata evalset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes': evalset = DatasetSingleSeq_Stage2(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) elif datatype == 'Waymo': evalset = DatasetSingleSeq_Stage2_waymo(dataset_root=args.evaldata, split=evalset_split, voxel_size=voxel_size, area_extents=area_extents) evalloader = torch.utils.data.DataLoader(evalset, batch_size=1, shuffle=False, num_workers=num_workers) print("Training dataset size:", len(trainset))
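The AverageMeter class above keeps a running sum and count per metric; a short usage sketch with illustrative values, assuming the AverageMeter definition above is in scope:

meter = AverageMeter('loss', ':.4f')
meter.update(0.8, n=4)   # batch of 4 samples with mean loss 0.8
meter.update(0.6, n=4)   # next batch of 4 with mean loss 0.6
print(meter)             # prints "loss 0.6000 (0.7000)": current value and running average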
model = WeakMotionNet(out_seq_len=out_seq_len, FGBG_category_num=2, height_feat_size=height_feat_size)
0
2023-11-12 07:03:29+00:00
16k
c3exchange/c3-smartcontracts-v1
contracts_unified/core/main.py
[ { "identifier": "update", "path": "contracts_unified/core/bare_calls/update.py", "snippet": "@Subroutine(TealType.none)\ndef update() -> Expr:\n \"\"\"Implements the contract method called on update\"\"\"\n\n return sender_is_creator()" }, { "identifier": "delete", "path": "contracts_unified/core/bare_calls/update.py", "snippet": "@Subroutine(TealType.none)\ndef delete() -> Expr:\n \"\"\"Implements the contract method called on delete\"\"\"\n\n return sender_is_creator()" }, { "identifier": "account_move", "path": "contracts_unified/core/methods/account_move.py", "snippet": "@ABIReturnSubroutine\ndef account_move(\n source_account: AccountAddress,\n user_op: OperationMetaData,\n delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Moves funds between two accounts\n\n Arguments:\n\n source_account (AccountAddress): Source account address.\n user_op (OperationMetaData): Operation metadata containing destination account, cash and pool.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n _server_data (abi.DynamicBytes): Server data. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n # Constants\n abi_false = abi.Bool()\n\n # Extracted operation data\n data = AccountMoveData()\n cash = abi.make(SignedInstrumentBasket)\n pool = abi.make(SignedInstrumentBasket)\n\n # Sender and receiver accounts\n destination_account = AccountAddress()\n\n # Health check\n health = ExcessMargin()\n\n i = abi.Uint64()\n length = abi.Uint64()\n abi_zero_int = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n abi_zero_int.set(Int(0)),\n\n # Validate sender\n cast(Expr, sender_is_sig_validator()),\n\n # No delegation is allowed for account move\n Assert(delegation_chain.length() == Int(0)),\n\n # Get the source and destination accounts\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n data.operation.use(lambda op: Assert(op.get() == OperationId.AccountMove)),\n data.destination_account.store_into(destination_account),\n data.cash.store_into(cash),\n data.pool.store_into(pool),\n )\n ),\n\n # Validate the source account is not the destination account\n Assert(source_account.get() != destination_account.get()),\n\n # Check the closer-to-zero condition for the pool basket\n cast(Expr, closer_to_zero(source_account, pool)),\n\n # Update both users to the current index\n length.set(pool.length()),\n For(i.set(Int(0)), i.get() < length.get(), i.set(i.get() + Int(1))).Do(\n pool[i.get()].use(lambda instrument_amount:\n instrument_amount.instrument.use(lambda instrument:\n Seq(\n cast(Expr, perform_pool_move(source_account, instrument, abi_zero_int)),\n cast(Expr, perform_pool_move(destination_account, instrument, abi_zero_int))\n )\n )\n )\n ),\n\n # Perform update\n cast(Expr, signed_account_move_baskets(source_account, destination_account, cash, pool, abi_false, abi_false)),\n\n # Check health\n # NOTE: No need to check old vs new because all account moves make health worse\n health.set(health_check(source_account, abi_false)),\n Assert(Not(signed_ltz(health.get()))),\n )" }, { "identifier": "clean_orders", "path": "contracts_unified/core/methods/clean_orders.py", "snippet": "@ABIReturnSubroutine\ndef clean_orders(\n orders: abi.DynamicArray[OrderData],\n) -> Expr:\n \"\"\"\n Clean any expired orders from the order book\n\n Arguments:\n\n orders: The orders to analyze.\n \"\"\"\n\n i = abi.Uint64()\n length 
= abi.Uint64()\n order_data = OrderData()\n order_id = abi.make(OrderId)\n\n return Seq(\n # Loop through all orders\n length.set(orders.length()),\n For(i.set(Int(0)), i.get() < length.get(), i.set(i.get() + Int(1))).Do(\n # Check if order is expired\n order_data.set(orders[i.get()]),\n order_data.expiration_time.use(lambda expires:\n If(Global.latest_timestamp() > expires.get())\n .Then(\n # Delete order\n order_id.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_id(order_data))),\n cast(Expr, OrderStateHandler.delete_order_onchain(order_id)),\n )\n ),\n ),\n )" }, { "identifier": "create", "path": "contracts_unified/core/methods/create.py", "snippet": "@ABIReturnSubroutine\ndef create(\n pricecaster_id: EncodedAppId,\n wormhole_token_bridge_id: EncodedAppId,\n liquidation_factors: EncodedLiquidationFactors,\n withdraw_buffer_address: abi.Address,\n signature_validator_address: abi.Address,\n operator_address: abi.Address,\n quant_address: abi.Address,\n fee_target_address: abi.Address,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the contract method called at creation time\"\"\"\n\n return Seq(\n # Generate budget for the call\n setup(opup_budget.get()),\n\n # Initialize global state\n GlobalStateHandler.set_init_timestamp(),\n GlobalStateHandler.set_instrument_count(Int(0)),\n GlobalStateHandler.set_pricecaster_id(pricecaster_id.get()),\n GlobalStateHandler.set_wormhole_bridge_id(wormhole_token_bridge_id.get()),\n GlobalStateHandler.set_liquidation_factors(liquidation_factors.get()),\n GlobalStateHandler.set_withdraw_buffer(withdraw_buffer_address.get()),\n GlobalStateHandler.set_signature_validator(signature_validator_address.get()),\n GlobalStateHandler.set_operator_address(operator_address.get()),\n GlobalStateHandler.set_quant_address(quant_address.get()),\n GlobalStateHandler.set_fee_target(fee_target_address.get()),\n )" }, { "identifier": "deposit", "path": "contracts_unified/core/methods/deposit.py", "snippet": "@ABIReturnSubroutine\ndef deposit(\n account: AccountAddress,\n deposit_txn: abi.Transaction,\n payload: DepositWord,\n instrument_id: InstrumentId,\n instant_pool_move: Amount,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the standard Deposit contract method.\n\n Arguments:\n\n account (AccountAddress): Target account address to deposit to.\n deposit_txn (Transaction): The ABI \"Transaction-Type\" argument referencing the previous transaction to this call in the \"Standard Deposit\" group. 
Must be of type \"payment\" of \"asset transfer\".\n payload (DepositWord): Payload, must equal to \"Deposit\" string-literal.\n instrument_id (InstrumentId): Instrument to transfer.\n instant_pool_move (Amount): Optional amount to move to instant pool.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n\"\"\"\n\n deposit_asset_id = abi.Uint64()\n deposit_amount = abi.Uint64()\n element = InstrumentListElement()\n\n return Seq(\n # Generate budget for deposit\n setup(opup_budget.get()),\n\n # Validate deposit transaction\n Assert(\n And(\n # We don't really need to check rekey_to field,\n # but it's still good for us if we don't have to support unintended use cases.\n deposit_txn.get().rekey_to() == Global.zero_address(),\n deposit_txn.get().asset_close_to() == Global.zero_address(),\n )\n ),\n\n # Get deposit info from transaction\n Cond(\n [deposit_txn.get().type_enum() == TxnType.AssetTransfer, Seq(\n Assert(deposit_txn.get().asset_receiver() == Global.current_application_address()),\n deposit_asset_id.set(deposit_txn.get().xfer_asset()),\n deposit_amount.set(deposit_txn.get().asset_amount()),\n )],\n [deposit_txn.get().type_enum() == TxnType.Payment, Seq(\n Assert(deposit_txn.get().receiver() == Global.current_application_address()),\n deposit_asset_id.set(Int(0)),\n deposit_amount.set(deposit_txn.get().amount()),\n )],\n ),\n\n # Validate deposit asset is given instrument ID\n element.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n element.asset_id.use(lambda asset_id: Assert(deposit_asset_id.get() == asset_id.get())),\n\n # Perform deposit\n cast(Expr, inner_deposit_asset(account, payload, instrument_id, deposit_amount, instant_pool_move)),\n )" }, { "identifier": "fund_mbr", "path": "contracts_unified/core/methods/fund_mbr.py", "snippet": "@ABIReturnSubroutine\ndef fund_mbr(\n payment_txn: abi.PaymentTransaction,\n) -> Expr:\n \"\"\"Register payment in algos for the MBR fund of the contract\n\n Arguments:\n\n payment_txn: The payment transaction that will fund this contract\"\"\"\n\n return Seq(\n Assert(payment_txn.get().receiver() == Global.current_application_address()),\n GlobalStateHandler.add_mbr_fund(payment_txn.get().amount())\n )" }, { "identifier": "liquidate", "path": "contracts_unified/core/methods/liquidate.py", "snippet": "@ABIReturnSubroutine\ndef liquidate(\n liquidator_account: AccountAddress,\n user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Performs liquidation of a user's position\"\"\"\n\n # Constants\n abi_false = abi.Bool()\n abi_true = abi.Bool()\n abi_zero = Ratio()\n\n # Liquidation data\n data = LiquidationData()\n\n liquidatee_account = AccountAddress()\n liquidatee_maint_health = ExcessMargin()\n\n cash = abi.make(SignedInstrumentBasket)\n pool = abi.make(SignedInstrumentBasket)\n\n liquidator_health = ExcessMargin()\n\n factors = LiquidationFactors()\n cash_factor = Ratio()\n pool_factor = Ratio()\n\n cash_take_value = Price()\n pool_take_value = Price()\n pool_give_value = Price()\n\n alpha_numerator = ExcessMargin()\n alpha_denominator = ExcessMargin()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n abi_true.set(Int(1)),\n abi_zero.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Extract liquidation data\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n 
data.operation.use(lambda op: Assert(op.get() == OperationId.Liquidate)),\n data.liquidatee.store_into(liquidatee_account),\n data.cash.store_into(cash),\n data.pool.store_into(pool),\n )\n ),\n\n # Validate liquidatee is not liquidator\n Assert(liquidatee_account.get() != liquidator_account.get()),\n\n # Validate liquidatee is liquidatable\n liquidatee_maint_health.set(health_check(liquidatee_account, abi_true)),\n Assert(signed_ltz(liquidatee_maint_health.get())),\n\n # Perform netting on liquidatee account\n cast(Expr, perform_netting(liquidatee_account, liquidator_account)),\n\n # Get global constants\n factors.decode(GlobalStateHandler.get_liquidation_factors()),\n cash_factor.set(factors.cash_liquidation_factor),\n pool_factor.set(factors.pool_liquidation_factor),\n\n # Check the closer-to-zero condition for the pool basket\n cast(Expr, closer_to_zero(liquidatee_account, pool)),\n\n # Calculate basket values\n # NOTE: The cash_take_value and pool_give_value use the cash_factor, where as the pool_take_value uses the pool_factor\n # See the formulas from the design doc for more info.\n cash_take_value.set(calculate_basket_value(cash, abi_false, cash_factor, abi_true, abi_true, abi_false)),\n pool_take_value.set(calculate_basket_value(pool, abi_false, pool_factor, abi_true, abi_true, abi_false)),\n pool_give_value.set(calculate_basket_value(pool, abi_true, cash_factor, abi_true, abi_false, abi_false)),\n\n # Check inequality is satisfied\n Assert(cash_take_value.get() + pool_take_value.get() <= pool_give_value.get()),\n\n # Ensure fairness by calculating alpha and scaling the baskets\n # alpha = health(initial) / (initial_haircut * take_assets * price + initial_haircut * (1 - opt_util) * take_liabilities * price - (1 + initial_margin) * give_liabilities * price)\n # NOTE: health_check sets up the local state handler for itself, so we don't need to\n # NOTE: Reusing the above variables for the values used when calculating the denominator\n alpha_numerator.set(health_check(liquidatee_account, abi_false)),\n cash_take_value.set(calculate_basket_value(cash, abi_false, abi_zero, abi_false, abi_false, abi_false)),\n pool_take_value.set(calculate_basket_value(pool, abi_false, abi_zero, abi_false, abi_false, abi_true)),\n pool_give_value.set(calculate_basket_value(pool, abi_true, abi_zero, abi_false, abi_true, abi_false)),\n alpha_denominator.set(pool_give_value.get() - (cash_take_value.get() + pool_take_value.get())),\n\n # Clamp alpha to be between 0 and 1\n alpha_numerator.set(signed_abs(alpha_numerator.get())),\n\n If(alpha_numerator.get() > alpha_denominator.get())\n .Then(alpha_numerator.set(alpha_denominator.get())),\n\n # Scale the basket values to be fair\n cash.set(cast(abi.ReturnedValue, scale_basket(cash, alpha_numerator, alpha_denominator))),\n pool.set(cast(abi.ReturnedValue, scale_basket(pool, alpha_numerator, alpha_denominator))),\n\n # Perform liquidation swaps, all relevant glboal indexes are updated after netting\n cast(Expr, signed_account_move_baskets(liquidatee_account, liquidator_account, cash, pool, abi_false, abi_true)),\n\n # Verify liquidator is still healthy\n # NOTE: Liquidator must always be in the green after liquidation\n # NOTE: Liquidatee will always be healthier by design\n liquidator_health.set(health_check(liquidator_account, abi_false)),\n Assert(Not(signed_ltz(liquidator_health.get()))),\n )" }, { "identifier": "pool_move", "path": "contracts_unified/core/methods/pool_move.py", "snippet": "@ABIReturnSubroutine\ndef pool_move(\n account: AccountAddress,\n 
user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Transfers instruments from user's address to the pool\n\n Arguments:\n\n account (AccountAddress): User's account address.\n user_op (OperationMetaData): Operation metadata containing a basket of instruments.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n _server_data (abi.DynamicBytes): Server data. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_false = abi.Bool()\n\n user_old_health = ExcessMargin()\n user_health = ExcessMargin()\n\n data = PoolMoveData()\n instrument = InstrumentId()\n amount = SignedAmount()\n\n user_data = UserInstrumentData()\n price = Price()\n cash = Amount()\n neg_cash = SignedAmount()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Load constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Get basket from user_op.data\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n data.operation.use(lambda op: Assert(op.get() == OperationId.PoolMove)),\n instrument.set(data.instrument),\n amount.set(data.amount),\n )\n ),\n\n # Get old health\n user_old_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n\n # Move funds\n cast(Expr, perform_pool_move(account, instrument, amount)),\n\n # When there is a negative movement, we need to check that the user can support itself without netting\n If(signed_ltz(amount.get())).Then(\n # Get instrument price\n price.set(cast(abi.ReturnedValue, get_normalized_price(instrument))),\n # Extract user cash\n user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument))),\n cash.set(user_data.cash),\n neg_cash.set(signed_neg(cash.get())),\n # Remove all user cash temporarily\n cast(Expr, signed_add_to_cash(account, instrument, neg_cash)),\n # Recalculate health without netting the borrowed asset, ensure it is positive\n user_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n user_health.set(signed_add(user_health.get(), WideRatio([price.get(), cash.get()], [Int(PRICECASTER_RESCALE_FACTOR)]))),\n Assert(Not(signed_ltz(user_health.get()))),\n # Add all the cash back\n cast(Expr, signed_add_to_cash(account, instrument, cash)),\n ),\n\n # Validate user is still healthy\n user_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n Assert(Or(Not(signed_ltz(user_health.get())), signed_gte(user_health.get(), user_old_health.get()))),\n )" }, { "identifier": "portal_transfer", "path": "contracts_unified/core/methods/portal_transfer.py", "snippet": "@ABIReturnSubroutine\ndef portal_transfer(vaa: abi.DynamicBytes, *, output: abi.DynamicBytes) -> Expr:\n \"\"\"\n\n Called at the end of a transfer from the portal to C3 and\n use as a \"marker\" and VAA source for the deposit operation.\n\n Decoding and validation of the VAA, along with sender check is performed\n in the deposit operation, where this txn is referenced.\n\n \"\"\"\n\n return Seq(\n Assert(Len(vaa.get()) != Int(0), comment=\"Empty VAA\"),\n # Anything works here, since wormhole requires some value\n output.set(Bytes(\"base16\", \"0x00\")),\n )" }, { "identifier": "settle", "path": "contracts_unified/core/methods/settle.py", "snippet": "@ABIReturnSubroutine\ndef settle(\n add_order_txn: abi.ApplicationCallTransaction,\n buy_account: AccountAddress,\n user_op: OperationMetaData,\n 
_delegation_chain: DelegationChain,\n server_args: SettleExtraData,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Settles two orders\n\n Arguments:\n\n add_order_txn (ApplicationCallTransaction): The previous add_order transaction in this group that added the sell order to the order book.\n buy_account (AccountAddress): The buyer user's account address.\n user_op (OperationMetaData): Operation metadata containing buyer order data.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n server_args (SettleExtraData): Extra data for the settle operation.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_false = abi.Bool()\n add_order_op = OperationMetaData()\n add_order_data = abi.make(abi.DynamicBytes)\n\n buy_order = OrderData()\n sell_order = OrderData()\n\n sell_account = AccountAddress()\n\n buy_order_id = abi.make(OrderId)\n sell_order_id = abi.make(OrderId)\n\n buy_order_onchain = OnChainOrderData()\n sell_order_onchain = OnChainOrderData()\n\n # Amounts for each order's buy/sell side\n buyer_sell_amount = Amount()\n buyer_buy_amount = Amount()\n seller_sell_amount = Amount()\n seller_buy_amount = Amount()\n\n # Remaining amounts for each order's buy/sell side\n buyer_sell_remaining = Amount()\n buyer_borrow_remaining = Amount()\n buyer_repay_remaining = Amount()\n\n seller_sell_remaining = Amount()\n seller_borrow_remaining = Amount()\n seller_repay_remaining = Amount()\n\n # New remaining amounts for each order's buy/sell side\n buyer_new_sell_remaining = Amount()\n buyer_new_borrow_remaining = Amount()\n buyer_new_repay_remaining = Amount()\n\n seller_new_sell_remaining = Amount()\n seller_new_borrow_remaining = Amount()\n seller_new_repay_remaining = Amount()\n\n buyer_new_order_onchain = OnChainOrderData()\n seller_new_order_onchain = OnChainOrderData()\n\n buyer_buy_instrument = InstrumentId()\n buyer_sell_instrument = InstrumentId()\n seller_buy_instrument = InstrumentId()\n seller_sell_instrument = InstrumentId()\n\n buyer_to_send = Amount()\n seller_to_send = Amount()\n\n buyer_to_borrow = Amount()\n seller_to_borrow = Amount()\n buyer_to_repay = Amount()\n seller_to_repay = Amount()\n\n buyer_buy_delta = Amount()\n seller_buy_delta = Amount()\n buyer_sell_delta = Amount()\n seller_sell_delta = Amount()\n\n neg_borrow = SignedAmount()\n\n buyer_fees = Amount()\n seller_fees = Amount()\n\n buyer_old_health = ExcessMargin()\n buyer_health = ExcessMargin()\n seller_old_health = ExcessMargin()\n seller_health = ExcessMargin()\n\n buyer_negative_margin = Boolean()\n seller_negative_margin = Boolean()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Extract the buy order\n user_op.operation.use(lambda op_data:\n Seq(\n buy_order.decode(op_data.get()),\n buy_order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),\n buy_order.account.use(lambda acc: Assert(acc.get() == buy_account.get())),\n )\n ),\n\n # Add the order to the order book\n cast(Expr, OrderStateHandler.add_order(buy_order)),\n\n # Validate the sell order\n Assert(add_order_txn.get().application_id() == Global.current_application_id()),\n Assert(add_order_txn.get().on_completion() == OnComplete.NoOp),\n Assert(add_order_txn.get().application_args.length() == ADD_ORDER_ARG_COUNT),\n Assert(add_order_txn.get().application_args[ARG_INDEX_SELECTOR] == ADD_ORDER_SIG),\n\n # Get the sell order\n 
sell_account.decode(add_order_txn.get().application_args[ARG_INDEX_ACCOUNT]),\n add_order_op.decode(add_order_txn.get().application_args[ARG_INDEX_OP]),\n add_order_op.operation.store_into(add_order_data),\n sell_order.decode(add_order_data.get()),\n\n # Get order IDs\n buy_order_id.set(OrderStateHandler.get_order_id(buy_order)),\n sell_order_id.set(OrderStateHandler.get_order_id(sell_order)),\n\n # Get on chain order data\n buy_order_onchain.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_onchain(buy_order_id))),\n sell_order_onchain.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_onchain(sell_order_id))),\n\n # Validate the asset pair matches\n buy_order.sell_instrument.store_into(buyer_sell_instrument),\n buy_order.buy_instrument.store_into(buyer_buy_instrument),\n sell_order.sell_instrument.store_into(seller_sell_instrument),\n sell_order.buy_instrument.store_into(seller_buy_instrument),\n\n Assert(buyer_sell_instrument.get() == seller_buy_instrument.get()),\n Assert(buyer_buy_instrument.get() == seller_sell_instrument.get()),\n\n # Validate the orders are not expired\n buy_order.expiration_time.use(lambda expiration_time:\n Assert(expiration_time.get() > Global.latest_timestamp())\n ),\n sell_order.expiration_time.use(lambda expiration_time:\n Assert(expiration_time.get() > Global.latest_timestamp())\n ),\n\n # Validate the orders match\n buyer_sell_amount.set(buy_order.sell_amount),\n buyer_buy_amount.set(buy_order.buy_amount),\n seller_sell_amount.set(sell_order.sell_amount),\n seller_buy_amount.set(sell_order.buy_amount),\n\n Assert(\n BytesGe(\n BytesMul(Itob(buyer_sell_amount.get()), Itob(seller_sell_amount.get())),\n BytesMul(Itob(buyer_buy_amount.get()), Itob(seller_buy_amount.get()))\n )\n ),\n\n # Validate that the swap is fair for both the seller and the buyer\n buyer_to_send.set(server_args.buyer_to_send),\n seller_to_send.set(server_args.seller_to_send),\n\n Assert(\n BytesGe(\n BytesMul(Itob(buyer_to_send.get()), Itob(seller_sell_amount.get())),\n BytesMul(Itob(seller_to_send.get()), Itob(seller_buy_amount.get()))\n )\n ),\n\n Assert(\n BytesGe(\n BytesMul(Itob(seller_to_send.get()), Itob(buyer_sell_amount.get())),\n BytesMul(Itob(buyer_to_send.get()), Itob(buyer_buy_amount.get()))\n )\n ),\n\n # Validate that we are not sending more than allowed\n buyer_sell_remaining.set(buy_order_onchain.sell_remaining),\n Assert(buyer_sell_remaining.get() >= buyer_to_send.get()),\n seller_sell_remaining.set(sell_order_onchain.sell_remaining),\n Assert(seller_sell_remaining.get() >= seller_to_send.get()),\n\n # Validate that we are not borrowing more thn allowed\n buyer_borrow_remaining.set(buy_order_onchain.borrow_remaining),\n buyer_to_borrow.set(server_args.buyer_to_borrow),\n Assert(buyer_borrow_remaining.get() >= buyer_to_borrow.get()),\n\n seller_borrow_remaining.set(sell_order_onchain.borrow_remaining),\n seller_to_borrow.set(server_args.seller_to_borrow),\n Assert(seller_borrow_remaining.get() >= seller_to_borrow.get()),\n\n # Validate that we are not repaying more than allowed\n buyer_repay_remaining.set(buy_order_onchain.repay_remaining),\n buyer_to_repay.set(server_args.buyer_to_repay),\n Assert(buyer_repay_remaining.get() >= buyer_to_repay.get()),\n\n seller_repay_remaining.set(sell_order_onchain.repay_remaining),\n seller_to_repay.set(server_args.seller_to_repay),\n Assert(seller_repay_remaining.get() >= seller_to_repay.get()),\n\n # Validate that the fees are lower than the maximum possible\n buyer_fees.set(server_args.buyer_fees),\n 
seller_fees.set(server_args.seller_fees),\n Assert(buyer_fees.get() <= (buyer_to_send.get() / MAX_FEES_DIVISOR)),\n Assert(seller_fees.get() <= (buyer_to_send.get() / MAX_FEES_DIVISOR)),\n\n # We shouldn't borrow / repay more than the assets traded, including fees.\n Assert(buyer_to_borrow.get() <= buyer_to_send.get() + buyer_fees.get()),\n Assert(buyer_to_repay.get() <= seller_to_send.get()),\n Assert(seller_to_borrow.get() <= seller_to_send.get()),\n Assert(seller_to_repay.get() <= buyer_to_send.get() - seller_fees.get()),\n\n # Generate the updated order book for the buy order\n buyer_new_sell_remaining.set(buyer_sell_remaining.get() - buyer_to_send.get()),\n buyer_new_borrow_remaining.set(buyer_borrow_remaining.get() - buyer_to_borrow.get()),\n buyer_new_repay_remaining.set(buyer_repay_remaining.get() - buyer_to_repay.get()),\n buyer_new_order_onchain.set(buyer_new_sell_remaining, buyer_new_borrow_remaining, buyer_new_repay_remaining),\n\n # Generate the updated order book for the sell order\n seller_new_sell_remaining.set(seller_sell_remaining.get() - seller_to_send.get()),\n seller_new_borrow_remaining.set(seller_borrow_remaining.get() - seller_to_borrow.get()),\n seller_new_repay_remaining.set(seller_repay_remaining.get() - seller_to_repay.get()),\n seller_new_order_onchain.set(seller_new_sell_remaining, seller_new_borrow_remaining, seller_new_repay_remaining),\n\n # Calculate the swap amounts\n buyer_buy_delta.set(seller_to_send.get()),\n seller_buy_delta.set(buyer_to_send.get() - seller_fees.get()),\n buyer_sell_delta.set(signed_neg(buyer_to_send.get() + buyer_fees.get())),\n seller_sell_delta.set(signed_neg(seller_to_send.get())),\n\n # Update the on chain order data\n OrderStateHandler.set_order_onchain(buy_order_id, buyer_new_order_onchain),\n OrderStateHandler.set_order_onchain(sell_order_id, seller_new_order_onchain),\n\n # Get old health for both users if needed\n buyer_negative_margin.set(server_args.buyer_negative_margin),\n seller_negative_margin.set(server_args.seller_negative_margin),\n\n If(buyer_negative_margin.get()).Then(\n buyer_old_health.set(cast(abi.ReturnedValue, health_check(buy_account, abi_false))),\n ),\n\n If(seller_negative_margin.get()).Then(\n seller_old_health.set(cast(abi.ReturnedValue, health_check(sell_account, abi_false))),\n ),\n\n # Handle borrow updates\n If(buyer_to_borrow.get() > Int(0)).Then(\n neg_borrow.set(signed_neg(buyer_to_borrow.get())),\n cast(Expr, perform_pool_move(buy_account, buyer_sell_instrument, neg_borrow)),\n ),\n If(seller_to_borrow.get() > Int(0)).Then(\n neg_borrow.set(signed_neg(seller_to_borrow.get())),\n cast(Expr, perform_pool_move(sell_account, seller_sell_instrument, neg_borrow)),\n ),\n\n # Perform swap updates\n cast(Expr, signed_add_to_cash(buy_account, buyer_buy_instrument, buyer_buy_delta)),\n cast(Expr, signed_add_to_cash(sell_account, seller_buy_instrument, seller_buy_delta)),\n cast(Expr, signed_add_to_cash(buy_account, buyer_sell_instrument, buyer_sell_delta)),\n cast(Expr, signed_add_to_cash(sell_account, seller_sell_instrument, seller_sell_delta)),\n\n # Collect the fees\n cast(Expr, collect_fees(buyer_sell_instrument, buyer_fees)),\n cast(Expr, collect_fees(seller_buy_instrument, seller_fees)),\n\n # Handle repay updates\n If(buyer_to_repay.get() > Int(0)).Then(\n cast(Expr, perform_pool_move(buy_account, buyer_buy_instrument, buyer_to_repay)),\n ),\n If(seller_to_repay.get() > Int(0)).Then(\n cast(Expr, perform_pool_move(sell_account, seller_buy_instrument, seller_to_repay)),\n ),\n\n # Validate the 
users are still healthy\n buyer_health.set(cast(abi.ReturnedValue, health_check(buy_account, abi_false))),\n Assert(Or(Not(signed_ltz(buyer_health.get())), And(buyer_negative_margin.get(), signed_gte(buyer_health.get(), buyer_old_health.get())))),\n seller_health.set(cast(abi.ReturnedValue, health_check(sell_account, abi_false))),\n Assert(Or(Not(signed_ltz(seller_health.get())), And(seller_negative_margin.get(), signed_gte(seller_health.get(), seller_old_health.get())))),\n )" }, { "identifier": "add_order", "path": "contracts_unified/core/methods/settle.py", "snippet": "@ABIReturnSubroutine\ndef add_order(\n # NOTE: Any update on this function must update ADD_ORDER_SIG and ADD_ORDER_ARG_COUNT above\n account: AccountAddress,\n user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n opup_budget: Amount,\n) -> Expr:\n\n \"\"\"Adds an order to the order book\n\n Arguments:\n\n account (AccountAddress): User's account address.\n user_op (OperationMetaData): Operation metadata containing order data.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n order = OrderData()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Validate signature validator' call\n cast(Expr, sender_is_sig_validator()),\n\n # Get order from user_op.data\n user_op.operation.use(lambda op_data:\n Seq(\n order.decode(op_data.get()),\n order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),\n order.account.use(lambda acc: Assert(acc.get() == account.get()))\n )\n ),\n\n # Add order to the order book\n cast(Expr, OrderStateHandler.add_order(order))\n )" }, { "identifier": "update_instrument", "path": "contracts_unified/core/methods/update_instrument.py", "snippet": "@ABIReturnSubroutine\ndef update_instrument(\n info: UpdateInstrumentInfo,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the method that adds an instrument to the Core contract storage box.\n\n Arguments:\n\n info (UpdateInstrumentInfo): Instrument information to add or update.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_zero = abi.Uint64()\n abi_rate_one = abi.Uint64()\n abi_zero_address = abi.Address()\n\n timestamp = RelativeTimestamp()\n\n asset_id = AssetId()\n initial_haircut = Ratio()\n initial_margin = Ratio()\n maintenance_haircut = Ratio()\n maintenance_margin = Ratio()\n optimal_utilization = Ratio()\n min_rate = InterestRate()\n opt_rate = InterestRate()\n max_rate = InterestRate()\n borrow_index = abi.Uint64()\n lend_index = abi.Uint64()\n borrowed = Amount()\n liquidity = Amount()\n entry = InstrumentListElement()\n\n instrument_id = InstrumentId()\n instrument_count = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Validate sender\n Assert(Txn.sender() == GlobalStateHandler.get_quant_address()),\n\n # Initialize the instrument box first if it doesn't exist\n cast(Expr, GlobalStateHandler.initialize()),\n\n # Get init time\n timestamp.set(GlobalStateHandler.get_relative_timestamp()),\n\n # Create the instrument list element\n abi_zero.set(Int(0)),\n abi_rate_one.set(RATE_ONE),\n abi_zero_address.set(Global.zero_address()),\n\n # Extract fields from info\n asset_id.set(info.asset_id),\n initial_haircut.set(info.initial_haircut),\n initial_margin.set(info.initial_margin),\n maintenance_haircut.set(info.maintenance_haircut),\n maintenance_margin.set(info.maintenance_margin),\n 
optimal_utilization.set(info.optimal_utilization),\n min_rate.set(info.min_rate),\n opt_rate.set(info.opt_rate),\n max_rate.set(info.max_rate),\n\n # Load the current instrument count and validate it\n instrument_id.set(info.instrument_id),\n instrument_count.set(GlobalStateHandler.get_instrument_count()),\n Assert(instrument_id.get() <= instrument_count.get()),\n\n # Validate instrument zero is always algo\n If(instrument_id.get() == Int(0))\n .Then(Assert(asset_id.get() == Int(0))),\n\n # Check for new entry vs old entry\n If(instrument_id.get() == instrument_count.get())\n .Then(\n # Perform optin to asset if needed\n If(asset_id.get() != Int(0), cast(Expr, inner_asset_opt_in(asset_id))),\n\n # Create the new entry\n borrow_index.set(abi_rate_one),\n lend_index.set(abi_rate_one),\n borrowed.set(abi_zero),\n liquidity.set(abi_zero),\n\n # Increase the instrument count\n GlobalStateHandler.set_instrument_count(instrument_count.get() + Int(1)),\n )\n .Else(\n # Not a new instrument, we need to accrue the interest\n cast(Expr, perform_pool_move(abi_zero_address, instrument_id, abi_zero)),\n # Retain the accrued interest values for the new entry\n entry.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n # NOTE: The timestamp should be the same as the one for a new instrument\n entry.borrow_index.store_into(borrow_index),\n entry.lend_index.store_into(lend_index),\n entry.borrowed.store_into(borrowed),\n entry.liquidity.store_into(liquidity),\n ),\n\n # Create the new entry\n entry.set(\n asset_id,\n initial_haircut,\n initial_margin,\n maintenance_haircut,\n maintenance_margin,\n timestamp,\n borrow_index,\n lend_index,\n optimal_utilization,\n min_rate,\n opt_rate,\n max_rate,\n borrowed,\n liquidity,\n ),\n\n # Perform update/insert for entry\n GlobalStateHandler.set_instrument(instrument_id, entry),\n\n # Ensure we have enough funds for mbr\n cast(Expr, GlobalStateHandler.ensure_mbr_fund()),\n )" }, { "identifier": "update_parameter", "path": "contracts_unified/core/methods/update_parameter.py", "snippet": "@ABIReturnSubroutine\ndef update_parameter(\n key_to_update: abi.DynamicBytes,\n updated_value: abi.DynamicBytes,\n) -> Expr:\n \"\"\"Implements the method that changes a global parameter of the contract.\n\n Arguments:\n\n key_to_update (abi.DynamicBytes): Key of the parameter to update\n updated_value (abi.DynamicBytes): New value of the parameter\n\n \"\"\"\n\n key = ScratchVar(TealType.bytes)\n value = ScratchVar(TealType.bytes)\n\n return Seq(\n key.store(key_to_update.get()),\n value.store(updated_value.get()),\n If(key.load() == KEY_LIQUIDATION_FACTORS).Then(\n Assert(GlobalStateHandler.get_quant_address() == Txn.sender()),\n GlobalStateHandler.set_liquidation_factors(value.load())\n ).Else(\n Assert(Global.creator_address() == Txn.sender()),\n Cond(\n [key.load() == KEY_PRICECASTER_ID, GlobalStateHandler.set_pricecaster_id(value.load())],\n [key.load() == KEY_WORMHOLE_BRIDGE_ID, GlobalStateHandler.set_wormhole_bridge_id(value.load())],\n [key.load() == KEY_SIGNATURE_VALIDATOR, GlobalStateHandler.set_signature_validator(value.load())],\n [key.load() == KEY_QUANT_ADDRESS, GlobalStateHandler.set_quant_address(value.load())],\n [key.load() == KEY_FEE_TARGET, GlobalStateHandler.set_fee_target(value.load())],\n [key.load() == KEY_WITHDRAW_BUFFER, GlobalStateHandler.set_withdraw_buffer(value.load())],\n [key.load() == KEY_OPERATOR_ADDRESS, GlobalStateHandler.set_operator_address(value.load())],\n )\n )\n )" }, { "identifier": "withdraw", "path": 
"contracts_unified/core/methods/withdraw.py", "snippet": "@ABIReturnSubroutine\ndef withdraw(\n account: AccountAddress,\n user_op: OperationMetaData,\n delegation_chain: DelegationChain,\n server_params: WithdrawExtraData,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Withdraws funds from a user and sends them to a given Wormhole or Algorand address, depending on target chain\n\n Args:\n\n account (AccountAddress): The user account address.\n user_op (OperationMetaData): The user operation metadata. This contains signed withdraw data: instrument, amount, receiver, and maximum amount to borrow.\n delegation_chain (DelegationChain): The delegation chain. For withdraw operations this must be empty.\n server_params (abi.Uint64): The server parameters. For withdraw, this parameter just contains server' own balance.\n opup_budget (Amount): Additional computation budget for the operation.\n\n \"\"\"\n\n # Holds the withdraw buffer address\n wormhole_withdraw_buffer = abi.Address()\n\n # Constants\n abi_false = abi.Bool()\n\n # Holds extracted withdraw data from the user_op\n withdraw_data = WithdrawData()\n\n # Holds extracted withdraw data from the user_op\n instrument_id = InstrumentId()\n amount = Amount()\n receiver = WormholeAddress()\n max_borrow = Amount()\n amount_to_deduct = SignedAmount()\n amount_to_withdraw = SignedAmount()\n amount_to_borrow = SignedAmount()\n max_fees = Amount()\n\n # User balance, to calculate the cash/pool split of the withdrawal\n position = UserInstrumentData()\n balance = Amount()\n\n # Fees to be collected\n withdraw_fee = Amount()\n\n # Used to validate the user's health\n user_health = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Load constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # No delegation is allowed for withdraw\n Assert(delegation_chain.length() == Int(0)),\n\n # Decode and extract withdraw operation\n user_op.operation.use(lambda op_data:\n Seq(\n withdraw_data.decode(op_data.get()),\n withdraw_data.operation.use(lambda op: Assert(op.get() == OperationId.Withdraw)),\n withdraw_data.instrument.store_into(instrument_id),\n withdraw_data.amount.store_into(amount),\n withdraw_data.receiver.store_into(receiver),\n withdraw_data.max_borrow.store_into(max_borrow),\n withdraw_data.max_fees.store_into(max_fees),\n )\n ),\n\n # Calculate cash and pool withdrawal amounts\n position.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n balance.set(position.cash),\n server_params.locked_cash.use(lambda locked_cash:\n balance.set(balance.get() - locked_cash.get()),\n ),\n\n # Get the fees\n withdraw_fee.set(server_params.withdraw_fee),\n\n # Do not exceed maximum fee limit specified in request.\n Assert(withdraw_fee.get() <= max_fees.get()),\n\n # Validate the user is not borrowing more than they have allowed\n Assert(amount.get() <= max_borrow.get() + balance.get()),\n\n # Calculate withdrawal amounts\n If(amount.get() > balance.get())\n .Then(\n amount_to_borrow.set(signed_neg(amount.get() - balance.get())),\n )\n .Else(\n amount_to_borrow.set(Int(0)),\n ),\n # This is the delta value to apply to the user cash\n amount_to_deduct.set(signed_neg(amount.get())),\n # This is the amount the user will actually get, implicitly fails if fees are bigger than the amount\n amount_to_withdraw.set(amount.get() - withdraw_fee.get()),\n\n # Borrow if needed\n If(amount_to_borrow.get() != Int(0))\n .Then(cast(Expr, perform_pool_move(account, instrument_id, 
amount_to_borrow))),\n\n # Remove assets\n cast(Expr, signed_add_to_cash(account, instrument_id, amount_to_deduct)),\n\n # Pay fees\n cast(Expr, collect_fees(instrument_id, withdraw_fee)),\n\n # Validate user is still healthy\n # NOTE: Withdraw always makes the user less healthy, so we don't need to check\n # the user's health before the withdrawal\n user_health.set(health_check(account, abi_false)),\n Assert(Not(signed_ltz(user_health.get()))),\n\n # Now that assets/liabilities are up to date, send out payment transaction.\n # If we are withdrawing to offchain, we need to check wormhole transactions\n wormhole_withdraw_buffer.set(GlobalStateHandler.get_withdraw_buffer()),\n receiver.chain_id.use(lambda chain_id:\n receiver.address.use(lambda address:\n If(\n chain_id.get() == Int(ALGORAND_CHAIN_ID),\n cast(Expr, submit_withdraw_onchain(address, instrument_id, amount_to_withdraw)),\n cast(Expr, submit_withdraw_offchain(wormhole_withdraw_buffer, instrument_id, amount_to_withdraw)),\n )\n )\n ),\n )" }, { "identifier": "wormhole_deposit", "path": "contracts_unified/core/methods/wormhole_deposit.py", "snippet": "@ABIReturnSubroutine\ndef wormhole_deposit(\n portal_transfer_txn: abi.ApplicationCallTransaction,\n account: AccountAddress,\n payload: DepositWord,\n instrument_id: InstrumentId,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the contract method called during an ASA deposit via Wormhole.\n\n Arguments:\n\n portal_transfer_txn (ApplicationCallTransaction): The ABI \"ApplicationCallTransaction\" argument referencing the previous transaction to this call in the \"Wormhole Deposit\" group. Must be of type \"application call\".\n account (AccountAddress): Target account address to deposit to.\n payload (DepositWord): Payload, must equal to \"WormholeDeposit\" string-literal.\n instrument_id (InstrumentId): Instrument to transfer.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n ----------------------------------------------------------------------------------------------------------------------------------\n\n Security rationale: The completeTransfer method of the Wormhole Token Bridge guarantees that:\n\n - The VAA was processed by the vaaVerify method of the Wormhole Core.\n - The VAA matches the completeTransfer arg.\n - The portal_transfer method exists in the group and has the proper target appId matching the Vaa.\n - The portal_transfer method has the correct sender (the server in our case)\n\n If we can ensure that the completeTransfer method exists in the group and it's from\n the canonical Wormhole Token Bridge Appid, we can transitively check remaining properties\n for additional security.\n\n Additionally, the innertxn doing the transfer actually uses the VAA information which\n we ensure is correct for the three sources: this method, the completeTransfer method and the\n vaaVerify method in the Core.\n\n ref: https://github.com/wormhole-foundation/wormhole/blob/5255e933d68629f0643207b0f9d3fa797af5cbf7/algorand/token_bridge.py#L466\n\n \"\"\"\n\n vaa = portal_transfer_txn.get().application_args[1]\n complete_transfer_txn = Gtxn[portal_transfer_txn.get().group_index() - Int(1)]\n decoded_payload = DecodedWormholePayload()\n abi_vaa = abi.make(abi.DynamicBytes)\n abi_amount = abi.Uint64()\n abi_repay_amount = abi.Uint64()\n abi_receiver = abi.Address()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Ensure there are no rogue transactions past the box-budget setup\n Assert(Global.group_size() == Txn.group_index() + Int(2), 
comment=\"Unknown transactions ahead detected\"),\n\n # Ensure completeTransfer from canonical Wormhole Token Bridge exists.\n Assert(complete_transfer_txn.application_args[0] == Bytes(\"completeTransfer\"), comment=\"expected completeTransfer method call\"),\n Assert(complete_transfer_txn.application_id() == GlobalStateHandler.get_wormhole_bridge_id(), comment=\"completeTransfer call appId unknown\"),\n\n # In our current design, owner == creator, so this is valid. What we should check?\n Assert(complete_transfer_txn.sender() == GlobalStateHandler.get_operator_address(), comment=\"completeTransfer call sender unknown\"),\n\n # Ensure VAAs match\n abi_vaa.decode(vaa),\n\n # The completeTransfer code ensures his VAA equals portal_transfer VAA, we check here\n # if we match our VAA\n Assert(complete_transfer_txn.application_args[1] == abi_vaa.get(), comment=\"VAAs do not match\"),\n\n # Decode the VAA\n decoded_payload.set(cast(abi.ReturnedValue, decode_wormhole_payload(abi_vaa))),\n abi_amount.set(decoded_payload.amount),\n abi_repay_amount.set(decoded_payload.repay_amount),\n abi_receiver.set(decoded_payload.receiver),\n\n # Validate the VAA, do we need more checks?\n XAssert(\n abi_receiver.get() == account.get(),\n comment=\"Receiving user address mismatch\",\n ),\n\n # Perform deposit\n cast(Expr, inner_deposit_asset(account, payload, instrument_id, abi_amount, abi_repay_amount)),\n )" } ]
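The settle snippet above checks that the two orders cross, and that the amounts actually exchanged are fair to both sides, by cross-multiplying amounts with BytesMul/BytesGe instead of dividing; byte-slice arithmetic keeps the products out of 64-bit overflow range and avoids division rounding. The first of those checks, restated as plain Python integer arithmetic (an illustrative sketch, not contract code):

def orders_cross(buyer_sell_amount, buyer_buy_amount, seller_sell_amount, seller_buy_amount):
    # Mirrors Assert(BytesGe(BytesMul(buyer_sell, seller_sell), BytesMul(buyer_buy, seller_buy)))
    # in settle: the buyer's offered price is at least the seller's asking price,
    # expressed without division as a cross-multiplication.
    return buyer_sell_amount * seller_sell_amount >= buyer_buy_amount * seller_buy_amount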
from pyteal import ( BareCallActions, CallConfig, MethodConfig, OnCompleteAction, OptimizeOptions, Reject, Router, ) from contracts_unified.core.bare_calls import delete, update from contracts_unified.core.methods import ( account_move, add_order, clean_orders, create, deposit, fund_mbr, liquidate, pool_move, portal_transfer, settle, update_instrument, update_parameter, withdraw, wormhole_deposit, )
11556
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()),
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()),
delete_application=OnCompleteAction.always(delete()),
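The cropped_code/all_code fields above stop inside the BareCallActions block, and next_line shows the continuation registering the delete_application bare call. A self-contained PyTeal sketch of the same Router pattern, with a hypothetical creator-only handler rather than the C3 contract's actual bare calls and methods:

from pyteal import (
    Approve, Assert, BareCallActions, Expr, Global, OnCompleteAction,
    OptimizeOptions, Router, Subroutine, TealType, Txn,
)

@Subroutine(TealType.none)
def sender_is_creator() -> Expr:
    # Only the application creator may update or delete the app
    return Assert(Txn.sender() == Global.creator_address())

EXAMPLE_ROUTER = Router(
    "ExampleApp",
    BareCallActions(
        update_application=OnCompleteAction.always(sender_is_creator()),
        delete_application=OnCompleteAction.always(sender_is_creator()),
        no_op=OnCompleteAction.create_only(Approve()),  # allow bare app creation
    ),
)

# ABI methods would be registered with @EXAMPLE_ROUTER.method before compiling
approval, clear_state, contract = EXAMPLE_ROUTER.compile_program(
    version=8, optimize=OptimizeOptions(scratch_slots=True)
)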
1
2023-11-17 20:54:15+00:00
16k
cyberark/ark-sdk-python
ark_sdk_python/auth/ark_isp_auth.py
[ { "identifier": "ArkAuth", "path": "ark_sdk_python/auth/ark_auth.py", "snippet": "class ArkAuth(ABC):\n def __init__(self, cache_authentication: bool = True) -> None:\n self._logger = get_logger(app=self.__class__.__name__)\n self._cache_authentication = cache_authentication\n self._cache_keyring = None\n if cache_authentication:\n self._cache_keyring = ArkKeyring(self.authenticator_name())\n self.__token = None\n self._active_profile = None\n self._active_auth_profile = None\n\n def _resolve_cache_postfix(self, auth_profile: ArkAuthProfile) -> str:\n \"\"\"\n Resolves the postfix used to get the token based on the auth method\n\n Args:\n auth_profile (ArkAuthProfile): _description_\n\n Returns:\n str: _description_\n \"\"\"\n postfix = auth_profile.username\n if auth_profile.auth_method == ArkAuthMethod.Direct and auth_profile.auth_method_settings:\n direct_method_settings = cast(DirectArkAuthMethodSettings, auth_profile.auth_method_settings)\n if direct_method_settings.endpoint:\n postfix = f'{postfix}_{urlparse(direct_method_settings.endpoint).netloc}'\n return postfix\n\n @abstractmethod\n def _perform_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret] = None, force: bool = False\n ) -> ArkToken:\n \"\"\"\n Performs the actual authentication, based on the implementation\n\n Args:\n profile (ArkProfile): Profile to authenticate on\n auth_profile (ArkAuthProfile): Specific auth profile for the authentication\n secret (Optional[ArkSecret]): Secret used for authentication. Defaults to None\n force (bool): Force authenticate and ignore caching\n\n Returns:\n Optional[ArkToken]: Token of the authentication to be used\n \"\"\"\n\n @abstractmethod\n def _perform_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n \"\"\"\n Tries to perform refresh authentication on the existing token\n This is not promised for all authenticators\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n token (ArkToken): _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n\n def authenticate(\n self,\n profile: Optional[ArkProfile] = None,\n auth_profile: Optional[ArkAuthProfile] = None,\n secret: Optional[ArkSecret] = None,\n force: bool = False,\n refresh_auth: bool = False,\n ) -> ArkToken:\n \"\"\"\n Authenticates with the specified authenticator implementation.\n The implementation is based on the `_perform_authentication` method.\n When caching is allowed, authorization credentials are loaded from the cache.\n\n Args:\n profile (Optional[ArkProfile]): Profile containing information about the environment and authentication methods\n auth_profile (Optional[ArkAuthProfile]): Specific auth profile to use instead of the profile, when provided\n secret (Optional[ArkSecret]): Secret used for authentication\n force (bool): Determines whether to force authentication without cached credentials\n refresh_auth (bool): Attempts to refresh an existing cached auth when it is available\n\n Raises:\n ArkAuthException: _description_\n\n Returns:\n ArkToken: The authentication token to use. 
The token is also saved in the object.\n \"\"\"\n if not auth_profile and not profile:\n raise ArkAuthException('Either a profile or a specific auth profile must be supplied')\n if not auth_profile and profile:\n if self.authenticator_name() in profile.auth_profiles:\n auth_profile = profile.auth_profiles[self.authenticator_name()]\n else:\n raise ArkAuthException(\n f'{self.authenticator_human_readable_name()} [{self.authenticator_name()}] is not defined within the authentication profiles'\n )\n if not profile:\n profile = ArkProfileLoader.load_default_profile()\n if auth_profile.auth_method not in self.supported_auth_methods() and auth_profile.auth_method != ArkAuthMethod.Default:\n raise ArkAuthException(\n f'{self.authenticator_human_readable_name()} does not support authentication method {auth_profile.auth_method}'\n )\n if auth_profile.auth_method == ArkAuthMethod.Default:\n auth_profile.auth_method, auth_profile.auth_method_settings = self.default_auth_method()\n if auth_profile.auth_method in ArkAuthMethodsRequireCredentials and not auth_profile.username:\n raise ArkAuthException(f'{self.authenticator_human_readable_name()} requires a username and optionally a secret')\n ark_token = None\n token_refreshed = False\n if self._cache_authentication and self._cache_keyring and not force:\n # Load the postfix of the token based on the auth profile and method type\n ark_token = self._cache_keyring.load_token(profile, self._resolve_cache_postfix(auth_profile))\n if ark_token and ark_token.expires_in.replace(tzinfo=None) <= datetime.now():\n # Expired, try to refresh\n if refresh_auth and ark_token.refresh_token:\n ark_token = self._perform_refresh_authentication(profile, auth_profile, ark_token)\n if ark_token:\n token_refreshed = True\n else:\n ark_token = None\n if not ark_token:\n ark_token = self._perform_authentication(profile, auth_profile, secret, force)\n if self._cache_authentication and self._cache_keyring:\n self._cache_keyring.save_token(profile, ark_token, self._resolve_cache_postfix(auth_profile))\n elif refresh_auth and not token_refreshed:\n try:\n ark_token = self._perform_refresh_authentication(profile, auth_profile, ark_token)\n if self._cache_authentication and self._cache_keyring:\n self._cache_keyring.save_token(profile, ark_token, self._resolve_cache_postfix(auth_profile))\n except Exception as ex: # Fallback to normal authentication\n self._logger.info(\n f'Refresh auth for [{self.authenticator_human_readable_name()}] failed, falling back to normal authentication [{str(ex)}]'\n )\n ark_token = self._perform_authentication(profile, auth_profile, secret, force)\n if self._cache_authentication and self._cache_keyring:\n self._cache_keyring.save_token(profile, ark_token, self._resolve_cache_postfix(auth_profile))\n self.__token = ark_token\n self._active_profile = profile\n self._active_auth_profile = auth_profile\n return ark_token\n\n def is_authenticated(self, profile: ArkProfile) -> bool:\n \"\"\"\n Checks whether the specified profile is authenticated (has a valid token), either from the keyring or in memory.\n If the valid token originated from the keyring, it is loaded into memory.\n\n Args:\n profile (ArkProfile): _description_\n\n Returns:\n bool: _description_\n \"\"\"\n self._logger.info(f'Checking if [{self.authenticator_name()}] is authenticated')\n if self.__token:\n self._logger.info('Token is already loaded')\n return True\n if self.authenticator_name() in profile.auth_profiles and self._cache_keyring:\n self.__token = 
self._cache_keyring.load_token(profile, profile.auth_profiles[self.authenticator_name()].username)\n if self.__token and self.__token.expires_in.replace(tzinfo=None) <= datetime.now():\n self.__token = None\n else:\n self._logger.info('Loaded token from cache successfully')\n return self.__token != None\n return False\n\n def load_authentication(self, profile: Optional[ArkProfile] = None, refresh_auth: bool = False) -> Optional[ArkToken]:\n \"\"\"\n Loads and returns the authentication token from the cache, if it exists.\n If specified, the method also attempts to refresh the token as needed.\n\n Args:\n profile (Optional[ArkProfile], optional): _description_. Defaults to None.\n refresh_auth (bool, optional): _description_. Defaults to False.\n\n Returns:\n Optional[ArkToken]: _description_\n \"\"\"\n self._logger.info(f'Trying to load [{self.authenticator_name()}] authentication')\n if not profile:\n if self._active_profile:\n profile = self._active_profile\n else:\n profile = ArkProfileLoader.load_default_profile()\n auth_profile = self._active_auth_profile\n if not auth_profile and self.authenticator_name() in profile.auth_profiles:\n auth_profile = profile.auth_profiles[self.authenticator_name()]\n if auth_profile:\n self._logger.info(\n f'Loading authentication for profile [{profile.profile_name}] and auth profile [{self.authenticator_name()}] of type [{auth_profile.auth_method.value}]'\n )\n if self._cache_keyring:\n self.__token = self._cache_keyring.load_token(profile, self._resolve_cache_postfix(auth_profile))\n if refresh_auth:\n if (\n self.__token\n and self.__token.expires_in.replace(tzinfo=None) - timedelta(seconds=DEFAULT_EXPIRATION_GRACE_DELTA_SECONDS)\n > datetime.now()\n ):\n self._logger.info('Token did not pass grace expiration, no need to refresh')\n else:\n self._logger.info('Trying to refresh token authentication')\n self.__token = self._perform_refresh_authentication(profile, auth_profile, self.__token)\n if self.__token and self.__token.expires_in.replace(tzinfo=None) > datetime.now():\n self._logger.info('Token refreshed')\n if self.__token and self._cache_authentication and self._cache_keyring:\n self._cache_keyring.save_token(profile, self.__token, self._resolve_cache_postfix(auth_profile))\n if self.__token and self.__token.expires_in.replace(tzinfo=None) <= datetime.now():\n self.__token = None\n if self.__token:\n self._active_profile = profile\n self._active_auth_profile = auth_profile\n return self.__token\n return None\n\n @property\n def token(self) -> Optional[ArkToken]:\n return self.__token\n\n @property\n def active_profile(self) -> Optional[ArkProfile]:\n return self._active_profile\n\n @property\n def active_auth_profile(self) -> Optional[ArkAuthProfile]:\n return self._active_auth_profile\n\n @staticmethod\n @abstractmethod\n def authenticator_name() -> str:\n \"\"\"\n Returns the name of the authenticator used for the auth profile and services.\n\n Returns:\n str: _description_\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def authenticator_human_readable_name() -> str:\n \"\"\"\n Returns the human-readable name of the authenticator.\n\n Returns:\n str: _description_\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def supported_auth_methods() -> List[ArkAuthMethod]:\n \"\"\"\n Returns the authenticator's supported authentication methods.\n\n Returns:\n List[ArkAuthMethod]: _description_\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:\n \"\"\"\n Returns the default 
authentication method and settings.\n\n Returns:\n Tuple[ArkAuthMethod, ArkAuthMethodSettings]: _description_\n \"\"\"" }, { "identifier": "ArkIdentity", "path": "ark_sdk_python/auth/identity/ark_identity.py", "snippet": "class ArkIdentity:\n def __init__(\n self,\n username: str,\n password: Optional[str],\n identity_url: Optional[str] = None,\n mfa_type: Optional[str] = None,\n logger: Optional[logging.Logger] = None,\n cache_authentication: bool = True,\n verify: Optional[Union[str, bool]] = None,\n load_cache: bool = False,\n cache_profile: Optional[ArkProfile] = None,\n ) -> None:\n self.__username = username\n self.__password = password\n self.__identity_url = identity_url or self.__resolve_fqdn_from_username()\n if not self.__identity_url.startswith('https://'):\n self.__identity_url = f'https://{self.__identity_url}'\n self.__mfa_type = mfa_type\n self.__logger = logger or get_logger(app=self.__class__.__name__)\n self.__interaction_process: Optional[Process] = None\n self.__is_polling: bool = False\n self.__keyring = ArkKeyring(self.__class__.__name__.lower()) if cache_authentication else None\n self.__cache_authentication = cache_authentication\n\n self.__session = Session()\n self.__session_details = None\n self.__session_exp = None\n self.__session.headers.update(ArkIdentityFQDNResolver.default_headers())\n if verify is None:\n if ArkSystemConfig.trusted_certificate() is not None:\n verify = ArkSystemConfig.trusted_certificate()\n else:\n verify = ArkSystemConfig.is_verifiying_certificates()\n self.__verify = verify\n self.__session.verify = verify\n if load_cache and cache_authentication and cache_profile:\n self.__load_cache(cache_profile)\n\n def __load_cache(self, profile: Optional[ArkProfile] = None) -> bool:\n if self.__keyring and profile:\n token = self.__keyring.load_token(profile, f'{self.__username}_identity')\n session = self.__keyring.load_token(profile, f'{self.__username}_identity_session')\n if token and session:\n import dill as pickle\n\n try:\n self.__session_details = AdvanceAuthResult.parse_raw(token.token.get_secret_value())\n except ValidationError:\n self.__session_details = IdpAuthStatusResult.parse_raw(token.token.get_secret_value())\n self.__session_exp = token.expires_in\n self.__session = pickle.loads(codecs.decode(session.token.get_secret_value().encode(), \"base64\"))\n self.__session.verify = self.__verify\n self.__identity_url = token.endpoint\n return True\n return False\n\n def __save_cache(self, profile: Optional[ArkProfile] = None) -> None:\n if self.__keyring and profile and self.__session_details:\n import dill as pickle\n\n delta = self.__session_details.token_lifetime or DEFAULT_TOKEN_LIFETIME_SECONDS\n self.__session_exp = datetime.now() + timedelta(seconds=delta)\n self.__keyring.save_token(\n profile,\n ArkToken(\n token=self.__session_details.json(),\n username=self.__username,\n endpoint=self.__identity_url,\n token_type=ArkTokenType.Internal,\n auth_method=ArkAuthMethod.Other,\n expires_in=self.__session_exp,\n refresh_token=self.__session_details.refresh_token,\n ),\n f'{self.__username}_identity',\n )\n self.__keyring.save_token(\n profile,\n ArkToken(\n token=codecs.encode(pickle.dumps(self.__session), 'base64').decode(),\n username=self.__username,\n endpoint=self.__identity_url,\n token_type=ArkTokenType.Internal,\n auth_method=ArkAuthMethod.Other,\n expires_in=self.__session_exp,\n ),\n f'{self.__username}_identity_session',\n )\n\n def __resolve_fqdn_from_username(self) -> str:\n tenant_suffix = 
self.__username[self.__username.index('@') :]\n return ArkIdentityFQDNResolver.resolve_tenant_fqdn_from_tenant_suffix(tenant_suffix=tenant_suffix)\n\n def __start_authentication(self) -> StartAuthResponse:\n self.__logger.info(f'Starting authentication with user {self.__username} and fqdn {self.__identity_url}')\n response = self.__session.post(\n url=f'{self.__identity_url}/Security/StartAuthentication',\n json={'User': self.__username, 'Version': '1.0', 'PlatformTokenResponse': True},\n )\n try:\n parsed_res: StartAuthResponse = StartAuthResponse.parse_raw(response.text)\n if not parsed_res.result.challenges and not parsed_res.result.idp_redirect_url:\n raise ValidationError('No challenges or idp redirect url on start auth')\n except (ValidationError, TypeError) as ex:\n try:\n if 'PodFqdn' in response.text:\n fqdn = TenantFqdnResponse.parse_raw(response.text)\n self.__identity_url = f'https://{fqdn.result.pod_fqdn}'\n self.__session = Session()\n self.__session.verify = self.__verify\n self.__session.headers.update(ArkIdentityFQDNResolver.default_headers())\n return self.__start_authentication()\n except Exception:\n pass\n raise ArkException('Identity start authentication failed to be parsed / validated') from ex\n return parsed_res\n\n def __advance_authentication(\n self, mechanism_id: str, session_id: str, answer: str, action: str\n ) -> Union[AdvanceAuthMidResponse, AdvanceAuthResponse]:\n self.__logger.info(f'Advancing authentication with user {self.__username} and fqdn {self.__identity_url} and action {action}')\n response = self.__session.post(\n url=f'{self.__identity_url}/Security/AdvanceAuthentication',\n json={'SessionId': session_id, 'MechanismId': mechanism_id, 'Action': action, 'Answer': answer},\n )\n try:\n parsed_res: AdvanceAuthMidResponse = AdvanceAuthMidResponse.parse_raw(response.text)\n if parsed_res.result.summary == 'LoginSuccess':\n parsed_res: AdvanceAuthResponse = AdvanceAuthResponse.parse_raw(response.text)\n except (ValidationError, TypeError) as ex:\n raise ArkException(f'Identity advance authentication failed to be parsed / validated [{response.text}]') from ex\n return parsed_res\n\n def __identity_idp_auth_status(self, session_id: str) -> IdpAuthStatusResponse:\n self.__logger.info(f'Calling idp auth status for fqdn {self.__identity_url} and session id {session_id}')\n response = self.__session.post(\n url=f'{self.__identity_url}/Security/OobAuthStatus',\n json={'SessionId': session_id},\n )\n try:\n parsed_res: IdpAuthStatusResponse = IdpAuthStatusResponse.parse_raw(response.text)\n except (ValidationError, TypeError) as ex:\n raise ArkException(f'Identity idp auth status failed to be parsed / validated [{response.text}]') from ex\n return parsed_res\n\n def __start_input_process(\n self, pipe_write: Connection, pipe_read: Connection, mechanism: Mechanism, oob_advance_resp: AdvanceAuthMidResponse\n ) -> None:\n if self.__interaction_process:\n raise ArkException('Interaction thread is already in progress')\n if sys.platform not in ['win32', 'cygwin']:\n ctx = get_context('fork')\n else:\n ctx = get_context('spawn')\n self.__interaction_process = ctx.Process(\n target=input_process,\n args=(\n pipe_write,\n pipe_read,\n mechanism,\n oob_advance_resp,\n ),\n )\n self.__interaction_process.start()\n\n def __stop_input_process(self) -> None:\n if self.__interaction_process:\n self.__interaction_process.kill()\n self.__interaction_process.join()\n self.__interaction_process = None\n\n def __poll_authentication(\n self,\n profile: ArkProfile,\n mechanism: 
Mechanism,\n start_auth_response: StartAuthResponse,\n oob_advance_resp: AdvanceAuthMidResponse,\n is_interactive: bool,\n ) -> None:\n try:\n if self.__is_polling:\n raise ArkException('MFA Polling is already in progress')\n self.__is_polling = True\n input_conn, output_conn = Pipe(duplex=True)\n if is_interactive:\n self.__start_input_process(input_conn, output_conn, mechanism, oob_advance_resp)\n start_time = datetime.now()\n while self.__is_polling:\n current_time = datetime.now()\n if (current_time - start_time).seconds >= POLL_TIME_SECONDS:\n self.__is_polling = False\n raise ArkException('Timeout reached while polling for user answer')\n if output_conn.poll():\n mfa_code = output_conn.recv()\n advance_resp = self.__advance_authentication(\n mechanism.mechanism_id, start_auth_response.result.session_id, mfa_code, 'Answer'\n )\n if isinstance(advance_resp, AdvanceAuthResponse):\n input_conn.send('DONE')\n else:\n input_conn.send('CONTINUE')\n else:\n advance_resp = self.__advance_authentication(mechanism.mechanism_id, start_auth_response.result.session_id, '', 'Poll')\n if isinstance(advance_resp, AdvanceAuthResponse):\n # Done here, save the token\n self.__is_polling = False\n if is_interactive:\n self.__stop_input_process()\n self.__session_details = advance_resp.result\n self.__session.headers.update(\n {'Authorization': f'Bearer {advance_resp.result.auth}', **ArkIdentityFQDNResolver.default_headers()}\n )\n delta = self.__session_details.token_lifetime or DEFAULT_TOKEN_LIFETIME_SECONDS\n self.__session_exp = datetime.now() + timedelta(seconds=delta)\n if self.__cache_authentication:\n self.__save_cache(profile)\n return\n time.sleep(POLL_INTERVAL_MS)\n except Exception as ex:\n if is_interactive:\n self.__stop_input_process()\n if not self.__session_details:\n raise ex\n finally:\n if is_interactive:\n self.__stop_input_process()\n\n def __pick_mechanism(self, challenge: Challenge) -> Mechanism:\n factors = {'otp': '📲 Push / Code', 'sms': '📟 SMS', 'email': '📧 Email', 'pf': '📞 Phone call'}\n supported_mechanisms = [m for m in challenge.mechanisms if m.name.lower() in SUPPORTED_MECHANISMS]\n answers = inquirer.prompt(\n [\n inquirer.List(\n 'mfa',\n 'Please pick one of the following MFA methods',\n choices=[factors[m.name.lower()] for m in supported_mechanisms],\n default=factors[self.__mfa_type] if self.__mfa_type and self.__mfa_type in factors else None,\n carousel=True,\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n raise ArkException('Failed to get answer for which MFA method to use')\n self.__mfa_type = next(filter(lambda f: factors[f] == answers['mfa'], factors.keys()))\n return next(filter(lambda m: factors[m.name.lower()] == answers['mfa'], supported_mechanisms))\n\n def __perform_idp_authentication(\n self, start_auth_response: StartAuthResponse, profile: Optional[ArkProfile] = None, interactive: bool = False\n ) -> None:\n if self.__is_polling:\n raise ArkException('MFA / IDP Polling is already in progress')\n # Print the user some info if we are interactive\n if interactive:\n ArkArgsFormatter.print_normal_bright(\n \"\\nYou are now being redirected from your browser to your external identity provider for authentication\\n\"\n \"If the browser did not open, you may also click the following URL to access your identity provider authentication\\n\\n\"\n f\"{start_auth_response.result.idp_redirect_short_url}\\n\"\n )\n\n # Error can be ignored\n webbrowser.open(start_auth_response.result.idp_redirect_short_url, new=0, autoraise=True)\n\n # Start polling for 
idp auth\n self.__is_polling = True\n start_time = datetime.now()\n while self.__is_polling:\n current_time = datetime.now()\n if (current_time - start_time).seconds >= POLL_TIME_SECONDS:\n self.__is_polling = False\n raise ArkException('Timeout reached while polling for idp auth')\n idp_auth_status = self.__identity_idp_auth_status(start_auth_response.result.idp_login_session_id)\n if idp_auth_status.result.state == 'Success' and idp_auth_status.result.token:\n # We managed to successfully authenticate\n # Done here, save the token\n self.__session_details = idp_auth_status.result\n self.__session.headers.update(\n {'Authorization': f'Bearer {idp_auth_status.result.token}', **ArkIdentityFQDNResolver.default_headers()}\n )\n delta = self.__session_details.token_lifetime or DEFAULT_TOKEN_LIFETIME_SECONDS\n self.__session_exp = datetime.now() + timedelta(seconds=delta)\n if self.__cache_authentication:\n self.__save_cache(profile)\n break\n time.sleep(POLL_INTERVAL_MS)\n\n @classmethod\n def has_cache_record(cls, profile: ArkProfile, username: str, refresh_auth_allowed: bool) -> bool:\n \"\"\"\n Checks if a cache record exists for the specified profile and username.\n\n Args:\n profile (ArkProfile): _description_\n username (str): _description_\n refresh_auth_allowed (bool): _description_\n\n Returns:\n bool: _description_\n \"\"\"\n keyring = ArkKeyring(cls.__name__.lower())\n token = keyring.load_token(profile, f'{username}_identity')\n session = keyring.load_token(profile, f'{username}_identity_session')\n if token is not None and session is not None:\n if token.expires_in and token.expires_in < datetime.now():\n if token.refresh_token and refresh_auth_allowed:\n return True\n return False\n return True\n return False\n\n @classmethod\n @cached(cache=LRUCache(maxsize=1024))\n def is_idp_user(cls, username: str) -> bool:\n \"\"\"\n Checks whether or not the specified username is from an external IDP.\n\n Args:\n username (str): _description_\n\n Returns:\n bool: _description_\n \"\"\"\n if re.match('.*@cyberark\\\\.cloud\\\\.(\\\\d)+', username) is not None:\n return False\n identity = ArkIdentity(username=username, password='')\n resp = identity.__start_authentication()\n return resp.result.idp_redirect_url != None\n\n def get_apps(self) -> Dict:\n \"\"\"\n Returns the applications to which the user is logged in.\n\n Raises:\n ArkException: _description_\n\n Returns:\n Dict: _description_\n \"\"\"\n if not self.__session_details:\n raise ArkException('Identity authentication is required first')\n cookies = self.__session.cookies.copy()\n response = self.__session.post(url=f'{self.__identity_url}/UPRest/GetUPData')\n self.__session.cookies = cookies\n return json.loads(response.text)\n\n def auth_identity(self, profile: Optional[ArkProfile] = None, interactive: bool = False, force: bool = False) -> None:\n \"\"\"\n Authenticates to Identity with the information specified in the constructor.\n If MFA is configured and `interactive` is enabled, the user is prompted for the MFA secret.\n The auth token and other details are stored in the object for future use.\n\n Args:\n profile (Optional[ArkProfile]): Profile to use (loaded from cache, if available)\n interactive (bool): Determines whether interactive user prompts are allowed\n force (bool): Determines whether to ignore cache and force authentication (defaults to false)\n\n Raises:\n ArkException: _description_\n \"\"\"\n self.__logger.debug('Attempting to authenticate to Identity')\n self.__session_details = None\n if 
self.__cache_authentication and not force and self.__load_cache(profile):\n # Check if expired\n if self.__session_exp.replace(tzinfo=None) > datetime.now():\n self.__logger.info('Loaded identity details from cache')\n return\n self.__session = Session()\n self.__session.verify = self.__verify\n self.__session.headers.update(ArkIdentityFQDNResolver.default_headers())\n\n # Start the authentication\n start_auth_response = self.__start_authentication()\n if start_auth_response.result.idp_redirect_url:\n # External IDP Flow, ignore the mechanisms and just open a browser\n self.__perform_idp_authentication(start_auth_response, profile, interactive)\n return\n\n # Check if password is part of the first challenges list and if so, answer it directly\n current_challenge_idx = 0\n for mechanism in start_auth_response.result.challenges[current_challenge_idx].mechanisms:\n if mechanism.name.lower() == 'up':\n current_challenge_idx += 1\n # Password, answer it\n if not self.__password:\n if not interactive:\n raise ArkAuthException('No password and not interactive, cannot continue')\n answers = inquirer.prompt(\n [inquirer.Password('answer', message='Identity Security Platform Secret')],\n render=ArkInquirerRender(),\n )\n if not answers:\n raise ArkAuthException('Canceled by user')\n self.__password = answers['answer']\n advance_resp = self.__advance_authentication(\n mechanism.mechanism_id, start_auth_response.result.session_id, self.__password, 'Answer'\n )\n if isinstance(advance_resp, AdvanceAuthResponse) and len(start_auth_response.result.challenges) == 1:\n # Done here, save the token\n self.__session_details = advance_resp.result\n self.__session.headers.update(\n {'Authorization': f'Bearer {advance_resp.result.auth}', **ArkIdentityFQDNResolver.default_headers()}\n )\n delta = self.__session_details.token_lifetime or DEFAULT_TOKEN_LIFETIME_SECONDS\n self.__session_exp = datetime.now() + timedelta(seconds=delta)\n if self.__cache_authentication:\n self.__save_cache(profile)\n return\n break\n\n # Pick MFA for the user\n if interactive:\n self.__pick_mechanism(start_auth_response.result.challenges[current_challenge_idx])\n\n # Handle a case where MFA type was supplied\n if self.__mfa_type and self.__mfa_type.lower() in SUPPORTED_MECHANISMS and current_challenge_idx == 1:\n for mechanism in start_auth_response.result.challenges[current_challenge_idx].mechanisms:\n if mechanism.name.lower() == self.__mfa_type.lower():\n oob_advance_resp = self.__advance_authentication(\n mechanism.mechanism_id, start_auth_response.result.session_id, '', 'StartOOB'\n )\n self.__poll_authentication(profile, mechanism, start_auth_response, oob_advance_resp, interactive)\n return\n\n if not interactive:\n raise ArkNonInteractiveException('User interaction is not supported while not interactive and mfa type given was not found')\n\n # Handle the rest of the challenges, might also handle the first challenge if no password is in the mechanisms\n for challenge in start_auth_response.result.challenges[current_challenge_idx:]:\n mechanism = self.__pick_mechanism(challenge)\n oob_advance_resp = self.__advance_authentication(mechanism.mechanism_id, start_auth_response.result.session_id, '', 'StartOOB')\n self.__poll_authentication(profile, mechanism, start_auth_response, oob_advance_resp, interactive)\n\n # pylint: disable=unused-argument\n def refresh_auth_identity(self, profile: Optional[ArkProfile] = None, interactive: bool = False, force: bool = False) -> None:\n \"\"\"\n Performs a token refresh with the object's 
existing details.\n\n Args:\n profile (Optional[ArkProfile]): The profile to load from the cache, if available\n force (bool): Determines whether to ignore cache and force authentication (defaults to false)\n\n Raises:\n ArkAuthException: _description_\n \"\"\"\n from jose.jwt import get_unverified_claims\n\n if not self.__session_details.token:\n # We only refresh platform token at the moment, call the normal authentication instead\n return self.auth_identity(profile, interactive, force)\n\n self.__logger.debug('Attempting to refresh authenticate to Identity')\n self.__session = Session()\n self.__session.verify = self.__verify\n self.__session.headers.update(ArkIdentityFQDNResolver.default_headers())\n decoded_token = get_unverified_claims(self.__session_details.token)\n platform_tenant_id = decoded_token['tenant_id']\n cookies = {\n f'refreshToken-{platform_tenant_id}': self.__session_details.refresh_token,\n f'idToken-{platform_tenant_id}': self.__session_details.token,\n }\n response = self.__session.post(\n url=f'{self.__identity_url}/OAuth2/RefreshPlatformToken',\n cookies=cookies,\n )\n if response.status_code != HTTPStatus.OK:\n raise ArkAuthException('Failed to refresh token')\n new_token = response.cookies.get(f'idToken-{platform_tenant_id}')\n new_refresh_token = response.cookies.get(f'refreshToken-{platform_tenant_id}')\n if not new_token or not new_refresh_token:\n raise ArkAuthException('Failed to retrieve refresh tokens cookies')\n self.__session_details.token = new_token\n self.__session_details.refresh_token = new_refresh_token\n self.__session_details.token_lifetime = (\n datetime.fromtimestamp(get_unverified_claims(new_token)['exp'])\n - datetime.fromtimestamp(get_unverified_claims(new_token)['iat'])\n ).seconds\n delta = self.__session_details.token_lifetime or DEFAULT_TOKEN_LIFETIME_SECONDS\n self.__session_exp = datetime.now() + timedelta(seconds=delta)\n if self.__cache_authentication:\n self.__save_cache(profile)\n\n @property\n def session(self) -> Session:\n return self.__session\n\n @property\n def session_token(self) -> Optional[str]:\n if self.__session_details:\n if self.__session_details.token:\n return self.__session_details.token\n if 'auth' in self.__session_details.__dict__ and self.__session_details.auth:\n return self.__session_details.auth\n return None\n\n @property\n def session_details(self) -> Optional[AdvanceAuthResult]:\n return self.__session_details\n\n @property\n def identity_url(self) -> str:\n return self.__identity_url" }, { "identifier": "ArkIdentityServiceUser", "path": "ark_sdk_python/auth/identity/ark_identity_service_user.py", "snippet": "class ArkIdentityServiceUser:\n def __init__(\n self,\n username: str,\n token: str,\n app_name: str,\n identity_url: Optional[str] = None,\n env: Optional[AwsEnv] = None,\n logger: Optional[logging.Logger] = None,\n cache_authentication: bool = True,\n verify: Optional[Union[str, bool]] = None,\n load_cache: bool = False,\n cache_profile: Optional[ArkProfile] = None,\n ) -> None:\n self.__username = username\n self.__token = token\n self.__app_name = app_name\n self.__env = env or AwsEnv(os.getenv(DEPLOY_ENV, AwsEnv.PROD.value))\n self.__identity_url = identity_url or self.__resolve_fqdn_from_username()\n self.__logger = logger or get_logger(app=self.__class__.__name__)\n self.__keyring = ArkKeyring(self.__class__.__name__.lower()) if cache_authentication else None\n self.__cache_authentication = cache_authentication\n\n self.__session = Session()\n self.__session_token = None\n self.__session_exp 
= None\n self.__session.headers.update(ArkIdentityFQDNResolver.default_system_headers())\n if verify is None:\n if ArkSystemConfig.trusted_certificate() is not None:\n verify = ArkSystemConfig.trusted_certificate()\n else:\n verify = ArkSystemConfig.is_verifiying_certificates()\n self.__session.verify = verify\n if load_cache and cache_authentication and cache_profile:\n self.__load_cache(cache_profile)\n\n def __load_cache(self, profile: Optional[ArkProfile] = None) -> bool:\n if self.__keyring and profile:\n token = self.__keyring.load_token(profile, f'{self.__username}_identity_service_user')\n if token and token.username == self.__username:\n self.__session_token = token.token.get_secret_value()\n self.__session_exp = token.expires_in\n self.__session.headers.update({'Authorization': f'Bearer {self.__session_token}'})\n return True\n return False\n\n def __save_cache(self, profile: Optional[ArkProfile] = None) -> None:\n if self.__keyring and profile and self.__session_token:\n self.__session_exp = datetime.now() + timedelta(hours=4)\n self.__keyring.save_token(\n profile,\n ArkToken(\n token=self.__session_token,\n username=self.__username,\n endpoint=self.__identity_url,\n token_type=ArkTokenType.Internal,\n auth_method=ArkAuthMethod.Other,\n expires_in=self.__session_exp,\n ),\n f'{self.__username}_identity_service_user',\n )\n\n def __resolve_fqdn_from_username(self) -> str:\n tenant_suffix = self.__username[self.__username.index('@') :]\n return ArkIdentityFQDNResolver.resolve_tenant_fqdn_from_tenant_suffix(\n tenant_suffix=tenant_suffix, identity_env_url=IDENTITY_ENV_URLS[self.__env]\n )\n\n def auth_identity(self, profile: Optional[ArkProfile] = None, force: bool = False) -> None:\n \"\"\"\n Authenticates to Identity with a service user.\n This method creates an auth token and authorizes to the service.\n\n Args:\n profile (Optional[ArkProfile]): Profile to be used to load from caching, if available\n force (bool): Determines whether to discard existing cache, defaults to `False`\n\n Raises:\n ArkAuthException: _description_\n \"\"\"\n # Login to identity with the service service user\n self.__logger.info(f'Authenticating to service user via endpoint [{self.__identity_url}]')\n if self.__cache_authentication and not force and self.__load_cache(profile):\n # Check if expired\n if self.__session_exp.replace(tzinfo=None) > datetime.now():\n self.__logger.info('Loaded identity service user details from cache')\n return\n\n token_response: Response = self.__session.post(\n url=f'{self.__identity_url}/Oauth2/Token/{self.__app_name}',\n auth=HTTPBasicAuth(self.__username, self.__token),\n verify=True,\n data={'grant_type': 'client_credentials', 'scope': 'api'},\n )\n if token_response.status_code != HTTPStatus.OK:\n raise ArkAuthException('Failed logging in to identity service user')\n auth_result = json.loads(token_response.text)\n if 'access_token' not in auth_result.keys():\n raise ArkAuthException('Failed logging in to identity service user, access token not found')\n access_token = auth_result['access_token']\n\n # Authorize to the application with the service user\n params = {\n 'client_id': self.__app_name,\n 'response_type': 'id_token',\n 'scope': 'openid profile api',\n 'redirect_uri': 'https://cyberark.cloud/redirect',\n }\n self.__logger.info(f'Trying to request a platform authorization with params [{params}]')\n authorize_response = self.__session.get(\n url=f'{self.__identity_url}/OAuth2/Authorize/{self.__app_name}',\n headers={'Authorization': f'Bearer {access_token}'},\n 
params=params,\n allow_redirects=False,\n )\n if authorize_response.status_code != HTTPStatus.FOUND or 'Location' not in authorize_response.headers:\n raise ArkAuthException('Failed to authorize to application')\n # Parse the authorized token and return the session with it\n location_header_splitted = authorize_response.headers['Location'].split('#', 1)\n if len(location_header_splitted) != 2:\n raise ArkAuthException('Failed to parse location header to retrieve token from')\n parsed_query = parse_qs(location_header_splitted[1])\n if 'id_token' not in parsed_query or len(parsed_query['id_token']) != 1:\n raise ArkAuthException('Failed to parse id token from location header')\n self.__session_token = parsed_query['id_token'][0]\n self.__session.headers.update({'Authorization': f'Bearer {self.__session_token}', **ArkIdentityFQDNResolver.default_headers()})\n self.__session_exp = datetime.now() + timedelta(hours=4)\n self.__logger.info(\n f'Created a service user session via endpoint [{self.__identity_url}] ' f'with user [{self.__username}] to platform'\n )\n if self.__cache_authentication:\n self.__save_cache(profile)\n\n @property\n def session(self) -> Session:\n return self.__session\n\n @property\n def session_token(self) -> Optional[str]:\n return self.__session_token\n\n @property\n def identity_url(self) -> str:\n return self.__identity_url" }, { "identifier": "ArkSystemConfig", "path": "ark_sdk_python/common/ark_system_config.py", "snippet": "class ArkSystemConfig:\n _NO_COLOR = False\n _IS_INTERACTIVE = True\n _IS_CERTIFICATE_VERIFICATION = True\n _IS_ALLOWING_OUTPUT = False\n _TRUSTED_CERT = None\n\n @staticmethod\n def disable_color():\n ArkSystemConfig._NO_COLOR = True\n\n @staticmethod\n def enable_color():\n ArkSystemConfig._NO_COLOR = False\n\n @staticmethod\n def is_coloring():\n return not ArkSystemConfig._NO_COLOR\n\n @staticmethod\n def enable_interactive():\n ArkSystemConfig._IS_INTERACTIVE = True\n\n @staticmethod\n def disable_interactive():\n ArkSystemConfig._IS_INTERACTIVE = False\n\n @staticmethod\n def is_interactive():\n return ArkSystemConfig._IS_INTERACTIVE\n\n @staticmethod\n def allow_output():\n ArkSystemConfig._IS_ALLOWING_OUTPUT = True\n\n @staticmethod\n def disallow_output():\n ArkSystemConfig._IS_ALLOWING_OUTPUT = False\n\n @staticmethod\n def is_allowing_output():\n return ArkSystemConfig._IS_ALLOWING_OUTPUT\n\n @staticmethod\n def enable_verbose_logging(log_level: Optional[str] = None):\n log_level = log_level or 'DEBUG'\n os.environ[LOG_LEVEL] = log_level\n\n @staticmethod\n def disable_verbose_logging() -> None:\n os.environ[LOG_LEVEL] = 'CRITICAL'\n\n @staticmethod\n def set_logger_style(logger_style: str) -> None:\n if logger_style in ['default']:\n os.environ[LOGGER_STYLE] = logger_style\n else:\n os.environ[LOGGER_STYLE] = 'default'\n\n @staticmethod\n def enable_certificate_verification() -> None:\n ArkSystemConfig._IS_CERTIFICATE_VERIFICATION = True\n\n @staticmethod\n def disable_certificate_verification() -> None:\n ArkSystemConfig._IS_CERTIFICATE_VERIFICATION = False\n\n @staticmethod\n def is_verifiying_certificates() -> bool:\n if ARK_DISABLE_CERTIFICATE_VERIFICATION_ENV_VAR in os.environ:\n return False\n return ArkSystemConfig._IS_CERTIFICATE_VERIFICATION\n\n @staticmethod\n def set_trusted_certificate(trusted_cert: str) -> None:\n ArkSystemConfig._TRUSTED_CERT = trusted_cert\n\n @staticmethod\n def trusted_certificate() -> str:\n return ArkSystemConfig._TRUSTED_CERT" }, { "identifier": "ROOT_DOMAIN", "path": 
"ark_sdk_python/common/env/ark_env_mapping.py", "snippet": "ROOT_DOMAIN: Final[Dict[AwsEnv, str]] = {\n AwsEnv.PROD: 'cyberark.cloud',\n}" }, { "identifier": "AwsEnv", "path": "ark_sdk_python/common/env/ark_env_mapping.py", "snippet": "class AwsEnv(str, Enum):\n PROD = 'prod'" }, { "identifier": "ArkProfile", "path": "ark_sdk_python/models/ark_profile.py", "snippet": "class ArkProfile(ArkModel):\n profile_name: str = Field(default='ark', alias='Profile Name', description='Profile name for storage')\n profile_description: str = Field(default='Default Ark Profile', alias='Profile Description', description='Info about the profile')\n auth_profiles: Dict[str, ArkAuthProfile] = Field(\n description='Authentication profiles configurations, map from name of the authenticator to its profile', default_factory=dict\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_profiles', pre=True)\n def validate_auth_profiles(cls, val):\n auth_profiles = {}\n for k, v in val.items():\n auth_profile = ArkAuthProfile.parse_obj(v)\n # Make sure that the settings are parsed with the correct class\n # Due to properties overlapping\n if 'auth_method_settings' in v:\n auth_profile.auth_method_settings = ArkAuthMethodSettingsMap[auth_profile.auth_method].parse_obj(v['auth_method_settings'])\n auth_profiles[k] = auth_profile\n return auth_profiles" }, { "identifier": "ArkAuthException", "path": "ark_sdk_python/models/ark_exceptions.py", "snippet": "class ArkAuthException(ArkException):\n pass" }, { "identifier": "ArkException", "path": "ark_sdk_python/models/ark_exceptions.py", "snippet": "class ArkException(Exception):\n pass" }, { "identifier": "ArkAuthMethod", "path": "ark_sdk_python/models/auth/ark_auth_method.py", "snippet": "class ArkAuthMethod(str, Enum):\n Identity = 'identity'\n IdentityServiceUser = 'identity_service_user'\n Direct = 'direct'\n Default = 'default'\n Other = 'other'" }, { "identifier": "ArkAuthMethodSettings", "path": "ark_sdk_python/models/auth/ark_auth_method.py", "snippet": "class ArkAuthMethodSettings(ArkModel):\n pass" }, { "identifier": "IdentityArkAuthMethodSettings", "path": "ark_sdk_python/models/auth/ark_auth_method.py", "snippet": "class IdentityArkAuthMethodSettings(ArkAuthMethodSettings):\n identity_mfa_method: Literal[('pf', 'sms', 'email', 'otp')] = Field(\n description='MFA method if mfa is needed', default='email', alias='MFA Method to use by default [pf, sms, email, otp]'\n )\n identity_mfa_interactive: bool = Field(description='Allow interactive MFA (passcodes)', alias='Allow Interactive MFA', default=True)\n identity_application: Optional[str] = Field(description='Identity application to use once logged in', alias='Identity Application')\n identity_url: Optional[str] = Field(\n description='Identity url to use for authentication instead of fqdn resolving', alias='Identity Url'\n )" }, { "identifier": "IdentityServiceUserArkAuthMethodSettings", "path": "ark_sdk_python/models/auth/ark_auth_method.py", "snippet": "class IdentityServiceUserArkAuthMethodSettings(ArkAuthMethodSettings):\n identity_authorization_application: str = Field(\n description='Identity application to authorize once logged in with the service user',\n default='__identity_cybr_user_oidc',\n alias='Service User Authorization Application',\n )" }, { "identifier": "ArkAuthProfile", "path": "ark_sdk_python/models/auth/ark_auth_profile.py", "snippet": "class ArkAuthProfile(ArkModel):\n username: Optional[str] = Field(description='Username to authenticate with', alias='Username')\n 
auth_method: ArkAuthMethod = Field(\n description='Authentication type to use when an authenticator supports multiple types',\n alias='Authentication Method',\n default=ArkAuthMethod.Default,\n )\n auth_method_settings: ArkAuthMethodSettings = Field(\n description='Authentication method settings used for the authenticator',\n alias='Authentication Method Settings',\n default_factory=DefaultArkAuthMethodSettings,\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_method_settings', pre=True, always=True, allow_reuse=True)\n def parse_method_settings(cls, v, values):\n if 'auth_method' in values:\n return ArkAuthMethodSettingsMap[values['auth_method']].parse_obj(v)\n return parse_obj_as(ArkAuthMethodSettingsTypes, v)" }, { "identifier": "ArkSecret", "path": "ark_sdk_python/models/auth/ark_secret.py", "snippet": "class ArkSecret(ArkModel):\n secret: Optional[SecretStr] = Field(alias='Secret', description='Secret to be used')\n\n class Config:\n json_encoders = {SecretStr: lambda v: v.get_secret_value()}" }, { "identifier": "ArkToken", "path": "ark_sdk_python/models/auth/ark_token.py", "snippet": "class ArkToken(ArkModel):\n token: SecretStr = Field(description='Actual token', alias='Token')\n username: Optional[str] = Field(description='Username whos token is related to', alias='Username')\n endpoint: Optional[str] = Field(description='Endpoint associated with the token', alias='Authentication Endpoint')\n token_type: ArkTokenType = Field(description='Token type', alias='Token Type', default=ArkTokenType.JWT)\n auth_method: Optional[ArkAuthMethod] = Field(description='The authenticaton method type of this token', alias='Authentication Method')\n expires_in: Optional[datetime] = Field(description='When the token will expire', alias='Expires In')\n refresh_token: Optional[str] = Field(description='Refresh token used for refreshing the existing token', alias='Refresh Token')\n metadata: Dict[str, Any] = Field(description='Token metadata', alias='Token Metadata', default_factory=dict)\n\n class Config:\n json_encoders = {SecretStr: lambda v: v.get_secret_value()}" }, { "identifier": "ArkTokenType", "path": "ark_sdk_python/models/auth/ark_token.py", "snippet": "class ArkTokenType(str, Enum):\n JWT = 'JSON Web Token'\n Cookies = 'Cookies'\n Token = 'Token'\n Password = 'Password'\n Custom = 'Custom'\n Internal = 'Internal'" } ]
import codecs import os import pickle from datetime import datetime, timedelta from typing import Final, List, Optional, Tuple, cast from overrides import overrides from ark_sdk_python.auth.ark_auth import ArkAuth from ark_sdk_python.auth.identity.ark_identity import ArkIdentity from ark_sdk_python.auth.identity.ark_identity_service_user import ArkIdentityServiceUser from ark_sdk_python.common.ark_system_config import ArkSystemConfig from ark_sdk_python.common.env import ROOT_DOMAIN, AwsEnv from ark_sdk_python.models import ArkProfile from ark_sdk_python.models.ark_exceptions import ArkAuthException, ArkException from ark_sdk_python.models.auth import ( ArkAuthMethod, ArkAuthMethodSettings, ArkAuthProfile, ArkSecret, ArkToken, ArkTokenType, IdentityArkAuthMethodSettings, IdentityServiceUserArkAuthMethodSettings, )
11757
# pylint: disable=unused-argument AUTH_NAME: Final[str] = 'isp' AUTH_HUMAN_READABLE_NAME: Final[str] = 'Identity Security Platform' AUTH_METHODS: Final[List[ArkAuthMethod]] = [ ArkAuthMethod.Identity, ArkAuthMethod.IdentityServiceUser, ] DEFAULT_AUTH_METHOD: Final[ArkAuthMethod] = ArkAuthMethod.Identity DEFAULT_AUTH_METHOD_SETTINGS: Final[IdentityArkAuthMethodSettings] = IdentityArkAuthMethodSettings() DEFAULT_TOKEN_LIFETIME: Final[int] = 3600
# pylint: disable=unused-argument AUTH_NAME: Final[str] = 'isp' AUTH_HUMAN_READABLE_NAME: Final[str] = 'Identity Security Platform' AUTH_METHODS: Final[List[ArkAuthMethod]] = [ ArkAuthMethod.Identity, ArkAuthMethod.IdentityServiceUser, ] DEFAULT_AUTH_METHOD: Final[ArkAuthMethod] = ArkAuthMethod.Identity DEFAULT_AUTH_METHOD_SETTINGS: Final[IdentityArkAuthMethodSettings] = IdentityArkAuthMethodSettings() DEFAULT_TOKEN_LIFETIME: Final[int] = 3600
class ArkISPAuth(ArkAuth):
0
2023-11-13 09:24:31+00:00
16k
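This row's target completion is `class ArkISPAuth(ArkAuth):`, and its gold context (index 0) is the `ArkAuth` base class, whose abstract and static interface the subclass must satisfy. As a hedged sketch, not the ground-truth continuation of `ark_isp_auth.py`, the module-level constants from the cropped code plausibly back that interface as shown below; the real class also implements `_perform_authentication` and `_perform_refresh_authentication`, routing to `ArkIdentity` or `ArkIdentityServiceUser` per the imports, and those bodies are omitted here.

```python
from typing import List, Tuple

# Assumes the row's own import block is in scope: ArkAuth, ArkAuthMethod,
# ArkAuthMethodSettings, plus the AUTH_* / DEFAULT_* constants defined in the
# cropped code above.


class ArkISPAuth(ArkAuth):
    @staticmethod
    def authenticator_name() -> str:
        """Key under which this authenticator is stored in profiles and caches."""
        return AUTH_NAME

    @staticmethod
    def authenticator_human_readable_name() -> str:
        """Display name: 'Identity Security Platform'."""
        return AUTH_HUMAN_READABLE_NAME

    @staticmethod
    def supported_auth_methods() -> List[ArkAuthMethod]:
        """Identity (interactive) and Identity service user."""
        return AUTH_METHODS

    @staticmethod
    def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:
        """Interactive Identity authentication with its default settings."""
        return DEFAULT_AUTH_METHOD, DEFAULT_AUTH_METHOD_SETTINGS
```

Callers would then obtain an `ArkToken` through the inherited `authenticate()` flow documented in the `ArkAuth` snippet above (cached keyring lookup, optional refresh, then `_perform_authentication`).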
mohenghui/detectAuto_v8
ultralytics/trackers/bot_sort.py
[ { "identifier": "TrackState", "path": "ultralytics/trackers/basetrack.py", "snippet": "class TrackState:\n \"\"\"Enumeration of possible object tracking states.\"\"\"\n\n New = 0\n Tracked = 1\n Lost = 2\n Removed = 3" }, { "identifier": "BYTETracker", "path": "ultralytics/trackers/byte_tracker.py", "snippet": "class BYTETracker:\n \"\"\"\n BYTETracker: A tracking algorithm built on top of YOLOv8 for object detection and tracking.\n\n The class is responsible for initializing, updating, and managing the tracks for detected objects in a video\n sequence. It maintains the state of tracked, lost, and removed tracks over frames, utilizes Kalman filtering for\n predicting the new object locations, and performs data association.\n\n Attributes:\n tracked_stracks (list[STrack]): List of successfully activated tracks.\n lost_stracks (list[STrack]): List of lost tracks.\n removed_stracks (list[STrack]): List of removed tracks.\n frame_id (int): The current frame ID.\n args (namespace): Command-line arguments.\n max_time_lost (int): The maximum frames for a track to be considered as 'lost'.\n kalman_filter (object): Kalman Filter object.\n\n Methods:\n update(results, img=None): Updates object tracker with new detections.\n get_kalmanfilter(): Returns a Kalman filter object for tracking bounding boxes.\n init_track(dets, scores, cls, img=None): Initialize object tracking with detections.\n get_dists(tracks, detections): Calculates the distance between tracks and detections.\n multi_predict(tracks): Predicts the location of tracks.\n reset_id(): Resets the ID counter of STrack.\n joint_stracks(tlista, tlistb): Combines two lists of stracks.\n sub_stracks(tlista, tlistb): Filters out the stracks present in the second list from the first list.\n remove_duplicate_stracks(stracksa, stracksb): Removes duplicate stracks based on IOU.\n \"\"\"\n\n def __init__(self, args, frame_rate=30):\n \"\"\"Initialize a YOLOv8 object to track objects with given arguments and frame rate.\"\"\"\n self.tracked_stracks = [] # type: list[STrack]\n self.lost_stracks = [] # type: list[STrack]\n self.removed_stracks = [] # type: list[STrack]\n\n self.frame_id = 0\n self.args = args\n self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer)\n self.kalman_filter = self.get_kalmanfilter()\n self.reset_id()\n\n def update(self, results, img=None):\n \"\"\"Updates object tracker with new detections and returns tracked object bounding boxes.\"\"\"\n self.frame_id += 1\n activated_stracks = []\n refind_stracks = []\n lost_stracks = []\n removed_stracks = []\n\n scores = results.conf\n bboxes = results.xyxy\n # Add index\n bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)\n cls = results.cls\n\n remain_inds = scores > self.args.track_high_thresh\n inds_low = scores > self.args.track_low_thresh\n inds_high = scores < self.args.track_high_thresh\n\n inds_second = np.logical_and(inds_low, inds_high)\n dets_second = bboxes[inds_second]\n dets = bboxes[remain_inds]\n scores_keep = scores[remain_inds]\n scores_second = scores[inds_second]\n cls_keep = cls[remain_inds]\n cls_second = cls[inds_second]\n\n detections = self.init_track(dets, scores_keep, cls_keep, img)\n # Add newly detected tracklets to tracked_stracks\n unconfirmed = []\n tracked_stracks = [] # type: list[STrack]\n for track in self.tracked_stracks:\n if not track.is_activated:\n unconfirmed.append(track)\n else:\n tracked_stracks.append(track)\n # Step 2: First association, with high score detection boxes\n strack_pool = 
self.joint_stracks(tracked_stracks, self.lost_stracks)\n # Predict the current location with KF\n self.multi_predict(strack_pool)\n if hasattr(self, 'gmc') and img is not None:\n warp = self.gmc.apply(img, dets)\n STrack.multi_gmc(strack_pool, warp)\n STrack.multi_gmc(unconfirmed, warp)\n\n dists = self.get_dists(strack_pool, detections)\n matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh)\n\n for itracked, idet in matches:\n track = strack_pool[itracked]\n det = detections[idet]\n if track.state == TrackState.Tracked:\n track.update(det, self.frame_id)\n activated_stracks.append(track)\n else:\n track.re_activate(det, self.frame_id, new_id=False)\n refind_stracks.append(track)\n # Step 3: Second association, with low score detection boxes association the untrack to the low score detections\n detections_second = self.init_track(dets_second, scores_second, cls_second, img)\n r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]\n # TODO\n dists = matching.iou_distance(r_tracked_stracks, detections_second)\n matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)\n for itracked, idet in matches:\n track = r_tracked_stracks[itracked]\n det = detections_second[idet]\n if track.state == TrackState.Tracked:\n track.update(det, self.frame_id)\n activated_stracks.append(track)\n else:\n track.re_activate(det, self.frame_id, new_id=False)\n refind_stracks.append(track)\n\n for it in u_track:\n track = r_tracked_stracks[it]\n if track.state != TrackState.Lost:\n track.mark_lost()\n lost_stracks.append(track)\n # Deal with unconfirmed tracks, usually tracks with only one beginning frame\n detections = [detections[i] for i in u_detection]\n dists = self.get_dists(unconfirmed, detections)\n matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)\n for itracked, idet in matches:\n unconfirmed[itracked].update(detections[idet], self.frame_id)\n activated_stracks.append(unconfirmed[itracked])\n for it in u_unconfirmed:\n track = unconfirmed[it]\n track.mark_removed()\n removed_stracks.append(track)\n # Step 4: Init new stracks\n for inew in u_detection:\n track = detections[inew]\n if track.score < self.args.new_track_thresh:\n continue\n track.activate(self.kalman_filter, self.frame_id)\n activated_stracks.append(track)\n # Step 5: Update state\n for track in self.lost_stracks:\n if self.frame_id - track.end_frame > self.max_time_lost:\n track.mark_removed()\n removed_stracks.append(track)\n\n self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]\n self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_stracks)\n self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks)\n self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks)\n self.lost_stracks.extend(lost_stracks)\n self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks)\n self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)\n self.removed_stracks.extend(removed_stracks)\n if len(self.removed_stracks) > 1000:\n self.removed_stracks = self.removed_stracks[-999:] # clip remove stracks to 1000 maximum\n return np.asarray(\n [x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.idx] for x in self.tracked_stracks if x.is_activated],\n dtype=np.float32)\n\n def get_kalmanfilter(self):\n \"\"\"Returns a Kalman filter object for tracking 
bounding boxes.\"\"\"\n return KalmanFilterXYAH()\n\n def init_track(self, dets, scores, cls, img=None):\n \"\"\"Initialize object tracking with detections and scores using STrack algorithm.\"\"\"\n return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections\n\n def get_dists(self, tracks, detections):\n \"\"\"Calculates the distance between tracks and detections using IOU and fuses scores.\"\"\"\n dists = matching.iou_distance(tracks, detections)\n # TODO: mot20\n # if not self.args.mot20:\n dists = matching.fuse_score(dists, detections)\n return dists\n\n def multi_predict(self, tracks):\n \"\"\"Returns the predicted tracks using the YOLOv8 network.\"\"\"\n STrack.multi_predict(tracks)\n\n def reset_id(self):\n \"\"\"Resets the ID counter of STrack.\"\"\"\n STrack.reset_id()\n\n def reset(self):\n \"\"\"Reset tracker.\"\"\"\n self.tracked_stracks = [] # type: list[STrack]\n self.lost_stracks = [] # type: list[STrack]\n self.removed_stracks = [] # type: list[STrack]\n self.frame_id = 0\n self.kalman_filter = self.get_kalmanfilter()\n self.reset_id()\n\n @staticmethod\n def joint_stracks(tlista, tlistb):\n \"\"\"Combine two lists of stracks into a single one.\"\"\"\n exists = {}\n res = []\n for t in tlista:\n exists[t.track_id] = 1\n res.append(t)\n for t in tlistb:\n tid = t.track_id\n if not exists.get(tid, 0):\n exists[tid] = 1\n res.append(t)\n return res\n\n @staticmethod\n def sub_stracks(tlista, tlistb):\n \"\"\"DEPRECATED CODE in https://github.com/ultralytics/ultralytics/pull/1890/\n stracks = {t.track_id: t for t in tlista}\n for t in tlistb:\n tid = t.track_id\n if stracks.get(tid, 0):\n del stracks[tid]\n return list(stracks.values())\n \"\"\"\n track_ids_b = {t.track_id for t in tlistb}\n return [t for t in tlista if t.track_id not in track_ids_b]\n\n @staticmethod\n def remove_duplicate_stracks(stracksa, stracksb):\n \"\"\"Remove duplicate stracks with non-maximum IOU distance.\"\"\"\n pdist = matching.iou_distance(stracksa, stracksb)\n pairs = np.where(pdist < 0.15)\n dupa, dupb = [], []\n for p, q in zip(*pairs):\n timep = stracksa[p].frame_id - stracksa[p].start_frame\n timeq = stracksb[q].frame_id - stracksb[q].start_frame\n if timep > timeq:\n dupb.append(q)\n else:\n dupa.append(p)\n resa = [t for i, t in enumerate(stracksa) if i not in dupa]\n resb = [t for i, t in enumerate(stracksb) if i not in dupb]\n return resa, resb" }, { "identifier": "STrack", "path": "ultralytics/trackers/byte_tracker.py", "snippet": "class STrack(BaseTrack):\n \"\"\"\n Single object tracking representation that uses Kalman filtering for state estimation.\n\n This class is responsible for storing all the information regarding individual tracklets and performs state updates\n and predictions based on Kalman filter.\n\n Attributes:\n shared_kalman (KalmanFilterXYAH): Shared Kalman filter that is used across all STrack instances for prediction.\n _tlwh (np.ndarray): Private attribute to store top-left corner coordinates and width and height of bounding box.\n kalman_filter (KalmanFilterXYAH): Instance of Kalman filter used for this particular object track.\n mean (np.ndarray): Mean state estimate vector.\n covariance (np.ndarray): Covariance of state estimate.\n is_activated (bool): Boolean flag indicating if the track has been activated.\n score (float): Confidence score of the track.\n tracklet_len (int): Length of the tracklet.\n cls (any): Class label for the object.\n idx (int): Index or identifier for the object.\n frame_id (int): Current frame 
ID.\n start_frame (int): Frame where the object was first detected.\n\n Methods:\n predict(): Predict the next state of the object using Kalman filter.\n multi_predict(stracks): Predict the next states for multiple tracks.\n multi_gmc(stracks, H): Update multiple track states using a homography matrix.\n activate(kalman_filter, frame_id): Activate a new tracklet.\n re_activate(new_track, frame_id, new_id): Reactivate a previously lost tracklet.\n update(new_track, frame_id): Update the state of a matched track.\n convert_coords(tlwh): Convert bounding box to x-y-angle-height format.\n tlwh_to_xyah(tlwh): Convert tlwh bounding box to xyah format.\n tlbr_to_tlwh(tlbr): Convert tlbr bounding box to tlwh format.\n tlwh_to_tlbr(tlwh): Convert tlwh bounding box to tlbr format.\n \"\"\"\n\n shared_kalman = KalmanFilterXYAH()\n\n def __init__(self, tlwh, score, cls):\n \"\"\"Initialize new STrack instance.\"\"\"\n self._tlwh = np.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=np.float32)\n self.kalman_filter = None\n self.mean, self.covariance = None, None\n self.is_activated = False\n\n self.score = score\n self.tracklet_len = 0\n self.cls = cls\n self.idx = tlwh[-1]\n\n def predict(self):\n \"\"\"Predicts mean and covariance using Kalman filter.\"\"\"\n mean_state = self.mean.copy()\n if self.state != TrackState.Tracked:\n mean_state[7] = 0\n self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)\n\n @staticmethod\n def multi_predict(stracks):\n \"\"\"Perform multi-object predictive tracking using Kalman filter for given stracks.\"\"\"\n if len(stracks) <= 0:\n return\n multi_mean = np.asarray([st.mean.copy() for st in stracks])\n multi_covariance = np.asarray([st.covariance for st in stracks])\n for i, st in enumerate(stracks):\n if st.state != TrackState.Tracked:\n multi_mean[i][7] = 0\n multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)\n for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):\n stracks[i].mean = mean\n stracks[i].covariance = cov\n\n @staticmethod\n def multi_gmc(stracks, H=np.eye(2, 3)):\n \"\"\"Update state tracks positions and covariances using a homography matrix.\"\"\"\n if len(stracks) > 0:\n multi_mean = np.asarray([st.mean.copy() for st in stracks])\n multi_covariance = np.asarray([st.covariance for st in stracks])\n\n R = H[:2, :2]\n R8x8 = np.kron(np.eye(4, dtype=float), R)\n t = H[:2, 2]\n\n for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):\n mean = R8x8.dot(mean)\n mean[:2] += t\n cov = R8x8.dot(cov).dot(R8x8.transpose())\n\n stracks[i].mean = mean\n stracks[i].covariance = cov\n\n def activate(self, kalman_filter, frame_id):\n \"\"\"Start a new tracklet.\"\"\"\n self.kalman_filter = kalman_filter\n self.track_id = self.next_id()\n self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh))\n\n self.tracklet_len = 0\n self.state = TrackState.Tracked\n if frame_id == 1:\n self.is_activated = True\n self.frame_id = frame_id\n self.start_frame = frame_id\n\n def re_activate(self, new_track, frame_id, new_id=False):\n \"\"\"Reactivates a previously lost track with a new detection.\"\"\"\n self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,\n self.convert_coords(new_track.tlwh))\n self.tracklet_len = 0\n self.state = TrackState.Tracked\n self.is_activated = True\n self.frame_id = frame_id\n if new_id:\n self.track_id = self.next_id()\n self.score = new_track.score\n self.cls = new_track.cls\n self.idx = 
new_track.idx\n\n def update(self, new_track, frame_id):\n \"\"\"\n Update the state of a matched track.\n\n Args:\n new_track (STrack): The new track containing updated information.\n frame_id (int): The ID of the current frame.\n \"\"\"\n self.frame_id = frame_id\n self.tracklet_len += 1\n\n new_tlwh = new_track.tlwh\n self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance,\n self.convert_coords(new_tlwh))\n self.state = TrackState.Tracked\n self.is_activated = True\n\n self.score = new_track.score\n self.cls = new_track.cls\n self.idx = new_track.idx\n\n def convert_coords(self, tlwh):\n \"\"\"Convert a bounding box's top-left-width-height format to its x-y-angle-height equivalent.\"\"\"\n return self.tlwh_to_xyah(tlwh)\n\n @property\n def tlwh(self):\n \"\"\"Get current position in bounding box format (top left x, top left y, width, height).\"\"\"\n if self.mean is None:\n return self._tlwh.copy()\n ret = self.mean[:4].copy()\n ret[2] *= ret[3]\n ret[:2] -= ret[2:] / 2\n return ret\n\n @property\n def tlbr(self):\n \"\"\"Convert bounding box to format (min x, min y, max x, max y), i.e., (top left, bottom right).\"\"\"\n ret = self.tlwh.copy()\n ret[2:] += ret[:2]\n return ret\n\n @staticmethod\n def tlwh_to_xyah(tlwh):\n \"\"\"Convert bounding box to format (center x, center y, aspect ratio, height), where the aspect ratio is width /\n height.\n \"\"\"\n ret = np.asarray(tlwh).copy()\n ret[:2] += ret[2:] / 2\n ret[2] /= ret[3]\n return ret\n\n @staticmethod\n def tlbr_to_tlwh(tlbr):\n \"\"\"Converts top-left bottom-right format to top-left width height format.\"\"\"\n ret = np.asarray(tlbr).copy()\n ret[2:] -= ret[:2]\n return ret\n\n @staticmethod\n def tlwh_to_tlbr(tlwh):\n \"\"\"Converts tlwh bounding box format to tlbr format.\"\"\"\n ret = np.asarray(tlwh).copy()\n ret[2:] += ret[:2]\n return ret\n\n def __repr__(self):\n \"\"\"Return a string representation of the BYTETracker object with start and end frames and track ID.\"\"\"\n return f'OT_{self.track_id}_({self.start_frame}-{self.end_frame})'" }, { "identifier": "matching", "path": "ultralytics/trackers/utils/matching.py", "snippet": "def linear_assignment(cost_matrix, thresh, use_lap=True):\ndef iou_distance(atracks, btracks):\ndef embedding_distance(tracks, detections, metric='cosine'):\ndef fuse_score(cost_matrix, detections):" }, { "identifier": "GMC", "path": "ultralytics/trackers/utils/gmc.py", "snippet": "class GMC:\n \"\"\"\n Generalized Motion Compensation (GMC) class for tracking and object detection in video frames.\n\n This class provides methods for tracking and detecting objects based on several tracking algorithms including ORB,\n SIFT, ECC, and Sparse Optical Flow. It also supports downscaling of frames for computational efficiency.\n\n Attributes:\n method (str): The method used for tracking. 
Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.\n downscale (int): Factor by which to downscale the frames for processing.\n prevFrame (np.array): Stores the previous frame for tracking.\n prevKeyPoints (list): Stores the keypoints from the previous frame.\n prevDescriptors (np.array): Stores the descriptors from the previous frame.\n initializedFirstFrame (bool): Flag to indicate if the first frame has been processed.\n\n Methods:\n __init__(self, method='sparseOptFlow', downscale=2): Initializes a GMC object with the specified method\n and downscale factor.\n apply(self, raw_frame, detections=None): Applies the chosen method to a raw frame and optionally uses\n provided detections.\n applyEcc(self, raw_frame, detections=None): Applies the ECC algorithm to a raw frame.\n applyFeatures(self, raw_frame, detections=None): Applies feature-based methods like ORB or SIFT to a raw frame.\n applySparseOptFlow(self, raw_frame, detections=None): Applies the Sparse Optical Flow method to a raw frame.\n \"\"\"\n\n def __init__(self, method='sparseOptFlow', downscale=2):\n \"\"\"Initialize a video tracker with specified parameters.\"\"\"\n super().__init__()\n\n self.method = method\n self.downscale = max(1, int(downscale))\n\n if self.method == 'orb':\n self.detector = cv2.FastFeatureDetector_create(20)\n self.extractor = cv2.ORB_create()\n self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING)\n\n elif self.method == 'sift':\n self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)\n self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20)\n self.matcher = cv2.BFMatcher(cv2.NORM_L2)\n\n elif self.method == 'ecc':\n number_of_iterations = 5000\n termination_eps = 1e-6\n self.warp_mode = cv2.MOTION_EUCLIDEAN\n self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps)\n\n elif self.method == 'sparseOptFlow':\n self.feature_params = dict(maxCorners=1000,\n qualityLevel=0.01,\n minDistance=1,\n blockSize=3,\n useHarrisDetector=False,\n k=0.04)\n\n elif self.method in ['none', 'None', None]:\n self.method = None\n else:\n raise ValueError(f'Error: Unknown GMC method:{method}')\n\n self.prevFrame = None\n self.prevKeyPoints = None\n self.prevDescriptors = None\n\n self.initializedFirstFrame = False\n\n def apply(self, raw_frame, detections=None):\n \"\"\"Apply object detection on a raw frame using specified method.\"\"\"\n if self.method in ['orb', 'sift']:\n return self.applyFeatures(raw_frame, detections)\n elif self.method == 'ecc':\n return self.applyEcc(raw_frame, detections)\n elif self.method == 'sparseOptFlow':\n return self.applySparseOptFlow(raw_frame, detections)\n else:\n return np.eye(2, 3)\n\n def applyEcc(self, raw_frame, detections=None):\n \"\"\"Initialize.\"\"\"\n height, width, _ = raw_frame.shape\n frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)\n H = np.eye(2, 3, dtype=np.float32)\n\n # Downscale image (TODO: consider using pyramids)\n if self.downscale > 1.0:\n frame = cv2.GaussianBlur(frame, (3, 3), 1.5)\n frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))\n width = width // self.downscale\n height = height // self.downscale\n\n # Handle first frame\n if not self.initializedFirstFrame:\n # Initialize data\n self.prevFrame = frame.copy()\n\n # Initialization done\n self.initializedFirstFrame = True\n\n return H\n\n # Run the ECC algorithm. 
The results are stored in warp_matrix.\n # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria)\n try:\n (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1)\n except Exception as e:\n LOGGER.warning(f'WARNING: find transform failed. Set warp as identity {e}')\n\n return H\n\n def applyFeatures(self, raw_frame, detections=None):\n \"\"\"Initialize.\"\"\"\n height, width, _ = raw_frame.shape\n frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)\n H = np.eye(2, 3)\n\n # Downscale image (TODO: consider using pyramids)\n if self.downscale > 1.0:\n # frame = cv2.GaussianBlur(frame, (3, 3), 1.5)\n frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))\n width = width // self.downscale\n height = height // self.downscale\n\n # Find the keypoints\n mask = np.zeros_like(frame)\n # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255\n mask[int(0.02 * height):int(0.98 * height), int(0.02 * width):int(0.98 * width)] = 255\n if detections is not None:\n for det in detections:\n tlbr = (det[:4] / self.downscale).astype(np.int_)\n mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0\n\n keypoints = self.detector.detect(frame, mask)\n\n # Compute the descriptors\n keypoints, descriptors = self.extractor.compute(frame, keypoints)\n\n # Handle first frame\n if not self.initializedFirstFrame:\n # Initialize data\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n # Initialization done\n self.initializedFirstFrame = True\n\n return H\n\n # Match descriptors.\n knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2)\n\n # Filtered matches based on smallest spatial distance\n matches = []\n spatialDistances = []\n\n maxSpatialDistance = 0.25 * np.array([width, height])\n\n # Handle empty matches case\n if len(knnMatches) == 0:\n # Store to next iteration\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n return H\n\n for m, n in knnMatches:\n if m.distance < 0.9 * n.distance:\n prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt\n currKeyPointLocation = keypoints[m.trainIdx].pt\n\n spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0],\n prevKeyPointLocation[1] - currKeyPointLocation[1])\n\n if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \\\n (np.abs(spatialDistance[1]) < maxSpatialDistance[1]):\n spatialDistances.append(spatialDistance)\n matches.append(m)\n\n meanSpatialDistances = np.mean(spatialDistances, 0)\n stdSpatialDistances = np.std(spatialDistances, 0)\n\n inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances\n\n goodMatches = []\n prevPoints = []\n currPoints = []\n for i in range(len(matches)):\n if inliers[i, 0] and inliers[i, 1]:\n goodMatches.append(matches[i])\n prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)\n currPoints.append(keypoints[matches[i].trainIdx].pt)\n\n prevPoints = np.array(prevPoints)\n currPoints = np.array(currPoints)\n\n # Draw the keypoint matches on the output image\n # if False:\n # import matplotlib.pyplot as plt\n # matches_img = np.hstack((self.prevFrame, frame))\n # matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR)\n # W = np.size(self.prevFrame, 1)\n # for m in goodMatches:\n # prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_)\n # curr_pt = 
np.array(keypoints[m.trainIdx].pt, dtype=np.int_)\n # curr_pt[0] += W\n # color = np.random.randint(0, 255, 3)\n # color = (int(color[0]), int(color[1]), int(color[2]))\n #\n # matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA)\n # matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1)\n # matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1)\n #\n # plt.figure()\n # plt.imshow(matches_img)\n # plt.show()\n\n # Find rigid matrix\n if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)):\n H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)\n\n # Handle downscale\n if self.downscale > 1.0:\n H[0, 2] *= self.downscale\n H[1, 2] *= self.downscale\n else:\n LOGGER.warning('WARNING: not enough matching points')\n\n # Store to next iteration\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n self.prevDescriptors = copy.copy(descriptors)\n\n return H\n\n def applySparseOptFlow(self, raw_frame, detections=None):\n \"\"\"Initialize.\"\"\"\n height, width, _ = raw_frame.shape\n frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)\n H = np.eye(2, 3)\n\n # Downscale image\n if self.downscale > 1.0:\n # frame = cv2.GaussianBlur(frame, (3, 3), 1.5)\n frame = cv2.resize(frame, (width // self.downscale, height // self.downscale))\n\n # Find the keypoints\n keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params)\n\n # Handle first frame\n if not self.initializedFirstFrame:\n # Initialize data\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n\n # Initialization done\n self.initializedFirstFrame = True\n\n return H\n\n # Find correspondences\n matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None)\n\n # Leave good correspondences only\n prevPoints = []\n currPoints = []\n\n for i in range(len(status)):\n if status[i]:\n prevPoints.append(self.prevKeyPoints[i])\n currPoints.append(matchedKeypoints[i])\n\n prevPoints = np.array(prevPoints)\n currPoints = np.array(currPoints)\n\n # Find rigid matrix\n if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)):\n H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)\n\n # Handle downscale\n if self.downscale > 1.0:\n H[0, 2] *= self.downscale\n H[1, 2] *= self.downscale\n else:\n LOGGER.warning('WARNING: not enough matching points')\n\n # Store to next iteration\n self.prevFrame = frame.copy()\n self.prevKeyPoints = copy.copy(keypoints)\n\n return H\n\n def reset_params(self):\n \"\"\"Reset parameters.\"\"\"\n self.prevFrame = None\n self.prevKeyPoints = None\n self.prevDescriptors = None\n self.initializedFirstFrame = False" }, { "identifier": "KalmanFilterXYWH", "path": "ultralytics/trackers/utils/kalman_filter.py", "snippet": "class KalmanFilterXYWH(KalmanFilterXYAH):\n \"\"\"\n For BoT-SORT. A simple Kalman filter for tracking bounding boxes in image space.\n\n The 8-dimensional state space (x, y, w, h, vx, vy, vw, vh) contains the bounding box center position (x, y), width\n w, height h, and their respective velocities.\n\n Object motion follows a constant velocity model. 
The bounding box location (x, y, w, h) is taken as direct\n observation of the state space (linear observation model).\n \"\"\"\n\n def initiate(self, measurement):\n \"\"\"\n Create track from unassociated measurement.\n\n Parameters\n ----------\n measurement : ndarray\n Bounding box coordinates (x, y, w, h) with center position (x, y), width w, and height h.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector (8 dimensional) and covariance matrix (8x8 dimensional) of the new track.\n Unobserved velocities are initialized to 0 mean.\n \"\"\"\n mean_pos = measurement\n mean_vel = np.zeros_like(mean_pos)\n mean = np.r_[mean_pos, mean_vel]\n\n std = [\n 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3],\n 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3],\n 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3],\n 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3]]\n covariance = np.diag(np.square(std))\n return mean, covariance\n\n def predict(self, mean, covariance):\n \"\"\"\n Run Kalman filter prediction step.\n\n Parameters\n ----------\n mean : ndarray\n The 8 dimensional mean vector of the object state at the previous time step.\n covariance : ndarray\n The 8x8 dimensional covariance matrix of the object state at the previous time step.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector and covariance matrix of the predicted state. Unobserved velocities are\n initialized to 0 mean.\n \"\"\"\n std_pos = [\n self._std_weight_position * mean[2], self._std_weight_position * mean[3],\n self._std_weight_position * mean[2], self._std_weight_position * mean[3]]\n std_vel = [\n self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3],\n self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3]]\n motion_cov = np.diag(np.square(np.r_[std_pos, std_vel]))\n\n mean = np.dot(mean, self._motion_mat.T)\n covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov\n\n return mean, covariance\n\n def project(self, mean, covariance):\n \"\"\"\n Project state distribution to measurement space.\n\n Parameters\n ----------\n mean : ndarray\n The state's mean vector (8 dimensional array).\n covariance : ndarray\n The state's covariance matrix (8x8 dimensional).\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the projected mean and covariance matrix of the given state estimate.\n \"\"\"\n std = [\n self._std_weight_position * mean[2], self._std_weight_position * mean[3],\n self._std_weight_position * mean[2], self._std_weight_position * mean[3]]\n innovation_cov = np.diag(np.square(std))\n\n mean = np.dot(self._update_mat, mean)\n covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T))\n return mean, covariance + innovation_cov\n\n def multi_predict(self, mean, covariance):\n \"\"\"\n Run Kalman filter prediction step (Vectorized version).\n\n Parameters\n ----------\n mean : ndarray\n The Nx8 dimensional mean matrix of the object states at the previous time step.\n covariance : ndarray\n The Nx8x8 dimensional covariance matrix of the object states at the previous time step.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the mean vector and covariance matrix of the predicted state. 
Unobserved velocities are\n initialized to 0 mean.\n \"\"\"\n std_pos = [\n self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3],\n self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3]]\n std_vel = [\n self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3],\n self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3]]\n sqr = np.square(np.r_[std_pos, std_vel]).T\n\n motion_cov = [np.diag(sqr[i]) for i in range(len(mean))]\n motion_cov = np.asarray(motion_cov)\n\n mean = np.dot(mean, self._motion_mat.T)\n left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2))\n covariance = np.dot(left, self._motion_mat.T) + motion_cov\n\n return mean, covariance\n\n def update(self, mean, covariance, measurement):\n \"\"\"\n Run Kalman filter correction step.\n\n Parameters\n ----------\n mean : ndarray\n The predicted state's mean vector (8 dimensional).\n covariance : ndarray\n The state's covariance matrix (8x8 dimensional).\n measurement : ndarray\n The 4 dimensional measurement vector (x, y, w, h), where (x, y) is the center position, w the width,\n and h the height of the bounding box.\n\n Returns\n -------\n (ndarray, ndarray)\n Returns the measurement-corrected state distribution.\n \"\"\"\n return super().update(mean, covariance, measurement)" } ]
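The context snippets for this row center on a constant-velocity Kalman filter over an (x, y, w, h, vx, vy, vw, vh) state. As a minimal illustrative sketch only (not part of the dataset row; the function name and the noise weights are assumptions), one prediction step in that state layout looks roughly like this:

import numpy as np

def predict_xywh(mean, covariance, dt=1.0, std_w_pos=1 / 20, std_w_vel=1 / 160):
    """One constant-velocity prediction step for an 8-dim (x, y, w, h, vx, vy, vw, vh) state."""
    motion_mat = np.eye(8)
    for i in range(4):
        motion_mat[i, 4 + i] = dt  # each position component gains velocity * dt
    w, h = mean[2], mean[3]
    # Process noise scaled by box size, mirroring the std_pos/std_vel pattern in the snippet above
    std = [std_w_pos * w, std_w_pos * h, std_w_pos * w, std_w_pos * h,
           std_w_vel * w, std_w_vel * h, std_w_vel * w, std_w_vel * h]
    motion_cov = np.diag(np.square(std))
    mean = motion_mat @ mean
    covariance = motion_mat @ covariance @ motion_mat.T + motion_cov
    return mean, covariance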
from collections import deque from .basetrack import TrackState from .byte_tracker import BYTETracker, STrack from .utils import matching from .utils.gmc import GMC from .utils.kalman_filter import KalmanFilterXYWH import numpy as np
11217
bo_track.update(new_track, frame_id) """ shared_kalman = KalmanFilterXYWH() def __init__(self, tlwh, score, cls, feat=None, feat_history=50): """Initialize YOLOv8 object with temporal parameters, such as feature history, alpha and current features.""" super().__init__(tlwh, score, cls) self.smooth_feat = None self.curr_feat = None if feat is not None: self.update_features(feat) self.features = deque([], maxlen=feat_history) self.alpha = 0.9 def update_features(self, feat): """Update features vector and smooth it using exponential moving average.""" feat /= np.linalg.norm(feat) self.curr_feat = feat if self.smooth_feat is None: self.smooth_feat = feat else: self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat self.features.append(feat) self.smooth_feat /= np.linalg.norm(self.smooth_feat) def predict(self): """Predicts the mean and covariance using Kalman filter.""" mean_state = self.mean.copy() if self.state != TrackState.Tracked: mean_state[6] = 0 mean_state[7] = 0 self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) def re_activate(self, new_track, frame_id, new_id=False): """Reactivates a track with updated features and optionally assigns a new ID.""" if new_track.curr_feat is not None: self.update_features(new_track.curr_feat) super().re_activate(new_track, frame_id, new_id) def update(self, new_track, frame_id): """Update the YOLOv8 instance with new track and frame ID.""" if new_track.curr_feat is not None: self.update_features(new_track.curr_feat) super().update(new_track, frame_id) @property def tlwh(self): """Get current position in bounding box format `(top left x, top left y, width, height)`.""" if self.mean is None: return self._tlwh.copy() ret = self.mean[:4].copy() ret[:2] -= ret[2:] / 2 return ret @staticmethod def multi_predict(stracks): """Predicts the mean and covariance of multiple object tracks using shared Kalman filter.""" if len(stracks) <= 0: return multi_mean = np.asarray([st.mean.copy() for st in stracks]) multi_covariance = np.asarray([st.covariance for st in stracks]) for i, st in enumerate(stracks): if st.state != TrackState.Tracked: multi_mean[i][6] = 0 multi_mean[i][7] = 0 multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance) for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): stracks[i].mean = mean stracks[i].covariance = cov def convert_coords(self, tlwh): """Converts Top-Left-Width-Height bounding box coordinates to X-Y-Width-Height format.""" return self.tlwh_to_xywh(tlwh) @staticmethod def tlwh_to_xywh(tlwh): """Convert bounding box to format `(center x, center y, width, height)`.""" ret = np.asarray(tlwh).copy() ret[:2] += ret[2:] / 2 return ret class BOTSORT(BYTETracker): """ An extended version of the BYTETracker class for YOLOv8, designed for object tracking with ReID and GMC algorithm. Attributes: proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections. appearance_thresh (float): Threshold for appearance similarity (ReID embeddings) between tracks and detections. encoder (object): Object to handle ReID embeddings, set to None if ReID is not enabled. gmc (GMC): An instance of the GMC algorithm for data association. args (object): Parsed command-line arguments containing tracking parameters. Methods: get_kalmanfilter(): Returns an instance of KalmanFilterXYWH for object tracking. init_track(dets, scores, cls, img): Initialize track with detections, scores, and classes. 
get_dists(tracks, detections): Get distances between tracks and detections using IoU and (optionally) ReID. multi_predict(tracks): Predict and track multiple objects with YOLOv8 model. Usage: bot_sort = BOTSORT(args, frame_rate) bot_sort.init_track(dets, scores, cls, img) bot_sort.multi_predict(tracks) Note: The class is designed to work with the YOLOv8 object detection model and supports ReID only if enabled via args. """ def __init__(self, args, frame_rate=30): """Initialize YOLOv8 object with ReID module and GMC algorithm.""" super().__init__(args, frame_rate) # ReID module self.proximity_thresh = args.proximity_thresh self.appearance_thresh = args.appearance_thresh if args.with_reid: # Haven't supported BoT-SORT(reid) yet self.encoder = None
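The cropped_code above ends inside BOTrack, whose update_features method maintains an exponentially smoothed appearance embedding. A standalone sketch of that smoothing (illustrative only; alpha=0.9 is taken from the snippet, the helper name is an assumption):

import numpy as np

def smooth_feature(smooth_feat, new_feat, alpha=0.9):
    """L2-normalised exponential moving average of appearance embeddings."""
    new_feat = new_feat / np.linalg.norm(new_feat)
    if smooth_feat is None:
        return new_feat  # first observation seeds the running average
    smooth_feat = alpha * smooth_feat + (1 - alpha) * new_feat
    return smooth_feat / np.linalg.norm(smooth_feat)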
# Ultralytics YOLO 🚀, AGPL-3.0 license class BOTrack(STrack): """ An extended version of the STrack class for YOLOv8, adding object tracking features. Attributes: shared_kalman (KalmanFilterXYWH): A shared Kalman filter for all instances of BOTrack. smooth_feat (np.ndarray): Smoothed feature vector. curr_feat (np.ndarray): Current feature vector. features (deque): A deque to store feature vectors with a maximum length defined by `feat_history`. alpha (float): Smoothing factor for the exponential moving average of features. mean (np.ndarray): The mean state of the Kalman filter. covariance (np.ndarray): The covariance matrix of the Kalman filter. Methods: update_features(feat): Update features vector and smooth it using exponential moving average. predict(): Predicts the mean and covariance using Kalman filter. re_activate(new_track, frame_id, new_id): Reactivates a track with updated features and optionally new ID. update(new_track, frame_id): Update the YOLOv8 instance with new track and frame ID. tlwh: Property that gets the current position in tlwh format `(top left x, top left y, width, height)`. multi_predict(stracks): Predicts the mean and covariance of multiple object tracks using shared Kalman filter. convert_coords(tlwh): Converts tlwh bounding box coordinates to xywh format. tlwh_to_xywh(tlwh): Convert bounding box to xywh format `(center x, center y, width, height)`. Usage: bo_track = BOTrack(tlwh, score, cls, feat) bo_track.predict() bo_track.update(new_track, frame_id) """ shared_kalman = KalmanFilterXYWH() def __init__(self, tlwh, score, cls, feat=None, feat_history=50): """Initialize YOLOv8 object with temporal parameters, such as feature history, alpha and current features.""" super().__init__(tlwh, score, cls) self.smooth_feat = None self.curr_feat = None if feat is not None: self.update_features(feat) self.features = deque([], maxlen=feat_history) self.alpha = 0.9 def update_features(self, feat): """Update features vector and smooth it using exponential moving average.""" feat /= np.linalg.norm(feat) self.curr_feat = feat if self.smooth_feat is None: self.smooth_feat = feat else: self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat self.features.append(feat) self.smooth_feat /= np.linalg.norm(self.smooth_feat) def predict(self): """Predicts the mean and covariance using Kalman filter.""" mean_state = self.mean.copy() if self.state != TrackState.Tracked: mean_state[6] = 0 mean_state[7] = 0 self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) def re_activate(self, new_track, frame_id, new_id=False): """Reactivates a track with updated features and optionally assigns a new ID.""" if new_track.curr_feat is not None: self.update_features(new_track.curr_feat) super().re_activate(new_track, frame_id, new_id) def update(self, new_track, frame_id): """Update the YOLOv8 instance with new track and frame ID.""" if new_track.curr_feat is not None: self.update_features(new_track.curr_feat) super().update(new_track, frame_id) @property def tlwh(self): """Get current position in bounding box format `(top left x, top left y, width, height)`.""" if self.mean is None: return self._tlwh.copy() ret = self.mean[:4].copy() ret[:2] -= ret[2:] / 2 return ret @staticmethod def multi_predict(stracks): """Predicts the mean and covariance of multiple object tracks using shared Kalman filter.""" if len(stracks) <= 0: return multi_mean = np.asarray([st.mean.copy() for st in stracks]) multi_covariance = np.asarray([st.covariance for st in stracks]) 
for i, st in enumerate(stracks): if st.state != TrackState.Tracked: multi_mean[i][6] = 0 multi_mean[i][7] = 0 multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance) for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): stracks[i].mean = mean stracks[i].covariance = cov def convert_coords(self, tlwh): """Converts Top-Left-Width-Height bounding box coordinates to X-Y-Width-Height format.""" return self.tlwh_to_xywh(tlwh) @staticmethod def tlwh_to_xywh(tlwh): """Convert bounding box to format `(center x, center y, width, height)`.""" ret = np.asarray(tlwh).copy() ret[:2] += ret[2:] / 2 return ret class BOTSORT(BYTETracker): """ An extended version of the BYTETracker class for YOLOv8, designed for object tracking with ReID and GMC algorithm. Attributes: proximity_thresh (float): Threshold for spatial proximity (IoU) between tracks and detections. appearance_thresh (float): Threshold for appearance similarity (ReID embeddings) between tracks and detections. encoder (object): Object to handle ReID embeddings, set to None if ReID is not enabled. gmc (GMC): An instance of the GMC algorithm for data association. args (object): Parsed command-line arguments containing tracking parameters. Methods: get_kalmanfilter(): Returns an instance of KalmanFilterXYWH for object tracking. init_track(dets, scores, cls, img): Initialize track with detections, scores, and classes. get_dists(tracks, detections): Get distances between tracks and detections using IoU and (optionally) ReID. multi_predict(tracks): Predict and track multiple objects with YOLOv8 model. Usage: bot_sort = BOTSORT(args, frame_rate) bot_sort.init_track(dets, scores, cls, img) bot_sort.multi_predict(tracks) Note: The class is designed to work with the YOLOv8 object detection model and supports ReID only if enabled via args. """ def __init__(self, args, frame_rate=30): """Initialize YOLOv8 object with ReID module and GMC algorithm.""" super().__init__(args, frame_rate) # ReID module self.proximity_thresh = args.proximity_thresh self.appearance_thresh = args.appearance_thresh if args.with_reid: # Haven't supported BoT-SORT(reid) yet self.encoder = None
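The all_code field ends at the same crop point. One detail worth a worked example: BOTrack.convert_coords maps a (top-left x, top-left y, width, height) box to its (center x, center y, width, height) form. An illustrative sketch with a tiny numeric check (not part of the row):

import numpy as np

def tlwh_to_xywh(tlwh):
    ret = np.asarray(tlwh, dtype=float).copy()
    ret[:2] += ret[2:] / 2  # shift the top-left corner to the box centre
    return ret

print(tlwh_to_xywh([10, 20, 4, 8]))  # -> [12. 24.  4.  8.]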
self.gmc = GMC(method=args.gmc_method)
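The next_line field above constructs the GMC camera-motion compensator. The GMC snippet in the context returns a 2x3 affine matrix H, which STrack.multi_gmc then applies to each track's 8-dim mean. A sketch of that application for a single mean vector (illustrative only; the function name is an assumption):

import numpy as np

def compensate_mean(mean, H):
    """Apply a 2x3 camera-motion affine to an (x, y, w, h, vx, vy, vw, vh) mean."""
    R = H[:2, :2]                # rotation/scale block
    t = H[:2, 2]                 # translation
    R8 = np.kron(np.eye(4), R)   # repeat R along the diagonal for the 8-dim state
    mean = R8 @ mean
    mean[:2] += t                # only the position picks up the translation
    return mean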
4
2023-11-16 12:49:59+00:00
16k
i-super/Saleor
saleor/graphql/product/tests/test_category.py
[ { "identifier": "CustomJsonEncoder", "path": "saleor/core/utils/json_serializer.py", "snippet": "class CustomJsonEncoder(DjangoJSONEncoder):\n def default(self, obj):\n if isinstance(obj, Money):\n return {\"_type\": MONEY_TYPE, \"amount\": obj.amount, \"currency\": obj.currency}\n # Mirror implementation of django_measurement.MeasurementField.value_to_string\n if isinstance(obj, Weight):\n return f\"{obj.value}:{obj.unit}\"\n return super().default(obj)" }, { "identifier": "ProductErrorCode", "path": "saleor/product/error_codes.py", "snippet": "class ProductErrorCode(Enum):\n ALREADY_EXISTS = \"already_exists\"\n ATTRIBUTE_ALREADY_ASSIGNED = \"attribute_already_assigned\"\n ATTRIBUTE_CANNOT_BE_ASSIGNED = \"attribute_cannot_be_assigned\"\n ATTRIBUTE_VARIANTS_DISABLED = \"attribute_variants_disabled\"\n MEDIA_ALREADY_ASSIGNED = \"media_already_assigned\"\n DUPLICATED_INPUT_ITEM = \"duplicated_input_item\"\n GRAPHQL_ERROR = \"graphql_error\"\n INVALID = \"invalid\"\n INVALID_PRICE = \"invalid_price\"\n PRODUCT_WITHOUT_CATEGORY = \"product_without_category\"\n NOT_PRODUCTS_IMAGE = \"not_products_image\"\n NOT_PRODUCTS_VARIANT = \"not_products_variant\"\n NOT_FOUND = \"not_found\"\n REQUIRED = \"required\"\n UNIQUE = \"unique\"\n VARIANT_NO_DIGITAL_CONTENT = \"variant_no_digital_content\"\n CANNOT_MANAGE_PRODUCT_WITHOUT_VARIANT = \"cannot_manage_product_without_variant\"\n PRODUCT_NOT_ASSIGNED_TO_CHANNEL = \"product_not_assigned_to_channel\"\n UNSUPPORTED_MEDIA_PROVIDER = \"unsupported_media_provider\"\n PREORDER_VARIANT_CANNOT_BE_DEACTIVATED = \"preorder_variant_cannot_be_deactivated\"" }, { "identifier": "Category", "path": "saleor/product/models.py", "snippet": "class Category(ModelWithMetadata, MPTTModel, SeoModel):\n name = models.CharField(max_length=250)\n slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)\n description = SanitizedJSONField(blank=True, null=True, sanitizer=clean_editor_js)\n description_plaintext = TextField(blank=True)\n updated_at = models.DateTimeField(auto_now=True, blank=True, null=True)\n parent = models.ForeignKey(\n \"self\", null=True, blank=True, related_name=\"children\", on_delete=models.CASCADE\n )\n background_image = models.ImageField(\n upload_to=\"category-backgrounds\", blank=True, null=True\n )\n background_image_alt = models.CharField(max_length=128, blank=True)\n\n objects = models.Manager()\n tree = TreeManager() # type: ignore[django-manager-missing]\n\n class Meta:\n indexes = [\n *ModelWithMetadata.Meta.indexes,\n GinIndex(\n name=\"category_search_name_slug_gin\",\n # `opclasses` and `fields` should be the same length\n fields=[\"name\", \"slug\", \"description_plaintext\"],\n opclasses=[\"gin_trgm_ops\"] * 3,\n ),\n BTreeIndex(fields=[\"updated_at\"], name=\"updated_at_idx\"),\n ]\n\n def __str__(self) -> str:\n return self.name" }, { "identifier": "Product", "path": "saleor/product/models.py", "snippet": "class Product(SeoModel, ModelWithMetadata, ModelWithExternalReference):\n product_type = models.ForeignKey(\n ProductType, related_name=\"products\", on_delete=models.CASCADE\n )\n name = models.CharField(max_length=250)\n slug = models.SlugField(max_length=255, unique=True, allow_unicode=True)\n description = SanitizedJSONField(blank=True, null=True, sanitizer=clean_editor_js)\n description_plaintext = TextField(blank=True)\n search_document = models.TextField(blank=True, default=\"\")\n search_vector = SearchVectorField(blank=True, null=True)\n search_index_dirty = models.BooleanField(default=False, 
db_index=True)\n\n category = models.ForeignKey(\n Category,\n related_name=\"products\",\n on_delete=models.SET_NULL,\n null=True,\n blank=True,\n )\n created_at = models.DateTimeField(auto_now_add=True, db_index=True)\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n weight = MeasurementField(\n measurement=Weight,\n unit_choices=WeightUnits.CHOICES,\n blank=True,\n null=True,\n )\n default_variant = models.OneToOneField(\n \"ProductVariant\",\n blank=True,\n null=True,\n on_delete=models.SET_NULL,\n related_name=\"+\",\n )\n rating = models.FloatField(null=True, blank=True)\n tax_class = models.ForeignKey(\n TaxClass,\n related_name=\"products\",\n blank=True,\n null=True,\n on_delete=models.SET_NULL,\n )\n\n objects = managers.ProductManager()\n\n class Meta:\n app_label = \"product\"\n ordering = (\"slug\",)\n permissions = (\n (ProductPermissions.MANAGE_PRODUCTS.codename, \"Manage products.\"),\n )\n indexes = [\n GinIndex(\n name=\"product_search_gin\",\n fields=[\"search_document\"],\n opclasses=[\"gin_trgm_ops\"],\n ),\n GinIndex(\n name=\"product_tsearch\",\n fields=[\"search_vector\"],\n ),\n GinIndex(\n name=\"product_gin\",\n fields=[\"name\", \"slug\"],\n opclasses=[\"gin_trgm_ops\"] * 2,\n ),\n ]\n indexes.extend(ModelWithMetadata.Meta.indexes)\n\n def __iter__(self):\n if not hasattr(self, \"__variants\"):\n setattr(self, \"__variants\", self.variants.all())\n return iter(getattr(self, \"__variants\"))\n\n def __repr__(self) -> str:\n class_ = type(self)\n return f\"<{class_.__module__}.{class_.__name__}(pk={self.pk!r}, name={self.name!r})>\"\n\n def __str__(self) -> str:\n return self.name\n\n def get_first_image(self):\n all_media = self.media.all()\n images = [media for media in all_media if media.type == ProductMediaTypes.IMAGE]\n return images[0] if images else None\n\n @staticmethod\n def sort_by_attribute_fields() -> list:\n return [\"concatenated_values_order\", \"concatenated_values\", \"name\"]" }, { "identifier": "ProductChannelListing", "path": "saleor/product/models.py", "snippet": "class ProductChannelListing(PublishableModel):\n product = models.ForeignKey(\n Product,\n null=False,\n blank=False,\n related_name=\"channel_listings\",\n on_delete=models.CASCADE,\n )\n channel = models.ForeignKey(\n Channel,\n null=False,\n blank=False,\n related_name=\"product_listings\",\n on_delete=models.CASCADE,\n )\n visible_in_listings = models.BooleanField(default=False)\n available_for_purchase_at = models.DateTimeField(blank=True, null=True)\n currency = models.CharField(max_length=settings.DEFAULT_CURRENCY_CODE_LENGTH)\n discounted_price_amount = models.DecimalField(\n max_digits=settings.DEFAULT_MAX_DIGITS,\n decimal_places=settings.DEFAULT_DECIMAL_PLACES,\n blank=True,\n null=True,\n )\n discounted_price = MoneyField(\n amount_field=\"discounted_price_amount\", currency_field=\"currency\"\n )\n\n class Meta:\n unique_together = [[\"product\", \"channel\"]]\n ordering = (\"pk\",)\n indexes = [\n models.Index(fields=[\"published_at\"]),\n BTreeIndex(fields=[\"discounted_price_amount\"]),\n ]\n\n def is_available_for_purchase(self):\n return (\n self.available_for_purchase_at is not None\n and datetime.datetime.now(pytz.UTC) >= self.available_for_purchase_at\n )" }, { "identifier": "create_image", "path": "saleor/product/tests/utils.py", "snippet": "def create_image(image_name=\"product2\"):\n img_data = BytesIO()\n image = Image.new(\"RGB\", size=(1, 1), color=(255, 0, 0, 0))\n image.save(img_data, format=\"JPEG\")\n image = 
SimpleUploadedFile(image_name + \".jpg\", img_data.getvalue(), \"image/jpeg\")\n return image, image_name" }, { "identifier": "create_zip_file_with_image_ext", "path": "saleor/product/tests/utils.py", "snippet": "def create_zip_file_with_image_ext():\n file_name = \"product.jpg\"\n file_data = SimpleUploadedFile(file_name, b\"product_data\", \"application/zip\")\n return file_data, file_name" }, { "identifier": "get_product_costs_data", "path": "saleor/product/utils/costs.py", "snippet": "def get_product_costs_data(\n variant_channel_listings: Iterable[ProductVariantChannelListing],\n has_variants: bool,\n currency: str,\n) -> tuple[MoneyRange, tuple[float, float]]:\n purchase_costs_range = MoneyRange(\n start=zero_money(currency), stop=zero_money(currency)\n )\n margin = (0.0, 0.0)\n\n if not has_variants:\n return purchase_costs_range, margin\n\n costs_data = get_cost_data_from_variant_channel_listing(variant_channel_listings)\n if costs_data.costs:\n purchase_costs_range = MoneyRange(min(costs_data.costs), max(costs_data.costs))\n if costs_data.margins:\n margin = (costs_data.margins[0], costs_data.margins[-1])\n return purchase_costs_range, margin" }, { "identifier": "dummy_editorjs", "path": "saleor/tests/utils.py", "snippet": "def dummy_editorjs(text, json_format=False):\n data = {\"blocks\": [{\"data\": {\"text\": text}, \"type\": \"paragraph\"}]}\n return json.dumps(data) if json_format else data" }, { "identifier": "Thumbnail", "path": "saleor/thumbnail/models.py", "snippet": "class Thumbnail(models.Model):\n image = models.ImageField(upload_to=\"thumbnails\")\n size = models.PositiveIntegerField(validators=[validate_thumbnail_size])\n format = models.CharField(\n max_length=32, null=True, blank=True, choices=ThumbnailFormat.CHOICES\n )\n category = models.ForeignKey(\n Category,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"thumbnails\",\n )\n collection = models.ForeignKey(\n Collection,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"thumbnails\",\n )\n product_media = models.ForeignKey(\n ProductMedia,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"thumbnails\",\n )\n user = models.ForeignKey(\n User, null=True, blank=True, on_delete=models.CASCADE, related_name=\"thumbnails\"\n )\n app = models.ForeignKey(\n App, null=True, blank=True, on_delete=models.CASCADE, related_name=\"thumbnails\"\n )\n app_installation = models.ForeignKey(\n AppInstallation,\n null=True,\n blank=True,\n on_delete=models.CASCADE,\n related_name=\"thumbnails\",\n )" }, { "identifier": "WebhookEventAsyncType", "path": "saleor/webhook/event_types.py", "snippet": "class WebhookEventAsyncType:\n ANY = \"any_events\"\n\n ACCOUNT_CONFIRMATION_REQUESTED = \"account_confirmation_requested\"\n ACCOUNT_EMAIL_CHANGED = \"account_email_changed\"\n ACCOUNT_CHANGE_EMAIL_REQUESTED = \"account_change_email_requested\"\n ACCOUNT_SET_PASSWORD_REQUESTED = \"account_set_password_requested\"\n ACCOUNT_CONFIRMED = \"account_confirmed\"\n ACCOUNT_DELETE_REQUESTED = \"account_delete_requested\"\n ACCOUNT_DELETED = \"account_deleted\"\n\n ADDRESS_CREATED = \"address_created\"\n ADDRESS_UPDATED = \"address_updated\"\n ADDRESS_DELETED = \"address_deleted\"\n\n APP_INSTALLED = \"app_installed\"\n APP_UPDATED = \"app_updated\"\n APP_DELETED = \"app_deleted\"\n APP_STATUS_CHANGED = \"app_status_changed\"\n\n ATTRIBUTE_CREATED = \"attribute_created\"\n ATTRIBUTE_UPDATED = \"attribute_updated\"\n ATTRIBUTE_DELETED = \"attribute_deleted\"\n\n 
ATTRIBUTE_VALUE_CREATED = \"attribute_value_created\"\n ATTRIBUTE_VALUE_UPDATED = \"attribute_value_updated\"\n ATTRIBUTE_VALUE_DELETED = \"attribute_value_deleted\"\n\n CATEGORY_CREATED = \"category_created\"\n CATEGORY_UPDATED = \"category_updated\"\n CATEGORY_DELETED = \"category_deleted\"\n\n CHANNEL_CREATED = \"channel_created\"\n CHANNEL_UPDATED = \"channel_updated\"\n CHANNEL_DELETED = \"channel_deleted\"\n CHANNEL_STATUS_CHANGED = \"channel_status_changed\"\n CHANNEL_METADATA_UPDATED = \"channel_metadata_updated\"\n\n GIFT_CARD_CREATED = \"gift_card_created\"\n GIFT_CARD_UPDATED = \"gift_card_updated\"\n GIFT_CARD_DELETED = \"gift_card_deleted\"\n GIFT_CARD_SENT = \"gift_card_sent\"\n GIFT_CARD_STATUS_CHANGED = \"gift_card_status_changed\"\n GIFT_CARD_METADATA_UPDATED = \"gift_card_metadata_updated\"\n GIFT_CARD_EXPORT_COMPLETED = \"gift_card_export_completed\"\n\n MENU_CREATED = \"menu_created\"\n MENU_UPDATED = \"menu_updated\"\n MENU_DELETED = \"menu_deleted\"\n MENU_ITEM_CREATED = \"menu_item_created\"\n MENU_ITEM_UPDATED = \"menu_item_updated\"\n MENU_ITEM_DELETED = \"menu_item_deleted\"\n\n ORDER_CREATED = \"order_created\"\n ORDER_CONFIRMED = \"order_confirmed\"\n ORDER_PAID = \"order_paid\"\n ORDER_FULLY_PAID = \"order_fully_paid\"\n ORDER_REFUNDED = \"order_refunded\"\n ORDER_FULLY_REFUNDED = \"order_fully_refunded\"\n ORDER_UPDATED = \"order_updated\"\n ORDER_CANCELLED = \"order_cancelled\"\n ORDER_EXPIRED = \"order_expired\"\n ORDER_FULFILLED = \"order_fulfilled\"\n ORDER_METADATA_UPDATED = \"order_metadata_updated\"\n ORDER_BULK_CREATED = \"order_bulk_created\"\n\n FULFILLMENT_CREATED = \"fulfillment_created\"\n FULFILLMENT_CANCELED = \"fulfillment_canceled\"\n FULFILLMENT_APPROVED = \"fulfillment_approved\"\n FULFILLMENT_METADATA_UPDATED = \"fulfillment_metadata_updated\"\n FULFILLMENT_TRACKING_NUMBER_UPDATED = \"fulfillment_tracking_number_updated\"\n\n DRAFT_ORDER_CREATED = \"draft_order_created\"\n DRAFT_ORDER_UPDATED = \"draft_order_updated\"\n DRAFT_ORDER_DELETED = \"draft_order_deleted\"\n\n SALE_CREATED = \"sale_created\"\n SALE_UPDATED = \"sale_updated\"\n SALE_DELETED = \"sale_deleted\"\n SALE_TOGGLE = \"sale_toggle\"\n\n PROMOTION_CREATED = \"promotion_created\"\n PROMOTION_UPDATED = \"promotion_updated\"\n PROMOTION_DELETED = \"promotion_deleted\"\n PROMOTION_STARTED = \"promotion_started\"\n PROMOTION_ENDED = \"promotion_ended\"\n\n PROMOTION_RULE_CREATED = \"promotion_rule_created\"\n PROMOTION_RULE_UPDATED = \"promotion_rule_updated\"\n PROMOTION_RULE_DELETED = \"promotion_rule_deleted\"\n\n INVOICE_REQUESTED = \"invoice_requested\"\n INVOICE_DELETED = \"invoice_deleted\"\n INVOICE_SENT = \"invoice_sent\"\n\n CUSTOMER_CREATED = \"customer_created\"\n CUSTOMER_UPDATED = \"customer_updated\"\n CUSTOMER_DELETED = \"customer_deleted\"\n CUSTOMER_METADATA_UPDATED = \"customer_metadata_updated\"\n\n COLLECTION_CREATED = \"collection_created\"\n COLLECTION_UPDATED = \"collection_updated\"\n COLLECTION_DELETED = \"collection_deleted\"\n COLLECTION_METADATA_UPDATED = \"collection_metadata_updated\"\n\n PRODUCT_CREATED = \"product_created\"\n PRODUCT_UPDATED = \"product_updated\"\n PRODUCT_DELETED = \"product_deleted\"\n PRODUCT_METADATA_UPDATED = \"product_metadata_updated\"\n PRODUCT_EXPORT_COMPLETED = \"product_export_completed\"\n\n PRODUCT_MEDIA_CREATED = \"product_media_created\"\n PRODUCT_MEDIA_UPDATED = \"product_media_updated\"\n PRODUCT_MEDIA_DELETED = \"product_media_deleted\"\n\n PRODUCT_VARIANT_CREATED = \"product_variant_created\"\n 
PRODUCT_VARIANT_UPDATED = \"product_variant_updated\"\n PRODUCT_VARIANT_DELETED = \"product_variant_deleted\"\n PRODUCT_VARIANT_METADATA_UPDATED = \"product_variant_metadata_updated\"\n\n PRODUCT_VARIANT_OUT_OF_STOCK = \"product_variant_out_of_stock\"\n PRODUCT_VARIANT_BACK_IN_STOCK = \"product_variant_back_in_stock\"\n PRODUCT_VARIANT_STOCK_UPDATED = \"product_variant_stock_updated\"\n\n CHECKOUT_CREATED = \"checkout_created\"\n CHECKOUT_UPDATED = \"checkout_updated\"\n CHECKOUT_FULLY_PAID = \"checkout_fully_paid\"\n CHECKOUT_METADATA_UPDATED = \"checkout_metadata_updated\"\n\n NOTIFY_USER = \"notify_user\" # deprecated\n\n PAGE_CREATED = \"page_created\"\n PAGE_UPDATED = \"page_updated\"\n PAGE_DELETED = \"page_deleted\"\n\n PAGE_TYPE_CREATED = \"page_type_created\"\n PAGE_TYPE_UPDATED = \"page_type_updated\"\n PAGE_TYPE_DELETED = \"page_type_deleted\"\n\n PERMISSION_GROUP_CREATED = \"permission_group_created\"\n PERMISSION_GROUP_UPDATED = \"permission_group_updated\"\n PERMISSION_GROUP_DELETED = \"permission_group_deleted\"\n\n SHIPPING_PRICE_CREATED = \"shipping_price_created\"\n SHIPPING_PRICE_UPDATED = \"shipping_price_updated\"\n SHIPPING_PRICE_DELETED = \"shipping_price_deleted\"\n\n SHIPPING_ZONE_CREATED = \"shipping_zone_created\"\n SHIPPING_ZONE_UPDATED = \"shipping_zone_updated\"\n SHIPPING_ZONE_DELETED = \"shipping_zone_deleted\"\n SHIPPING_ZONE_METADATA_UPDATED = \"shipping_zone_metadata_updated\"\n\n STAFF_CREATED = \"staff_created\"\n STAFF_UPDATED = \"staff_updated\"\n STAFF_DELETED = \"staff_deleted\"\n STAFF_SET_PASSWORD_REQUESTED = \"staff_set_password_requested\"\n\n TRANSACTION_ITEM_METADATA_UPDATED = \"transaction_item_metadata_updated\"\n\n TRANSLATION_CREATED = \"translation_created\"\n TRANSLATION_UPDATED = \"translation_updated\"\n\n WAREHOUSE_CREATED = \"warehouse_created\"\n WAREHOUSE_UPDATED = \"warehouse_updated\"\n WAREHOUSE_DELETED = \"warehouse_deleted\"\n WAREHOUSE_METADATA_UPDATED = \"warehouse_metadata_updated\"\n\n VOUCHER_CREATED = \"voucher_created\"\n VOUCHER_UPDATED = \"voucher_updated\"\n VOUCHER_DELETED = \"voucher_deleted\"\n VOUCHER_METADATA_UPDATED = \"voucher_metadata_updated\"\n VOUCHER_CODE_EXPORT_COMPLETED = \"voucher_code_export_completed\"\n\n OBSERVABILITY = \"observability\"\n\n THUMBNAIL_CREATED = \"thumbnail_created\"\n\n SHOP_METADATA_UPDATED = \"shop_metadata_updated\"\n\n EVENT_MAP: dict[str, dict[str, Any]] = {\n ACCOUNT_CONFIRMATION_REQUESTED: {\n \"name\": \"Account confirmation requested\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_CHANGE_EMAIL_REQUESTED: {\n \"name\": \"Account change email requested\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_EMAIL_CHANGED: {\n \"name\": \"Account email changed\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_SET_PASSWORD_REQUESTED: {\n \"name\": \"Account set password requested\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_CONFIRMED: {\n \"name\": \"Account confirmed\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_DELETE_REQUESTED: {\n \"name\": \"Account delete requested\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ACCOUNT_DELETED: {\n \"name\": \"Account delete confirmed\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ADDRESS_CREATED: {\n \"name\": \"Address created\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n ADDRESS_UPDATED: {\n \"name\": \"Address updated\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n 
ADDRESS_DELETED: {\n \"name\": \"Address deleted\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n APP_INSTALLED: {\n \"name\": \"App created\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n APP_UPDATED: {\n \"name\": \"App updated\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n APP_DELETED: {\n \"name\": \"App deleted\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n APP_STATUS_CHANGED: {\n \"name\": \"App status changed\",\n \"permission\": AppPermission.MANAGE_APPS,\n },\n ATTRIBUTE_CREATED: {\n \"name\": \"Attribute created\",\n \"permission\": None,\n },\n ATTRIBUTE_UPDATED: {\n \"name\": \"Attribute updated\",\n \"permission\": None,\n },\n ATTRIBUTE_DELETED: {\n \"name\": \"Attribute deleted\",\n \"permission\": None,\n },\n ATTRIBUTE_VALUE_CREATED: {\n \"name\": \"Attribute value created\",\n \"permission\": None,\n },\n ATTRIBUTE_VALUE_UPDATED: {\n \"name\": \"Attribute value updated\",\n \"permission\": None,\n },\n ATTRIBUTE_VALUE_DELETED: {\n \"name\": \"Attribute value deleted\",\n \"permission\": None,\n },\n CATEGORY_CREATED: {\n \"name\": \"Category created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CATEGORY_UPDATED: {\n \"name\": \"Category updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CATEGORY_DELETED: {\n \"name\": \"Category deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CHANNEL_CREATED: {\n \"name\": \"Channel created\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_UPDATED: {\n \"name\": \"Channel updated\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_DELETED: {\n \"name\": \"Channel deleted\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_STATUS_CHANGED: {\n \"name\": \"Channel status changed\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n CHANNEL_METADATA_UPDATED: {\n \"name\": \"Channel metadata updated\",\n \"permission\": ChannelPermissions.MANAGE_CHANNELS,\n },\n GIFT_CARD_CREATED: {\n \"name\": \"Gift card created\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_UPDATED: {\n \"name\": \"Gift card updated\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_DELETED: {\n \"name\": \"Gift card deleted\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_SENT: {\n \"name\": \"Gift card sent\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_STATUS_CHANGED: {\n \"name\": \"Gift card status changed\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_METADATA_UPDATED: {\n \"name\": \"Gift card metadata updated\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n GIFT_CARD_EXPORT_COMPLETED: {\n \"name\": \"Gift card export completed\",\n \"permission\": GiftcardPermissions.MANAGE_GIFT_CARD,\n },\n MENU_CREATED: {\n \"name\": \"Menu created\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_UPDATED: {\n \"name\": \"Menu updated\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_DELETED: {\n \"name\": \"Menu deleted\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_ITEM_CREATED: {\n \"name\": \"Menu item created\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_ITEM_UPDATED: {\n \"name\": \"Menu item updated\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n MENU_ITEM_DELETED: {\n \"name\": \"Menu item deleted\",\n \"permission\": MenuPermissions.MANAGE_MENUS,\n },\n ORDER_CREATED: {\n \"name\": 
\"Order created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_CONFIRMED: {\n \"name\": \"Order confirmed\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_PAID: {\n \"name\": \"Order paid\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_FULLY_PAID: {\n \"name\": \"Order fully paid\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_REFUNDED: {\n \"name\": \"Order refunded\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_FULLY_REFUNDED: {\n \"name\": \"Order fully refunded\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_UPDATED: {\n \"name\": \"Order updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_CANCELLED: {\n \"name\": \"Order cancelled\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_EXPIRED: {\n \"name\": \"Order expired\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_FULFILLED: {\n \"name\": \"Order fulfilled\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_METADATA_UPDATED: {\n \"name\": \"Order metadata updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n ORDER_BULK_CREATED: {\n \"name\": \"Order bulk created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_CREATED: {\n \"name\": \"Fulfillment created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_CANCELED: {\n \"name\": \"Fulfillment cancelled\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_APPROVED: {\n \"name\": \"Fulfillment approved\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_METADATA_UPDATED: {\n \"name\": \"Fulfillment metadata updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n FULFILLMENT_TRACKING_NUMBER_UPDATED: {\n \"name\": \"Fulfillment tracking number updated.\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n DRAFT_ORDER_CREATED: {\n \"name\": \"Draft order created\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n DRAFT_ORDER_UPDATED: {\n \"name\": \"Draft order updated\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n DRAFT_ORDER_DELETED: {\n \"name\": \"Draft order deleted\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n SALE_CREATED: {\n \"name\": \"Sale created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n SALE_UPDATED: {\n \"name\": \"Sale updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n SALE_DELETED: {\n \"name\": \"Sale deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n SALE_TOGGLE: {\n \"name\": \"Sale toggle\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_CREATED: {\n \"name\": \"Promotion created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_UPDATED: {\n \"name\": \"Promotion updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_DELETED: {\n \"name\": \"Promotion deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_STARTED: {\n \"name\": \"Promotion started\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_ENDED: {\n \"name\": \"Promotion ended\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_RULE_CREATED: {\n \"name\": \"Promotion rule created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_RULE_UPDATED: {\n \"name\": \"Promotion rule updated\",\n \"permission\": 
DiscountPermissions.MANAGE_DISCOUNTS,\n },\n PROMOTION_RULE_DELETED: {\n \"name\": \"Promotion rule deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n INVOICE_REQUESTED: {\n \"name\": \"Invoice requested\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n INVOICE_DELETED: {\n \"name\": \"Invoice deleted\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n INVOICE_SENT: {\n \"name\": \"Invoice sent\",\n \"permission\": OrderPermissions.MANAGE_ORDERS,\n },\n CUSTOMER_CREATED: {\n \"name\": \"Customer created\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n CUSTOMER_UPDATED: {\n \"name\": \"Customer updated\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n CUSTOMER_DELETED: {\n \"name\": \"Customer deleted\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n CUSTOMER_METADATA_UPDATED: {\n \"name\": \"Customer metadata updated\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n COLLECTION_CREATED: {\n \"name\": \"Collection created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n COLLECTION_UPDATED: {\n \"name\": \"Collection updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n COLLECTION_DELETED: {\n \"name\": \"Collection deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n COLLECTION_METADATA_UPDATED: {\n \"name\": \"Collection metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_CREATED: {\n \"name\": \"Product created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_UPDATED: {\n \"name\": \"Product updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_DELETED: {\n \"name\": \"Product deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_METADATA_UPDATED: {\n \"name\": \"Product metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_EXPORT_COMPLETED: {\n \"name\": \"Product export completed\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_MEDIA_CREATED: {\n \"name\": \"Product media created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_MEDIA_UPDATED: {\n \"name\": \"Product media updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_MEDIA_DELETED: {\n \"name\": \"Product media deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_CREATED: {\n \"name\": \"Product variant created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_UPDATED: {\n \"name\": \"Product variant updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_DELETED: {\n \"name\": \"Product variant deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_METADATA_UPDATED: {\n \"name\": \"Product variant metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_OUT_OF_STOCK: {\n \"name\": \"Product variant stock changed\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_BACK_IN_STOCK: {\n \"name\": \"Product variant back in stock\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n PRODUCT_VARIANT_STOCK_UPDATED: {\n \"name\": \"Product variant stock updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n CHECKOUT_CREATED: {\n \"name\": \"Checkout created\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n CHECKOUT_UPDATED: {\n \"name\": \"Checkout 
updated\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n CHECKOUT_FULLY_PAID: {\n \"name\": \"Checkout fully paid\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n CHECKOUT_METADATA_UPDATED: {\n \"name\": \"Checkout metadata updated\",\n \"permission\": CheckoutPermissions.MANAGE_CHECKOUTS,\n },\n NOTIFY_USER: {\n \"name\": \"Notify user\",\n \"permission\": AccountPermissions.MANAGE_USERS,\n },\n PAGE_CREATED: {\n \"name\": \"Page created\",\n \"permission\": PagePermissions.MANAGE_PAGES,\n },\n PAGE_UPDATED: {\n \"name\": \"Page updated\",\n \"permission\": PagePermissions.MANAGE_PAGES,\n },\n PAGE_DELETED: {\n \"name\": \"Page deleted\",\n \"permission\": PagePermissions.MANAGE_PAGES,\n },\n PAGE_TYPE_CREATED: {\n \"name\": \"Page type created\",\n \"permission\": PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,\n },\n PAGE_TYPE_UPDATED: {\n \"name\": \"Page type updated\",\n \"permission\": PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,\n },\n PAGE_TYPE_DELETED: {\n \"name\": \"Page type deleted\",\n \"permission\": PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,\n },\n PERMISSION_GROUP_CREATED: {\n \"name\": \"Permission group created\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n PERMISSION_GROUP_UPDATED: {\n \"name\": \"Permission group updated\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n PERMISSION_GROUP_DELETED: {\n \"name\": \"Permission group deleted\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n SHIPPING_PRICE_CREATED: {\n \"name\": \"Shipping price created\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_PRICE_UPDATED: {\n \"name\": \"Shipping price updated\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_PRICE_DELETED: {\n \"name\": \"Shipping price deleted\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_CREATED: {\n \"name\": \"Shipping zone created\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_UPDATED: {\n \"name\": \"Shipping zone updated\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_DELETED: {\n \"name\": \"Shipping zone deleted\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n SHIPPING_ZONE_METADATA_UPDATED: {\n \"name\": \"Shipping zone metadata updated\",\n \"permission\": ShippingPermissions.MANAGE_SHIPPING,\n },\n STAFF_CREATED: {\n \"name\": \"Staff created\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n STAFF_UPDATED: {\n \"name\": \"Staff updated\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n STAFF_DELETED: {\n \"name\": \"Staff deleted\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n STAFF_SET_PASSWORD_REQUESTED: {\n \"name\": \"Setting a password for a staff is requested\",\n \"permission\": AccountPermissions.MANAGE_STAFF,\n },\n TRANSACTION_ITEM_METADATA_UPDATED: {\n \"name\": \"Transaction item metadata updated\",\n \"permission\": PaymentPermissions.HANDLE_PAYMENTS,\n },\n TRANSLATION_CREATED: {\n \"name\": \"Translation created\",\n \"permission\": SitePermissions.MANAGE_TRANSLATIONS,\n },\n TRANSLATION_UPDATED: {\n \"name\": \"Translation updated\",\n \"permission\": SitePermissions.MANAGE_TRANSLATIONS,\n },\n WAREHOUSE_CREATED: {\n \"name\": \"Warehouse created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n WAREHOUSE_UPDATED: {\n \"name\": \"Warehouse updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n WAREHOUSE_DELETED: {\n 
\"name\": \"Warehouse deleted\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n WAREHOUSE_METADATA_UPDATED: {\n \"name\": \"Warehouse metadata updated\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n VOUCHER_CREATED: {\n \"name\": \"Voucher created\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_UPDATED: {\n \"name\": \"Voucher updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_DELETED: {\n \"name\": \"Voucher deleted\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_METADATA_UPDATED: {\n \"name\": \"Voucher metadata updated\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n VOUCHER_CODE_EXPORT_COMPLETED: {\n \"name\": \"Voucher code export completed\",\n \"permission\": DiscountPermissions.MANAGE_DISCOUNTS,\n },\n OBSERVABILITY: {\n \"name\": \"Observability\",\n \"permission\": AppPermission.MANAGE_OBSERVABILITY,\n },\n THUMBNAIL_CREATED: {\n \"name\": \"Thumbnail created\",\n \"permission\": ProductPermissions.MANAGE_PRODUCTS,\n },\n SHOP_METADATA_UPDATED: {\n \"name\": \"Shop metadata updated\",\n \"permission\": SitePermissions.MANAGE_SETTINGS,\n },\n }\n\n CHOICES = [\n (ANY, \"Any events\"),\n ] + [\n (event_name, event_data[\"name\"]) for event_name, event_data in EVENT_MAP.items()\n ]\n PERMISSIONS: dict[str, Optional[BasePermissionEnum]] = {\n event_name: event_data[\"permission\"]\n for event_name, event_data in EVENT_MAP.items()\n }\n\n ALL = [event[0] for event in CHOICES]" }, { "identifier": "generate_meta", "path": "saleor/webhook/payloads.py", "snippet": "def generate_meta(*, requestor_data: dict[str, Any], camel_case=False, **kwargs):\n meta_result = {\n \"issued_at\": timezone.now().isoformat(),\n \"version\": __version__,\n \"issuing_principal\": requestor_data,\n }\n\n meta_result.update(kwargs)\n\n if camel_case:\n meta = {}\n for key, value in meta_result.items():\n meta[to_camel_case(key)] = value\n else:\n meta = meta_result\n\n return meta" }, { "identifier": "generate_requestor", "path": "saleor/webhook/payloads.py", "snippet": "def generate_requestor(requestor: Optional[\"RequestorOrLazyObject\"] = None):\n if not requestor:\n return {\"id\": None, \"type\": None}\n if isinstance(requestor, User):\n return {\"id\": graphene.Node.to_global_id(\"User\", requestor.id), \"type\": \"user\"}\n return {\"id\": requestor.name, \"type\": \"app\"} # type: ignore" }, { "identifier": "ThumbnailFormatEnum", "path": "saleor/graphql/core/enums.py", "snippet": "class OrderDirection(graphene.Enum):\nclass ReportingPeriod(graphene.Enum):\nclass ErrorPolicy:\n ASC = \"\"\n DESC = \"-\"\n TODAY = \"TODAY\"\n THIS_MONTH = \"THIS_MONTH\"\n IGNORE_FAILED = \"ignore_failed\"\n REJECT_EVERYTHING = \"reject_everything\"\n REJECT_FAILED_ROWS = \"reject_failed_rows\"\n CHOICES = [\n (IGNORE_FAILED, \"Ignore failed\"),\n (REJECT_EVERYTHING, \"Reject everything\"),\n (REJECT_FAILED_ROWS, \"Reject failed rows\"),\n ]\n def description(self):\ndef to_enum(enum_cls, *, type_name=None, **options) -> graphene.Enum:\ndef error_policy_enum_description(enum):" }, { "identifier": "get_graphql_content", "path": "saleor/graphql/tests/utils.py", "snippet": "def get_graphql_content(response, *, ignore_errors: bool = False):\n \"\"\"Extract GraphQL content from the API response.\n\n Optionally ignore protocol-level errors, eg. 
schema errors or lack of\n permissions.\n \"\"\"\n content = get_graphql_content_from_response(response)\n if not ignore_errors:\n assert \"errors\" not in content, content[\"errors\"]\n return content" }, { "identifier": "get_graphql_content_from_response", "path": "saleor/graphql/tests/utils.py", "snippet": "def get_graphql_content_from_response(response):\n return json.loads(response.content.decode(\"utf8\"))" }, { "identifier": "get_multipart_request_body", "path": "saleor/graphql/tests/utils.py", "snippet": "def get_multipart_request_body(query, variables, file, file_name):\n \"\"\"Create request body for multipart GraphQL requests.\n\n Multipart requests are different from standard GraphQL requests, because\n of additional 'operations' and 'map' keys.\n \"\"\"\n return {\n \"operations\": json.dumps(\n {\"query\": query, \"variables\": variables}, cls=DjangoJSONEncoder\n ),\n \"map\": json.dumps({file_name: [\"variables.file\"]}, cls=DjangoJSONEncoder),\n file_name: file,\n }" } ]
import json import os import graphene import pytest from unittest.mock import MagicMock, Mock, patch from django.core.files import File from django.utils import timezone from django.utils.functional import SimpleLazyObject from django.utils.text import slugify from freezegun import freeze_time from graphql_relay import to_global_id from ....core.utils.json_serializer import CustomJsonEncoder from ....product.error_codes import ProductErrorCode from ....product.models import Category, Product, ProductChannelListing from ....product.tests.utils import create_image, create_zip_file_with_image_ext from ....product.utils.costs import get_product_costs_data from ....tests.utils import dummy_editorjs from ....thumbnail.models import Thumbnail from ....webhook.event_types import WebhookEventAsyncType from ....webhook.payloads import generate_meta, generate_requestor from ...core.enums import ThumbnailFormatEnum from ...tests.utils import ( get_graphql_content, get_graphql_content_from_response, get_multipart_request_body, )
11,693
): # given category = Category.objects.first() product_list[0].channel_listings.all().update(visible_in_listings=False) product_count = Product.objects.count() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } # when response = app_api_client.post_graphql(QUERY_CATEGORY, variables=variables) # then content = get_graphql_content(response, ignore_errors=True) assert ( len(content["data"]["category"]["products"]["edges"]) == product_count - 1 ) # invisible doesn't count def test_query_category_product_only_visible_in_listings_as_app_with_perm( app_api_client, product_list, permission_manage_products ): # given app_api_client.app.permissions.add(permission_manage_products) category = Category.objects.first() product_list[0].channel_listings.all().update(visible_in_listings=False) product_count = Product.objects.count() variables = {"id": graphene.Node.to_global_id("Category", category.pk)} # when response = app_api_client.post_graphql(QUERY_CATEGORY, variables=variables) # then content = get_graphql_content(response, ignore_errors=True) assert len(content["data"]["category"]["products"]["edges"]) == product_count CATEGORY_CREATE_MUTATION = """ mutation( $name: String, $slug: String, $description: JSONString, $backgroundImage: Upload, $backgroundImageAlt: String, $parentId: ID, $metadata: [MetadataInput!], $privateMetadata: [MetadataInput!]) { categoryCreate( input: { name: $name slug: $slug description: $description backgroundImage: $backgroundImage backgroundImageAlt: $backgroundImageAlt metadata: $metadata privateMetadata: $privateMetadata }, parent: $parentId ) { category { id name slug description parent { name id } backgroundImage{ alt } metadata { key value } privateMetadata { key value } } errors { field code message } } } """ def test_category_create_mutation( monkeypatch, staff_api_client, permission_manage_products, media_root ): # given staff_api_client.user.user_permissions.add(permission_manage_products) category_name = "Test category" description = "description" category_slug = slugify(category_name) category_description = dummy_editorjs(description, True) image_file, image_name = create_image() image_alt = "Alt text for an image." metadata_key = "md key" metadata_value = "md value" # test creating root category variables = { "name": category_name, "description": category_description, "backgroundImage": image_name, "backgroundImageAlt": image_alt, "slug": category_slug, "metadata": [{"key": metadata_key, "value": metadata_value}], "privateMetadata": [{"key": metadata_key, "value": metadata_value}], }
QUERY_CATEGORY = """ query ($id: ID, $slug: String, $channel: String){ category( id: $id, slug: $slug, ) { id name ancestors(first: 20) { edges { node { name } } } children(first: 20) { edges { node { name } } } products(first: 10, channel: $channel) { edges { node { id } } } } } """ def test_category_query_by_id(user_api_client, product, channel_USD): category = Category.objects.first() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables) content = get_graphql_content(response) category_data = content["data"]["category"] assert category_data is not None assert category_data["name"] == category.name assert len(category_data["ancestors"]["edges"]) == category.get_ancestors().count() assert len(category_data["children"]["edges"]) == category.get_children().count() def test_category_query_invalid_id(user_api_client, product, channel_USD): category_id = "'" variables = { "id": category_id, "channel": channel_USD.slug, } response = user_api_client.post_graphql(QUERY_CATEGORY, variables) content = get_graphql_content_from_response(response) assert len(content["errors"]) == 1 assert ( content["errors"][0]["message"] == f"Invalid ID: {category_id}. Expected: Category." ) assert content["data"]["category"] is None def test_category_query_object_with_given_id_does_not_exist( user_api_client, product, channel_USD ): category_id = graphene.Node.to_global_id("Category", -1) variables = { "id": category_id, "channel": channel_USD.slug, } response = user_api_client.post_graphql(QUERY_CATEGORY, variables) content = get_graphql_content(response) assert content["data"]["category"] is None def test_category_query_object_with_invalid_object_type( user_api_client, product, channel_USD ): category = Category.objects.first() category_id = graphene.Node.to_global_id("Product", category.pk) variables = { "id": category_id, "channel": channel_USD.slug, } response = user_api_client.post_graphql(QUERY_CATEGORY, variables) content = get_graphql_content(response) assert content["data"]["category"] is None def test_category_query_doesnt_show_not_available_products( user_api_client, product, channel_USD ): category = Category.objects.first() variant = product.variants.get() # Set product as not visible due to lack of price. 
variant.channel_listings.update(price_amount=None) variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables) content = get_graphql_content(response) category_data = content["data"]["category"] assert category_data is not None assert category_data["name"] == category.name assert not category_data["products"]["edges"] def test_category_query_description(user_api_client, product, channel_USD): category = Category.objects.first() description = dummy_editorjs("Test description.", json_format=True) category.description = dummy_editorjs("Test description.") category.save() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } query = """ query ($id: ID, $slug: String){ category( id: $id, slug: $slug, ) { id name description descriptionJson } } """ response = user_api_client.post_graphql(query, variables=variables) content = get_graphql_content(response) category_data = content["data"]["category"] assert category_data["description"] == description assert category_data["descriptionJson"] == description def test_category_query_without_description(user_api_client, product, channel_USD): category = Category.objects.first() category.save() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } query = """ query ($id: ID, $slug: String){ category( id: $id, slug: $slug, ) { id name description descriptionJson } } """ response = user_api_client.post_graphql(query, variables=variables) content = get_graphql_content(response) category_data = content["data"]["category"] assert category_data["description"] is None assert category_data["descriptionJson"] == "{}" def test_category_query_by_slug(user_api_client, product, channel_USD): category = Category.objects.first() variables = {"slug": category.slug, "channel": channel_USD.slug} response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables) content = get_graphql_content(response) category_data = content["data"]["category"] assert category_data is not None assert category_data["name"] == category.name assert len(category_data["ancestors"]["edges"]) == category.get_ancestors().count() assert len(category_data["children"]["edges"]) == category.get_children().count() def test_category_query_error_when_id_and_slug_provided( user_api_client, product, graphql_log_handler, channel_USD ): category = Category.objects.first() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "slug": category.slug, "channel": channel_USD.slug, } response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables) assert graphql_log_handler.messages == [ "saleor.graphql.errors.handled[INFO].GraphQLError" ] content = get_graphql_content(response, ignore_errors=True) assert len(content["errors"]) == 1 def test_category_query_error_when_no_param( user_api_client, product, graphql_log_handler ): variables = {} response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables) assert graphql_log_handler.messages == [ "saleor.graphql.errors.handled[INFO].GraphQLError" ] content = get_graphql_content(response, ignore_errors=True) assert len(content["errors"]) == 1 def test_query_category_product_only_visible_in_listings_as_customer( user_api_client, product_list, channel_USD ): # given category = Category.objects.first() product_list[0].channel_listings.all().update(visible_in_listings=False) product_count = 
Product.objects.count() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } # when response = user_api_client.post_graphql(QUERY_CATEGORY, variables=variables) # then content = get_graphql_content(response, ignore_errors=True) assert len(content["data"]["category"]["products"]["edges"]) == product_count - 1 def test_query_category_product_visible_in_listings_as_staff_without_manage_products( staff_api_client, product_list, channel_USD ): # given category = Category.objects.first() product_list[0].channel_listings.all().update(visible_in_listings=False) product_count = Product.objects.count() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } # when response = staff_api_client.post_graphql(QUERY_CATEGORY, variables=variables) # then content = get_graphql_content(response, ignore_errors=True) assert ( len(content["data"]["category"]["products"]["edges"]) == product_count - 1 ) # invisible doesn't count def test_query_category_product_only_visible_in_listings_as_staff_with_perm( staff_api_client, product_list, permission_manage_products ): # given staff_api_client.user.user_permissions.add(permission_manage_products) category = Category.objects.first() product_list[0].channel_listings.all().update(visible_in_listings=False) product_count = Product.objects.count() variables = {"id": graphene.Node.to_global_id("Category", category.pk)} # when response = staff_api_client.post_graphql(QUERY_CATEGORY, variables=variables) # then content = get_graphql_content(response, ignore_errors=True) assert len(content["data"]["category"]["products"]["edges"]) == product_count def test_query_category_product_only_visible_in_listings_as_app_without_manage_products( app_api_client, product_list, channel_USD ): # given category = Category.objects.first() product_list[0].channel_listings.all().update(visible_in_listings=False) product_count = Product.objects.count() variables = { "id": graphene.Node.to_global_id("Category", category.pk), "channel": channel_USD.slug, } # when response = app_api_client.post_graphql(QUERY_CATEGORY, variables=variables) # then content = get_graphql_content(response, ignore_errors=True) assert ( len(content["data"]["category"]["products"]["edges"]) == product_count - 1 ) # invisible doesn't count def test_query_category_product_only_visible_in_listings_as_app_with_perm( app_api_client, product_list, permission_manage_products ): # given app_api_client.app.permissions.add(permission_manage_products) category = Category.objects.first() product_list[0].channel_listings.all().update(visible_in_listings=False) product_count = Product.objects.count() variables = {"id": graphene.Node.to_global_id("Category", category.pk)} # when response = app_api_client.post_graphql(QUERY_CATEGORY, variables=variables) # then content = get_graphql_content(response, ignore_errors=True) assert len(content["data"]["category"]["products"]["edges"]) == product_count CATEGORY_CREATE_MUTATION = """ mutation( $name: String, $slug: String, $description: JSONString, $backgroundImage: Upload, $backgroundImageAlt: String, $parentId: ID, $metadata: [MetadataInput!], $privateMetadata: [MetadataInput!]) { categoryCreate( input: { name: $name slug: $slug description: $description backgroundImage: $backgroundImage backgroundImageAlt: $backgroundImageAlt metadata: $metadata privateMetadata: $privateMetadata }, parent: $parentId ) { category { id name slug description parent { name id } backgroundImage{ alt } metadata { key 
value } privateMetadata { key value } } errors { field code message } } } """ def test_category_create_mutation( monkeypatch, staff_api_client, permission_manage_products, media_root ): # given staff_api_client.user.user_permissions.add(permission_manage_products) category_name = "Test category" description = "description" category_slug = slugify(category_name) category_description = dummy_editorjs(description, True) image_file, image_name = create_image() image_alt = "Alt text for an image." metadata_key = "md key" metadata_value = "md value" # test creating root category variables = { "name": category_name, "description": category_description, "backgroundImage": image_name, "backgroundImageAlt": image_alt, "slug": category_slug, "metadata": [{"key": metadata_key, "value": metadata_value}], "privateMetadata": [{"key": metadata_key, "value": metadata_value}], }
body = get_multipart_request_body(
16
2023-11-13 05:00:35+00:00
16k
Aues6uen11Z/Zafkiel
tests/test.py
[ { "identifier": "logger", "path": "zafkiel/logger.py", "snippet": "" }, { "identifier": "Config", "path": "zafkiel/config.py", "snippet": "class Config:\n ST = Settings\n ST.CVSTRATEGY = [\"mstpl\", \"sift\"]\n ST.THRESHOLD = 0.8\n\n GAME_PATH = None\n SERVER_LANG = 'cn'\n\n # Top, left and bottom boundary pixel values when running in a bordered program\n # The value on my Win10 computer, may not accurate for everyone.\n BORDER = (32, 3, 2)" }, { "identifier": "API", "path": "zafkiel/device/api.py", "snippet": "class API:\n \"\"\"\n Device Setup APIs\n \"\"\"\n\n @staticmethod\n def init_device(platform=\"Android\", uuid=None, **kwargs):\n return init_device(platform, uuid, **kwargs)\n\n @staticmethod\n def connect_device(uri):\n return connect_device(uri)\n\n @staticmethod\n def device():\n return device()\n\n @staticmethod\n def set_current(idx):\n set_current(idx)\n\n @staticmethod\n def auto_setup(\n basedir: str = None,\n devices: list = None,\n firing_time: int = 30,\n logdir: bool = None,\n project_root: str = None,\n compress: int = None\n ):\n \"\"\"\n Auto setup running env and try to connect device if no device is connected.\n\n Args:\n basedir: basedir of script, __file__ is also acceptable.\n devices: connect_device uri in list.\n firing_time: Game starts taking time, this value should be set larger in old machine.\n logdir: log dir for script report, default is None for no log, set to ``True`` for ``<basedir>/log``.\n project_root: Project root dir for `using` api.\n compress: The compression rate of the screenshot image, integer in range [1, 99], default is 10\n\n Examples:\n auto_setup(__file__)\n auto_setup(__file__, devices=[\"Android://127.0.0.1:5037/SJE5T17B17\"],\n ... logdir=True, project_root=r\"D:\\\\test\\\\logs\", compress=90)\n \"\"\"\n if basedir:\n if os.path.isfile(basedir):\n basedir = os.path.dirname(basedir)\n if basedir not in G.BASEDIR:\n G.BASEDIR.append(basedir)\n if devices:\n startup_time = Timer(firing_time).start()\n for dev in devices:\n while not startup_time.reached():\n try:\n connect_device(dev)\n break\n except ElementNotFoundError:\n time.sleep(3)\n if startup_time.reached():\n raise NotRunningError(dev)\n if logdir:\n logdir = script_log_dir(basedir, logdir)\n set_logdir(logdir)\n if project_root:\n ST.PROJECT_ROOT = project_root\n if compress:\n ST.SNAPSHOT_QUALITY = compress\n\n \"\"\"\n Device Operations\n \"\"\"\n\n @staticmethod\n def app_is_running() -> bool:\n \"\"\"\n Platforms:\n Windows\n\n Returns:\n Whether app is running\n \"\"\"\n return G.DEVICE.app_is_running()\n\n @staticmethod\n def stop_app(package=None):\n \"\"\"\n Stop the target application on device\n\n Return:\n Has the Windows application stopped, on Android and iOS no return.\n\n Platforms:\n Android, iOS, Windows\n\n Example:\n stop_app(\"com.netease.cloudmusic\")\n stop_app() # only test on Windows\n \"\"\"\n return G.DEVICE.stop_app(package)\n\n @staticmethod\n @logwrap\n def touch(\n v: Template or tuple,\n times: int = 1,\n blind: bool = False,\n interval: float = 0.05,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the touch action on the device screen\n\n Args:\n v: Target to touch, either a ``ImageTemplate`` instance or absolute coordinates (x, y).\n times: How many touches to be performed\n blind: Whether to recognize Template, sometimes we only need to click without caring about the image.\n interval: Time interval between two touches.\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, 
`OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n **kwargs: Platform specific `kwargs`, please refer to corresponding docs.\n\n Returns:\n Final position to be clicked, e.g. (100, 100)\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n Click absolute coordinates:\n touch((100, 100))\n Click 2 times:\n touch((100, 100), times=2)\n Under Android and Windows platforms, you can set the click duration:\n touch((100, 100), duration=2)\n Right click(Windows):\n touch((100, 100), right_click=True)\n \"\"\"\n if isinstance(v, Template):\n if blind:\n center_pos = (v.area[2] + v.area[0]) / 2, (v.area[3] + v.area[1]) / 2\n else:\n center_pos = loop_find(v, timeout=ST.FIND_TIMEOUT, cls=cls, ocr_mode=ocr_mode)\n\n h = v.height * v.ratio()\n w = v.width * v.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(center_pos, h, w)\n else:\n try_log_screen()\n pos = v\n for _ in range(times):\n G.DEVICE.touch(pos, **kwargs)\n time.sleep(interval)\n delay_after_operation()\n return pos\n\n @logwrap\n def find_click(\n self,\n rec_template: Template,\n touch_template: Template = None,\n times: int = 1,\n timeout: float = 1,\n blind: bool = False,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> bool:\n \"\"\"\n Find the template image and click it or another image area.\n\n Args:\n rec_template: \"Template\" instance to be found.\n touch_template: \"ImageTemplate\" instance to be clicked, defaults to None which means click rec_template.\n times: How many touches to be performed.\n timeout: Time interval to wait for the match.\n blind: Whether to recognize Template, same as parameter of touch().\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n bool: Whether the target image appear and click it.\n \"\"\"\n try:\n pos = self.wait(rec_template, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n h = rec_template.height * rec_template.ratio()\n w = rec_template.width * rec_template.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(pos, h, w)\n except TargetNotFoundError:\n return False\n\n if touch_template:\n self.touch(touch_template, times, blind, ocr_mode=ocr_mode, cls=cls)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{touch_template.name}\")\n else:\n self.touch(pos, times)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{rec_template.name}\")\n return True\n\n @staticmethod\n @logwrap\n def exists(v: Template, timeout: float = 0, ocr_mode: int = 0, cls: Type[Ocr] = Ocr) -> bool or tuple:\n \"\"\"\n Check whether given target exists on device screen\n\n Args:\n v: target to be checked\n timeout: time limit, default is 0 which means loop_find will only search once\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n False if target is not found, otherwise returns the coordinates of the target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n if exists(ImageTemplate(r\"tpl1606822430589.png\")):\n touch(ImageTemplate(r\"tpl1606822430589.png\"))\n\n Since ``exists()`` will return the coordinates,\n we can directly click on this return value to reduce one image search:\n\n pos = exists(ImageTemplate(r\"tpl1606822430589.png\"))\n if pos:\n touch(pos)\n \"\"\"\n try:\n pos = loop_find(v, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n except TargetNotFoundError:\n return False\n else:\n return pos\n\n 
@staticmethod\n @logwrap\n def wait(\n v: Template,\n timeout: float = None,\n interval: float = 0.5,\n interval_func: Callable = None,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> tuple:\n \"\"\"\n Wait to match the Template on the device screen\n\n Args:\n v: target object to wait for, Template instance\n timeout: time interval to wait for the match, default is None which is ``ST.FIND_TIMEOUT``\n interval: time interval in seconds to attempt to find a match\n interval_func: called after each unsuccessful attempt to find the corresponding match\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Raises:\n TargetNotFoundError: raised if target is not found after the time limit expired\n\n Returns:\n coordinates of the matched target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n wait(Template(r\"tpl1606821804906.png\")) # timeout after ST.FIND_TIMEOUT\n # find Template every 3 seconds, timeout after 120 seconds\n wait(Template(r\"tpl1606821804906.png\"), timeout=120, interval=3)\n\n You can specify a callback function every time the search target fails::\n\n def notfound():\n print(\"No target found\")\n wait(Template(r\"tpl1607510661400.png\"), interval_func=notfound)\n \"\"\"\n if timeout is None:\n timeout = ST.FIND_TIMEOUT\n pos = loop_find(v, timeout=timeout, interval=interval, interval_func=interval_func, ocr_mode=ocr_mode, cls=cls)\n\n return pos\n\n @staticmethod\n def swipe(\n v1: Template or tuple,\n v2: Template or tuple = None,\n vector: tuple = None,\n blind1: bool = False,\n blind2: bool = False,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the swipe action on the device screen.\n\n There are two ways of assigning the parameters\n * ``swipe(v1, v2=Template(...))`` # swipe from v1 to v2\n * ``swipe(v1, vector=(x, y))`` # swipe starts at v1 and moves along the vector.\n\n Args:\n v1: the start point of swipe, either a Template instance or absolute coordinates (x, y)\n v2: the end point of swipe, either a Template instance or absolute coordinates (x, y)\n vector: a vector coordinates of swipe action, either absolute coordinates (x, y) or percentage of\n screen e.g.(0.5, 0.5)\n blind1: Whether to recognize Template1, same as parameter of touch().\n blind2: Whether to recognize Template2, same as parameter of touch().\n **kwargs: platform specific `kwargs`, please refer to corresponding docs\n\n Raises:\n general exception when not enough parameters to perform swap action have been provided\n\n Returns:\n Origin position and target position\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n swipe(Template(r\"tpl1606814865574.png\"), vector=[-0.0316, -0.3311])\n swipe((100, 100), (200, 200))\n\n Custom swiping duration and number of steps(Android and iOS)::\n\n # swiping lasts for 1 second, divided into 6 steps\n swipe((100, 100), (200, 200), duration=1, steps=6)\n \"\"\"\n if isinstance(v1, Template):\n if blind1:\n pos1 = (v1.area[2] + v1.area[0]) / 2, (v1.area[3] + v1.area[1]) / 2\n else:\n pos1 = loop_find(v1, timeout=ST.FIND_TIMEOUT)\n else:\n try_log_screen()\n pos1 = v1\n\n if v2:\n if isinstance(v2, Template):\n if blind2:\n pos2 = (v2.area[2] + v2.area[0]) / 2, (v2.area[3] + v2.area[1]) / 2\n else:\n pos2 = loop_find(v2, timeout=ST.FIND_TIMEOUT_TMP)\n else:\n pos2 = v2\n elif vector:\n if vector[0] <= 1 and vector[1] <= 1:\n w, h = G.DEVICE.get_current_resolution()\n vector = (int(vector[0] * w), int(vector[1] * h))\n pos2 = (pos1[0] + vector[0], pos1[1] + vector[1])\n else:\n raise 
ScriptError(\"no enough params for swipe\")\n\n G.DEVICE.swipe(pos1, pos2, **kwargs)\n delay_after_operation()\n logger.info(f\"Swipe {pos1} -> {pos2}\")\n return pos1, pos2\n\n @staticmethod\n def screenshot():\n \"\"\"\n Returns:\n Screenshot image\n \"\"\"\n return G.DEVICE.snapshot(filename=None, quality=ST.SNAPSHOT_QUALITY)\n\n @staticmethod\n def snapshot(filename=None, msg=\"\", quality=None, max_size=None):\n \"\"\"\n Returns:\n {\"screen\": filename, \"resolution\": resolution of the screen} or None\n \"\"\"\n return snapshot(filename, msg, quality, max_size)\n\n @staticmethod\n def shell(cmd):\n return shell(cmd)\n\n @staticmethod\n def start_app(package, activity=None):\n start_app(package, activity)\n\n @staticmethod\n def clear_app(package):\n clear_app(package)\n\n @staticmethod\n def install(filepath, **kwargs):\n return install(filepath, **kwargs)\n\n @staticmethod\n def uninstall(package):\n return uninstall(package)\n\n @staticmethod\n def wake():\n wake()\n\n @staticmethod\n def home():\n home()\n\n @staticmethod\n def double_click(v):\n return double_click(v)\n\n @staticmethod\n def pinch(in_or_out='in', center=None, percent=0.5):\n pinch(in_or_out, center, percent)\n\n @staticmethod\n def key_event(keyname, **kwargs):\n keyevent(keyname, **kwargs)\n\n @staticmethod\n def text(txt, enter=True, **kwargs):\n text(txt, enter, **kwargs)\n\n @staticmethod\n def sleep(secs=1.0):\n sleep(secs)\n\n @staticmethod\n def find_all(v):\n return find_all(v)\n\n @staticmethod\n def get_clipboard(*args, **kwargs):\n return get_clipboard(*args, **kwargs)\n\n @staticmethod\n def set_clipboard(content, *args, **kwargs):\n set_clipboard(content, *args, **kwargs)" }, { "identifier": "Template", "path": "zafkiel/device/template.py", "snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n target_pos: int = TargetPos.MID,\n resolution: tuple = (1280, 720),\n rgb: bool = False,\n scale_max: int = 800,\n scale_step: float = 0.005,\n template_path: str = 'templates'\n ):\n def filepath(self) -> str:\n def name(self) -> str:\n def image(self) -> ndarray:\n def height(self) -> int:\n def width(self) -> int:\n def _has_border(self) -> bool:\n def ratio(self, screen_height: float = None) -> float:\n def area(self) -> tuple:" }, { "identifier": "Timer", "path": "zafkiel/timer.py", "snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/timer.py\n\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. 
Default to 0.\n When using a structure like this, must set a count.\n Otherwise, it goes wrong if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make program run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__" }, { "identifier": "simple_report", "path": "zafkiel/report.py", "snippet": "def simple_report(filepath, log_path=True, logfile=None, output=HTML_FILE):\n path, name = script_dir_name(filepath)\n if log_path is True:\n log_path = os.path.join(path, getattr(Config, \"LOG_DIR\", DEFAULT_LOG_DIR))\n rpt = HtmlReport(path, log_path, logfile=logfile or getattr(Config, \"LOG_FILE\", DEFAULT_LOG_FILE), script_name=name)\n rpt.report(HTML_TPL, output_file=output)" }, { "identifier": "Keyword", "path": "zafkiel/ocr/keyword.py", "snippet": "class Keyword:\n cn: str = ''\n cht: str = ''\n en: str = ''\n jp: str = ''\n # id: int # To be considered\n name: str = ''\n\n \"\"\"\n Instance attributes and methods\n TODO: Error handling for missing attributes\n \"\"\"\n\n @cached_property\n def ch(self) -> str:\n return self.cn\n\n @cached_property\n def cn_parsed(self) -> str:\n return parse_name(self.cn)\n\n @cached_property\n def en_parsed(self) -> str:\n return parse_name(self.en)\n\n @cached_property\n def jp_parsed(self) -> str:\n return parse_name(self.jp)\n\n @cached_property\n def cht_parsed(self) -> str:\n return parse_name(self.cht)\n\n def __str__(self):\n keyword_list = []\n for keyword in [self.cn, self.cht, self.en, self.jp]:\n if keyword != '':\n keyword_list.append(keyword)\n return f\"{self.__class__.__name__}({self.name})->{'/'.join(keyword_list)}\"\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def __hash__(self):\n return hash(self.name)\n\n def __bool__(self):\n return True\n\n def keywords_to_find(self, lang: str = None, ignore_punctuation: bool = True):\n if lang is None:\n lang = Config.SERVER_LANG\n\n # TODO: fix this refer to SRC\n if lang == 'cn':\n if ignore_punctuation:\n return [self.cn_parsed]\n else:\n return [self.cn]\n elif lang == 
'en':\n if ignore_punctuation:\n return [self.en_parsed]\n else:\n return [self.en]\n elif lang == 'jp':\n if ignore_punctuation:\n return [self.jp_parsed]\n else:\n return [self.jp]\n elif lang == 'cht':\n if ignore_punctuation:\n return [self.cht_parsed]\n else:\n return [self.cht]\n else:\n if ignore_punctuation:\n return [\n self.cn_parsed,\n self.en_parsed,\n self.jp_parsed,\n self.cht_parsed,\n ]\n else:\n return [\n self.cn,\n self.en,\n self.jp,\n self.cht,\n ]\n\n \"\"\"\n Class attributes and methods\n\n Note that dataclasses inherited `Keyword` must override `instances` attribute,\n or `instances` will still be a class attribute of base class.\n ```\n @dataclass\n class DungeonNav(Keyword):\n instances: ClassVar = {}\n ```\n \"\"\"\n # Key: instance name. Value: instance object.\n instances: ClassVar = {}\n\n def __post_init__(self):\n self.__class__.instances[self.name] = self\n\n @classmethod\n def _compare(cls, name, keyword):\n return name == keyword\n\n @classmethod\n def find(cls, name, lang: str = None, ignore_punctuation: bool = True):\n \"\"\"\n Args:\n name: Name in any server or instance id.\n lang: Lang to find from. None to search the names from current server only.\n ignore_punctuation: True to remove punctuations and turn into lowercase before searching.\n\n Returns:\n Keyword instance.\n\n Raises:\n ScriptError: If nothing found.\n \"\"\"\n # Already a keyword\n if isinstance(name, Keyword):\n return name\n\n # Probably a variable name\n if isinstance(name, str) and '_' in name:\n for instance in cls.instances.values():\n if name == instance.name:\n return instance\n # Probably an in-game name\n if ignore_punctuation:\n name = parse_name(name)\n else:\n name = str(name)\n instance: Keyword\n for instance in cls.instances.values():\n for keyword in instance.keywords_to_find(\n lang=lang, ignore_punctuation=ignore_punctuation):\n if cls._compare(name, keyword):\n return instance\n\n # Not found\n raise ScriptError(f'Cannot find a {cls.__name__} instance that matches \"{name}\"')" }, { "identifier": "Ocr", "path": "zafkiel/ocr/ocr.py", "snippet": "class Ocr:\n # Merge results with box distance <= thres\n merge_thres_x = 0\n merge_thres_y = 0\n\n def __init__(self, button: ImageTemplate, lang=None, name=None):\n \"\"\"\n Args:\n button:\n lang: If None, use in-game language\n name: If None, use button.name\n \"\"\"\n if lang is None:\n lang = Config.SERVER_LANG\n if name is None:\n name = button.name\n\n self.button: ImageTemplate = button\n self.lang: str = lang\n self.name: str = name\n\n @cached_property\n def model(self) -> TextSystem:\n return OCR_MODEL.get_by_lang(self.lang)\n\n @staticmethod\n def pre_process(image):\n \"\"\"\n To be overridden.\n \"\"\"\n return image\n\n @staticmethod\n def after_process(result):\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def format_result(self, result) -> str:\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def ocr_single_line(self, image):\n # pre process\n start_time = time.time()\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n result, _ = self.model.ocr_single_line(image)\n # after proces\n result = self.after_process(result)\n result = self.format_result(result)\n\n cost_time = time.time() - start_time\n logger.debug(f'OCR <{self.name}> cost {cost_time:.2f}s: {result}')\n return result\n\n def filter_detected(self, result: BoxedResult) -> bool:\n \"\"\"\n Return False to drop result.\n To be overridden.\n \"\"\"\n return True\n\n def detect_and_ocr(self, 
image, direct_ocr=False) -> list[BoxedResult]:\n \"\"\"\n Args:\n image:\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n\n \"\"\"\n # pre process\n start_time = time.time()\n if not direct_ocr:\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n results: list[BoxedResult] = self.model.detect_and_ocr(image)\n # after proces\n for result in results:\n if not direct_ocr:\n result.box += self.button.area[:2]\n result.box = tuple(corner2area(result.box))\n\n results = [result for result in results if self.filter_detected(result)]\n results = merge_buttons(results, thres_x=self.merge_thres_x, thres_y=self.merge_thres_y)\n for result in results:\n result.ocr_text = self.after_process(result.ocr_text)\n\n cost_time = time.time() - start_time\n logger.debug(f\"OCR <{self.name}> cost {cost_time:.2f}s: {', '.join([result.ocr_text for result in results])}\")\n return results\n\n @staticmethod\n def _match_result(\n result: str,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True):\n \"\"\"\n Args:\n result (str):\n keyword_classes: A list of `Keyword` class or classes inherited `Keyword`\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n # Digits will be considered as the index of keyword\n if ignore_digit:\n if result.isdigit():\n return None\n\n # Try in current lang\n for keyword_class in keyword_classes:\n try:\n matched = keyword_class.find(\n result,\n lang=lang,\n ignore_punctuation=ignore_punctuation\n )\n return matched\n except ScriptError:\n continue\n\n return None\n\n def matched_single_line(\n self,\n image,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True\n ):\n \"\"\"\n Args:\n image: Image to detect\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n lang:\n ignore_punctuation:\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n result = self.ocr_single_line(image)\n\n result = self._match_result(\n result,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n )\n\n logger.debug(f'<{self.name}> matched: {str(result)}')\n return result\n\n def _product_button(\n self,\n boxed_result: BoxedResult,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True\n ) -> OcrResultButton:\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n matched_keyword = self._match_result(\n boxed_result.ocr_text,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n ignore_digit=ignore_digit,\n )\n button = OcrResultButton(boxed_result, matched_keyword)\n return button\n\n def matched_ocr(self, image, keyword_classes, direct_ocr=False) -> list[OcrResultButton]:\n \"\"\"\n Match all instances of 'keyword_classes' on the screen.\n\n Args:\n image: Screenshot\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n List of matched OcrResultButton.\n OCR result which didn't matched known keywords will be dropped.\n \"\"\"\n results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n results = [self._product_button(result, keyword_classes) 
for result in results]\n results = [result for result in results if result.is_keyword_matched]\n\n if results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return results\n\n def ocr_match_keyword(self, image, keyword_instance, direct_ocr=False, mode: int = OCR_EQUAL, threshold=0.75) \\\n -> list[OcrResultButton]:\n \"\"\"\n Match a specified keyword instance on the screen.\n\n Args:\n image: Screenshot\n keyword_instance: Instance of `Keyword` class or its subclass.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n mode: Match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n threshold: Similarity threshold, default 0.75, only work when mode is OCR_SIMILAR.\n\n Returns:\n List of matched OcrResultButton or empty list.\n \"\"\"\n boxed_results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n final_results = []\n for boxed_result in boxed_results:\n for keyword in keyword_instance.keywords_to_find():\n if mode == OCR_EQUAL and boxed_result.ocr_text != keyword:\n continue\n elif mode == OCR_CONTAINS and keyword not in boxed_result.ocr_text:\n continue\n elif mode == OCR_SIMILAR:\n similarity = SequenceMatcher(None, boxed_result.ocr_text, keyword).ratio()\n if similarity < threshold:\n continue\n button = OcrResultButton(boxed_result, keyword_instance)\n final_results.append(button)\n\n if final_results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in final_results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return final_results" }, { "identifier": "Digit", "path": "zafkiel/ocr/ocr.py", "snippet": "class Digit(Ocr):\n def __init__(self, button: ImageTemplate, lang='en', name=None):\n super().__init__(button, lang=lang, name=name)\n\n def format_result(self, result) -> int:\n \"\"\"\n Returns:\n int:\n \"\"\"\n result = super().after_process(result)\n # logger.attr(name=self.name, text=str(result))\n\n res = re.search(r'(\\d+)', result)\n if res:\n return int(res.group(1))\n else:\n # logger.warning(f'No digit found in {result}')\n return 0" }, { "identifier": "DigitCounter", "path": "zafkiel/ocr/ocr.py", "snippet": "class DigitCounter(Ocr):\n def __init__(self, button: ImageTemplate, lang='en', name=None):\n super().__init__(button, lang=lang, name=name)\n\n def format_result(self, result) -> tuple[int, int, int]:\n \"\"\"\n Do OCR on a counter, such as `14/15`, and returns 14, 1, 15\n\n Returns:\n int:\n \"\"\"\n result = super().after_process(result)\n # logger.attr(name=self.name, text=str(result))\n\n res = re.search(r'(\\d+)/(\\d+)', result)\n if res:\n groups = [int(s) for s in res.groups()]\n current, total = int(groups[0]), int(groups[1])\n # current = min(current, total)\n return current, total - current, total\n else:\n # logger.warning(f'No digit counter found in {result}')\n return 0, 0, 0" }, { "identifier": "Duration", "path": "zafkiel/ocr/ocr.py", "snippet": "class Duration(Ocr):\n @classmethod\n def timedelta_regex(cls, lang):\n regex_str = {\n 'cn': r'^(?P<prefix>.*?)'\n r'((?P<days>\\d{1,2})\\s*天\\s*)?'\n r'((?P<hours>\\d{1,2})\\s*小时\\s*)?'\n r'((?P<minutes>\\d{1,2})\\s*分钟\\s*)?'\n r'((?P<seconds>\\d{1,2})\\s*秒)?'\n r'(?P<suffix>[^天时钟秒]*?)$',\n 'en': r'^(?P<prefix>.*?)'\n r'((?P<days>\\d{1,2})\\s*d\\s*)?'\n r'((?P<hours>\\d{1,2})\\s*h\\s*)?'\n r'((?P<minutes>\\d{1,2})\\s*m\\s*)?'\n r'((?P<seconds>\\d{1,2})\\s*s)?'\n r'(?P<suffix>[^dhms]*?)$'\n 
}[lang]\n return re.compile(regex_str)\n\n def after_process(self, result):\n result = super().after_process(result)\n result = result.strip('.,。,')\n result = result.replace('Oh', '0h').replace('oh', '0h')\n return result\n\n def format_result(self, result: str) -> timedelta:\n \"\"\"\n Do OCR on a duration, such as `18d 2h 13m 30s`, `2h`, `13m 30s`, `9s`\n\n Returns:\n timedelta:\n \"\"\"\n matched = self.timedelta_regex(self.lang).search(result)\n if not matched:\n return timedelta()\n days = self._sanitize_number(matched.group('days'))\n hours = self._sanitize_number(matched.group('hours'))\n minutes = self._sanitize_number(matched.group('minutes'))\n seconds = self._sanitize_number(matched.group('seconds'))\n return timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)\n\n @staticmethod\n def _sanitize_number(number) -> int:\n if number is None:\n return 0\n return int(number)" }, { "identifier": "OcrResultButton", "path": "zafkiel/ocr/ocr.py", "snippet": "class OcrResultButton:\n def __init__(self, boxed_result: BoxedResult, matched_keyword: Optional[Keyword]):\n \"\"\"\n Args:\n boxed_result: BoxedResult from ppocr-onnx\n matched_keyword: Keyword object or None\n \"\"\"\n self.area = boxed_result.box\n self.search = area_pad(self.area, pad=-20)\n # self.button = boxed_result.box\n\n if matched_keyword is not None:\n self.matched_keyword = matched_keyword\n self.name = str(matched_keyword)\n else:\n self.matched_keyword = None\n self.name = boxed_result.ocr_text\n\n self.text = boxed_result.ocr_text\n self.score = boxed_result.score\n\n @property\n def is_keyword_matched(self) -> bool:\n return self.matched_keyword is not None\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def __hash__(self):\n return hash(self.name)\n\n def __bool__(self):\n return True" }, { "identifier": "Page", "path": "zafkiel/ui/page.py", "snippet": "class Page:\n \"\"\"\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/page.py\n \"\"\"\n\n # Key: str, page name like \"page_main\"\n # Value: Page, page instance\n all_pages = {}\n\n @classmethod\n def clear_connection(cls):\n for page in cls.all_pages.values():\n page.parent = None\n\n @classmethod\n def init_connection(cls, destination: Page):\n \"\"\"Initialize an A* path finding among pages.\n\n Args:\n destination:\n \"\"\"\n cls.clear_connection()\n\n visited = [destination]\n visited = set(visited)\n while True:\n new = visited.copy()\n for page in visited:\n for link in cls.iter_pages():\n if link in visited:\n continue\n if page in link.links:\n link.parent = page\n new.add(link)\n if len(new) == len(visited):\n break\n visited = new\n\n @classmethod\n def iter_pages(cls, start_page: Page = None):\n pages = list(cls.all_pages.values())\n if start_page is not None and start_page in pages:\n # Move start_page to the front of the list\n pages.remove(start_page)\n pages.insert(0, start_page)\n cls.all_pages = {page.name: page for page in pages}\n return cls.all_pages.values()\n\n @classmethod\n def iter_check_buttons(cls):\n for page in cls.all_pages.values():\n yield page.check_button\n\n def __init__(self, check_button: Template, switch: Switch = None):\n self.check_button = check_button\n self.switch = switch\n self.links = {}\n (filename, line_number, function_name, text) = traceback.extract_stack()[-2]\n self.name = text[:text.find('=')].strip()\n self.parent = None\n Page.all_pages[self.name] = self\n\n def 
__eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def link(self, button: Template, destination: Page):\n self.links[destination] = button" }, { "identifier": "Switch", "path": "zafkiel/ui/switch.py", "snippet": "class Switch:\n \"\"\"\n A wrapper to handle switches in game, switch among states with retries.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n\n Examples:\n # Definitions\n submarine_hunt = Switch('Submarine_hunt', offset=120)\n submarine_hunt.add_state('on', check_button=Template(r\"assets/ON.png\"))\n submarine_hunt.add_state('off', check_button=Template(r\"assets/OFF.png\"))\n\n # Change state to ON\n submarine_view.set(TPL_ON)\n \"\"\"\n\n def __init__(self, name: str = 'Switch', is_selector: bool = False):\n \"\"\"\n Args:\n name:\n is_selector: True if this is a multi choice, click to choose one of the switches.\n For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |\n False if this is a switch, click the switch itself, and it changed in the same position.\n For example: | [ON] | -> click -> | [OFF] |\n \"\"\"\n self.name = name\n self.is_choice = is_selector\n self.state_list = []\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def add_state(self, state: str, check_button: Template, click_button: Template = None):\n \"\"\"\n Args:\n state: Must match check_button.name\n check_button:\n click_button:\n \"\"\"\n self.state_list.append({\n 'state': state,\n 'check_button': check_button,\n 'click_button': click_button if click_button is not None else check_button,\n })\n\n def get_data(self, state: Template) -> dict:\n \"\"\"\n Args:\n state:\n\n Returns:\n Dictionary in add_state\n\n Raises:\n ScriptError: If state invalid\n \"\"\"\n for row in self.state_list:\n if row['state'] == state.name:\n return row\n\n raise ScriptError(f'Switch {self.name} received an invalid state {state}')" }, { "identifier": "UI", "path": "zafkiel/ui/ui.py", "snippet": "class UI(API):\n \"\"\"\n Processing interface related functions.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py\n and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n \"\"\"\n\n # Make ui_current mutable so that it can be shared among subclasses of the UI class.\n ui_current: dict = {'page': None}\n popup_list: list = []\n\n def ui_switch_appear(self, switch: Switch) -> bool:\n \"\"\"\n Args:\n switch:\n \"\"\"\n if self.ui_get_current_page().switch != switch:\n return False\n\n for data in switch.state_list:\n if self.exists(data['check_button']):\n return True\n return False\n\n def ui_get_current_state(self, switch: Switch) -> str:\n \"\"\"\n Args:\n switch:\n\n Returns:\n state name or 'unknown'.\n \"\"\"\n if self.ui_current['page'].switch != switch:\n logger.warning(f\"{self.ui_current['page']} does not have {switch}\")\n return 'unknown'\n\n for data in switch.state_list:\n if self.exists(data['check_button']):\n return data['state']\n return 'unknown'\n\n def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:\n \"\"\"\n Args:\n page:\n timeout: Seconds to find.\n\n Returns:\n If found, return tuple of (x, y), else return False.\n \"\"\"\n return self.exists(page.check_button, timeout)\n\n def ui_get_current_page(self):\n \"\"\"\n Returns:\n Page:\n\n Raises:\n NotRunningError:\n PageUnknownError:\n \"\"\"\n\n @run_once\n 
def app_check():\n if not self.app_is_running():\n raise NotRunningError(\"Game not running\")\n\n timeout = Timer(10, count=20).start()\n while True:\n\n # End\n if timeout.reached():\n break\n\n # Known pages\n for page in Page.iter_pages():\n if page.check_button is None:\n continue\n if self.ui_page_appear(page=page):\n self.ui_current['page'] = page\n return page\n\n # Unknown page but able to handle\n if self.ui_additional():\n timeout.reset()\n continue\n\n app_check()\n\n # Unknown page, need manual switching\n raise PageUnknownError\n\n def _set_state(self, switch: Switch, state: Template) -> bool:\n counter = 0\n changed = False\n warning_show_timer = Timer(5, count=10).start()\n click_timer = Timer(1, count=3)\n while True:\n\n # Detect\n current = self.ui_get_current_state(switch)\n\n # End\n if current == state.name:\n logger.info(f'{switch.name} set to {state.name}')\n return changed\n\n # Warning\n if current == 'unknown':\n if self.ui_additional():\n continue\n if warning_show_timer.reached():\n logger.warning(f'Unknown {switch.name} switch')\n warning_show_timer.reset()\n if counter >= 1:\n logger.warning(\n f'{switch.name} switch {state.name} asset has evaluated to unknown too many times, '\n f'asset should be re-verified')\n return False\n counter += 1\n continue\n\n # Click\n if click_timer.reached():\n click_state = state if switch.is_choice else current\n button = switch.get_data(click_state)['click_button']\n self.touch(button)\n click_timer.reset()\n changed = True\n\n return changed\n\n def ui_goto(self, destination: Page, state: Template = None):\n \"\"\"\n Args:\n destination:\n state: Target state of switch, which must be in destination page.\n \"\"\"\n\n # check if state is valid\n if state is not None:\n if destination.switch is None:\n raise ScriptError(f'Page {destination} has no switch')\n destination.switch.get_data(state)\n\n logger.debug(f\"------ UI GOTO {str(destination).upper()}:{state.name.upper()} ------\")\n else:\n logger.debug(f\"------ UI GOTO {str(destination).upper()} ------\")\n\n # Create connection\n Page.init_connection(destination)\n\n while True:\n\n # Destination page\n if self.ui_page_appear(destination, timeout=0.5):\n self.ui_current['page'] = destination\n logger.debug(f'Page arrive: {destination}')\n if state is not None:\n self._set_state(destination.switch, state)\n break\n\n # Other pages\n clicked = False\n for page in Page.iter_pages(start_page=self.ui_current['page']):\n if page.parent is None or page.check_button is None:\n continue\n if self.exists(page.check_button):\n self.ui_current['page'] = page\n button = page.links[page.parent]\n self.touch(button)\n logger.info(f'Page switch: {page} -> {page.parent}')\n clicked = True\n break\n if clicked:\n continue\n\n # Additional\n if self.ui_additional():\n continue\n\n # Reset connection\n Page.clear_connection()\n\n def ui_ensure(self, destination: Page, state: Template = None) -> bool:\n \"\"\"\n Args:\n destination:\n state: Target state of switch, which must be in destination page.\n\n Returns:\n bool: If UI switched.\n \"\"\"\n self.ui_get_current_page()\n\n if self.ui_current['page'] == destination:\n if state is not None:\n if self.ui_get_current_state(destination.switch) == state.name:\n logger.debug(f\"Arrived at {destination}:{state.name}\")\n return False\n else:\n self._set_state(destination.switch, state)\n return True\n else:\n logger.debug(f\"Already at {destination}\")\n return False\n else:\n self.ui_goto(destination, state)\n return True\n\n def 
ui_ensure_index(\n self,\n index: int,\n letter: Ocr or callable,\n next_button: Template,\n prev_button: Template,\n fast: bool = True,\n interval: float = 0.2\n ):\n \"\"\"\n For pages with similar layout, ensure index of target page.\n\n Args:\n index: Index of target page.\n letter: OCR button.\n next_button:\n prev_button:\n fast: Default true. False when index is not continuous.\n interval: Seconds between two click.\n \"\"\"\n retry = Timer(1, count=2)\n while True:\n if isinstance(letter, Ocr):\n current = letter.ocr_single_line(self.screenshot())\n else:\n current = letter(self.screenshot())\n\n logger.info(f\"{self.ui_current['page']}: Index {current}\")\n diff = index - current\n if diff == 0:\n break\n if current == 0:\n logger.warning(f'ui_ensure_index got an empty current value: {current}')\n continue\n\n if retry.reached():\n button = next_button if diff > 0 else prev_button\n if fast:\n self.touch(button, times=abs(diff), interval=interval)\n else:\n self.touch(button)\n retry.reset()\n\n def get_popup_list(self, popups: list):\n \"\"\"\n Get list from program, must be called before self.ui_additional().\n\n Args:\n popups: list of handle popup functions\n \"\"\"\n for popup in popups:\n self.popup_list.append(popup)\n\n def ui_additional(self) -> bool:\n \"\"\"\n Handle all possible popups during UI switching.\n\n Returns:\n If handled any popup.\n \"\"\"\n for popup in self.popup_list:\n if popup():\n return True\n\n return False\n\n def to_json(self) -> dict:\n # May not be actual current page\n return {'ui_current': str(self.ui_current['page'])}" } ]
from zafkiel import API, Template, logger, Timer, simple_report, Config
from zafkiel.ocr import Keyword, Ocr, Digit, DigitCounter, Duration, OcrResultButton
from zafkiel.ui import Page, Switch, UI
12,430
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch UI API
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch UI API
Template
3
2023-11-12 09:33:35+00:00
16k
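The ui_goto()/ui_ensure() logic quoted in the record above is hard to follow inside the escaped snippet, so here is a rough, self-contained sketch of the underlying page-graph idea: each page records its parent and the button that leads back to it, and navigation is a walk along parents until the destination is reached. DemoPage, route_to and the button names are hypothetical stand-ins and are not part of the zafkiel API.

# Minimal sketch of the page-graph navigation pattern (illustrative only).
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class DemoPage:
    name: str
    parent: Optional["DemoPage"] = None
    # parent page name -> name of the button clicked on this page to reach it
    links: Dict[str, str] = field(default_factory=dict)


def route_to(current: DemoPage, destination: DemoPage) -> List[str]:
    """Return the buttons clicked while walking from `current` up to `destination`."""
    clicks: List[str] = []
    page = current
    while page is not destination:
        if page.parent is None:
            raise RuntimeError(f"No route from {page.name} to {destination.name}")
        clicks.append(page.links[page.parent.name])
        page = page.parent
    return clicks


main = DemoPage("main")
mission = DemoPage("mission", parent=main, links={"main": "BACK"})
reward = DemoPage("reward", parent=mission, links={"mission": "CLOSE"})

print(route_to(reward, main))  # ['CLOSE', 'BACK']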
medkit-lib/medkit
tests/unit/io/test_brat_output_converter.py
[ { "identifier": "Attribute", "path": "medkit/core/attribute.py", "snippet": "class Attribute(dict_conv.SubclassMapping):\n \"\"\"\n Medkit attribute, to be added to an annotation\n\n Attributes\n ----------\n label:\n The attribute label\n value:\n The value of the attribute. Should be either simple built-in types (int,\n float, bool, str) or collections of these types (list, dict, tuple). If\n you need structured complex data you should create a subclass of\n `Attribute`.\n metadata:\n The metadata of the attribute\n uid:\n The identifier of the attribute\n \"\"\"\n\n label: str\n value: Optional[Any]\n metadata: Dict[str, Any]\n uid: str\n\n def __init__(\n self,\n label: str,\n value: Optional[Any] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.label = label\n self.value = value\n self.metadata = metadata\n\n def __init_subclass__(cls):\n Attribute.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self) -> Dict[str, Any]:\n attribute_dict = dict(\n uid=self.uid,\n label=self.label,\n value=self.value,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, attribute_dict)\n return attribute_dict\n\n def to_brat(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with the brat format\n \"\"\"\n\n return self.value\n\n def to_spacy(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with spaCy\n \"\"\"\n\n return self.value\n\n def copy(self) -> Attribute:\n \"\"\"\n Create a new attribute that is a copy of the current instance, but\n with a new identifier\n\n This is used when we want to duplicate an existing attribute onto a\n different annotation.\n \"\"\"\n return dataclasses.replace(self, uid=generate_id())\n\n @classmethod\n def from_dict(cls, attribute_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates an Attribute from a dict\n\n Parameters\n ----------\n attribute_dict: dict\n A dictionary from a serialized Attribute as generated by to_dict()\n \"\"\"\n\n subclass = cls.get_subclass_for_data_dict(attribute_dict)\n if subclass is not None:\n return subclass.from_dict(attribute_dict)\n\n return cls(\n uid=attribute_dict[\"uid\"],\n label=attribute_dict[\"label\"],\n value=attribute_dict[\"value\"],\n metadata=attribute_dict[\"metadata\"],\n )" }, { "identifier": "Entity", "path": "medkit/core/text/annotation.py", "snippet": "class Entity(Segment):\n \"\"\"\n Text entity referencing part of an :class:`~medkit.core.text.TextDocument`.\n\n Attributes\n ----------\n uid:\n The entity identifier.\n label:\n The label for this entity (e.g., DISEASE)\n text:\n Text of the entity.\n spans:\n List of spans indicating which parts of the entity text correspond to\n which part of the document's full text.\n attrs:\n Attributes of the entity. 
Stored in a\n :class:{~medkit.core.EntityAttributeContainer} but can be passed as a list at\n init.\n metadata:\n The metadata of the entity\n keys:\n Pipeline output keys to which the entity belongs to.\n \"\"\"\n\n attrs: EntityAttributeContainer\n\n def __init__(\n self,\n label: str,\n text: str,\n spans: List[AnySpan],\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n store: Optional[Store] = None,\n attr_container_class: Type[EntityAttributeContainer] = EntityAttributeContainer,\n ):\n super().__init__(label, text, spans, attrs, metadata, uid, store, attr_container_class)" }, { "identifier": "Relation", "path": "medkit/core/text/annotation.py", "snippet": "class Relation(TextAnnotation):\n \"\"\"\n Relation between two text entities.\n\n Attributes\n ----------\n uid:\n The identifier of the relation\n label:\n The relation label\n source_id:\n The identifier of the entity from which the relation is defined\n target_id:\n The identifier of the entity to which the relation is defined\n attrs:\n The attributes of the relation\n metadata:\n The metadata of the relation\n keys:\n Pipeline output keys to which the relation belongs to\n \"\"\"\n\n source_id: str\n target_id: str\n\n def __init__(\n self,\n label: str,\n source_id: str,\n target_id: str,\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n store: Optional[Store] = None,\n attr_container_class: Type[AttributeContainer] = AttributeContainer,\n ):\n super().__init__(\n label=label,\n attrs=attrs,\n metadata=metadata,\n uid=uid,\n attr_container_class=attr_container_class,\n )\n\n self.source_id = source_id\n self.target_id = target_id\n\n def to_dict(self) -> Dict[str, Any]:\n attrs = [a.to_dict() for a in self.attrs]\n relation_dict = dict(\n uid=self.uid,\n label=self.label,\n source_id=self.source_id,\n target_id=self.target_id,\n attrs=attrs,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, relation_dict)\n return relation_dict\n\n @classmethod\n def from_dict(cls, relation_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Relation from a dict\n\n Parameters\n ----------\n relation_dict: dict\n A dictionary from a serialized relation as generated by to_dict()\n \"\"\"\n\n attrs = [Attribute.from_dict(a) for a in relation_dict[\"attrs\"]]\n return cls(\n uid=relation_dict[\"uid\"],\n label=relation_dict[\"label\"],\n source_id=relation_dict[\"source_id\"],\n target_id=relation_dict[\"target_id\"],\n attrs=attrs,\n metadata=relation_dict[\"metadata\"],\n )" }, { "identifier": "Segment", "path": "medkit/core/text/annotation.py", "snippet": "class Segment(TextAnnotation):\n \"\"\"\n Text segment referencing part of an :class:`~medkit.core.text.TextDocument`.\n\n Attributes\n ----------\n uid:\n The segment identifier.\n label:\n The label for this segment (e.g., SENTENCE)\n text:\n Text of the segment.\n spans:\n List of spans indicating which parts of the segment text correspond to\n which part of the document's full text.\n attrs:\n Attributes of the segment. 
Stored in a\n :class:{~medkit.core.AttributeContainer} but can be passed as a list at\n init.\n metadata:\n The metadata of the segment\n keys:\n Pipeline output keys to which the segment belongs to.\n \"\"\"\n\n spans: List[AnySpan]\n text: str\n\n def __init__(\n self,\n label: str,\n text: str,\n spans: List[AnySpan],\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n store: Optional[Store] = None,\n attr_container_class: Type[AttributeContainer] = AttributeContainer,\n ):\n super().__init__(\n label=label,\n attrs=attrs,\n metadata=metadata,\n uid=uid,\n attr_container_class=attr_container_class,\n )\n\n self.text = text\n self.spans = spans\n\n # check if spans length is equal to text length\n length = sum(s.length for s in self.spans)\n assert len(self.text) == length, \"Spans length does not match text length\"\n\n def to_dict(self) -> Dict[str, Any]:\n spans = [s.to_dict() for s in self.spans]\n attrs = [a.to_dict() for a in self.attrs]\n segment_dict = dict(\n uid=self.uid,\n label=self.label,\n text=self.text,\n spans=spans,\n attrs=attrs,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, segment_dict)\n return segment_dict\n\n @classmethod\n def from_dict(cls, segment_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Segment from a dict\n\n Parameters\n ----------\n segment_dict: dict\n A dictionary from a serialized segment as generated by to_dict()\n \"\"\"\n\n spans = [AnySpan.from_dict(s) for s in segment_dict[\"spans\"]]\n attrs = [Attribute.from_dict(a) for a in segment_dict[\"attrs\"]]\n return cls(\n uid=segment_dict[\"uid\"],\n label=segment_dict[\"label\"],\n text=segment_dict[\"text\"],\n spans=spans,\n attrs=attrs,\n metadata=segment_dict[\"metadata\"],\n )" }, { "identifier": "TextDocument", "path": "medkit/core/text/document.py", "snippet": "class TextDocument(dict_conv.SubclassMapping):\n \"\"\"\n Document holding text annotations\n\n Annotations must be subclasses of `TextAnnotation`.\n\n Attributes\n ----------\n uid:\n Unique identifier of the document.\n text:\n Full document text.\n anns:\n Annotations of the document. Stored in an\n :class:`~.text.TextAnnotationContainer` but can be passed as a list at init.\n attrs:\n Attributes of the document. Stored in an\n :class:`~.core.AttributeContainer` but can be passed as a list at init\n metadata:\n Document metadata.\n raw_segment:\n Auto-generated segment containing the full unprocessed document text. 
To\n get the raw text as an annotation to pass to processing operations:\n\n >>> doc = TextDocument(text=\"hello\")\n >>> raw_text = doc.anns.get(label=TextDocument.RAW_LABEL)[0]\n \"\"\"\n\n RAW_LABEL: ClassVar[str] = \"RAW_TEXT\"\n\n uid: str\n anns: TextAnnotationContainer\n attrs: AttributeContainer\n metadata: Dict[str, Any]\n raw_segment: Segment\n\n def __init__(\n self,\n text: str,\n anns: Optional[Sequence[TextAnnotation]] = None,\n attrs: Optional[Sequence[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if anns is None:\n anns = []\n if attrs is None:\n attrs = []\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.metadata = metadata\n\n # auto-generated raw segment to hold the text\n self.raw_segment = self._generate_raw_segment(text, uid)\n\n self.anns = TextAnnotationContainer(doc_id=self.uid, raw_segment=self.raw_segment)\n for ann in anns:\n self.anns.add(ann)\n\n self.attrs = AttributeContainer(\n owner_id=self.uid,\n )\n\n for attr in attrs:\n self.attrs.add(attr)\n\n @classmethod\n def _generate_raw_segment(cls, text: str, doc_id: str) -> Segment:\n uid = str(generate_deterministic_id(reference_id=doc_id))\n\n return Segment(\n label=cls.RAW_LABEL,\n spans=[Span(0, len(text))],\n text=text,\n uid=uid,\n )\n\n @property\n def text(self) -> str:\n return self.raw_segment.text\n\n def __init_subclass__(cls):\n TextDocument.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self, with_anns: bool = True) -> Dict[str, Any]:\n doc_dict = dict(\n uid=self.uid,\n text=self.text,\n metadata=self.metadata,\n )\n if with_anns:\n doc_dict[\"anns\"] = [a.to_dict() for a in self.anns]\n\n if self.attrs:\n doc_dict[\"attrs\"] = [a.to_dict() for a in self.attrs]\n\n dict_conv.add_class_name_to_data_dict(self, doc_dict)\n return doc_dict\n\n @classmethod\n def from_dict(cls, doc_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a TextDocument from a dict\n\n Parameters\n ----------\n doc_dict: dict\n A dictionary from a serialized TextDocument as generated by to_dict()\n \"\"\"\n\n # if class method is not the same as the TextDocument one\n # (e.g., when subclassing with an overriding method)\n subclass = cls.get_subclass_for_data_dict(doc_dict)\n if subclass is not None:\n return subclass.from_dict(doc_dict)\n\n anns = [TextAnnotation.from_dict(a) for a in doc_dict.get(\"anns\", [])]\n attrs = [Attribute.from_dict(a) for a in doc_dict.get(\"attrs\", [])]\n return cls(\n uid=doc_dict[\"uid\"],\n text=doc_dict[\"text\"],\n anns=anns,\n attrs=attrs,\n metadata=doc_dict[\"metadata\"],\n )\n\n @classmethod\n def from_file(cls, path: os.PathLike, encoding: Optional[str] = \"utf-8\") -> Self:\n \"\"\"\n Create a document from a text file\n\n Parameters\n ----------\n path:\n Path of the text file\n encoding:\n Text encoding to use\n\n Returns\n -------\n TextDocument:\n Text document with contents of `path` as text. 
The file path is\n included in the document metadata.\n \"\"\"\n\n path = Path(path)\n text = path.read_text(encoding=encoding)\n return cls(text=text, metadata={\"path_to_text\": str(path.absolute())})\n\n @classmethod\n def from_dir(\n cls,\n path: os.PathLike,\n pattern: str = \"*.txt\",\n encoding: Optional[str] = \"utf-8\",\n ) -> List[Self]:\n \"\"\"\n Create documents from text files in a directory\n\n Parameters\n ----------\n path:\n Path of the directory containing text files\n pattern:\n Glob pattern to match text files in `path`\n encoding:\n Text encoding to use\n\n Returns\n -------\n List[TextDocument]:\n Text documents with contents of each file as text\n \"\"\"\n\n path = Path(path)\n files = sorted(path.glob(pattern))\n return [cls.from_file(f, encoding) for f in files]\n\n def get_snippet(self, segment: Segment, max_extend_length: int) -> str:\n \"\"\"Return a portion of the original text containing the annotation\n\n Parameters\n ----------\n segment:\n The annotation\n\n max_extend_length:\n Maximum number of characters to use around the annotation\n\n Returns\n -------\n str:\n A portion of the text around the annotation\n \"\"\"\n spans_normalized = span_utils.normalize_spans(segment.spans)\n start = min(s.start for s in spans_normalized)\n end = max(s.end for s in spans_normalized)\n start_extended = max(start - max_extend_length // 2, 0)\n remaining_max_extend_length = max_extend_length - (start - start_extended)\n end_extended = min(end + remaining_max_extend_length, len(self.text))\n return self.text[start_extended:end_extended]" }, { "identifier": "EntityNormAttribute", "path": "medkit/core/text/entity_norm_attribute.py", "snippet": "class EntityNormAttribute(Attribute):\n \"\"\"\n Normalization attribute linking an entity to an ID in a knowledge base\n\n Attributes\n ----------\n uid:\n Identifier of the attribute\n label:\n The attribute label, always set to :attr:`EntityNormAttribute.LABEL\n <.core.text.EntityNormAttribute.LABEL>`\n value:\n String representation of the normalization, containing `kb_id`, along\n with `kb_name` if available (ex: \"umls:C0011849\"). For special cases\n where only `term` is available, it is used as value.\n kb_name:\n Name of the knowledge base (ex: \"icd\"). 
Should always be provided except\n in special cases when we just want to store a normalized term.\n kb_id:\n ID in the knowledge base to which the annotation should be linked.\n Should always be provided except in special cases when we just want to\n store a normalized term.\n kb_version:\n Optional version of the knowledge base.\n term:\n Optional normalized version of the entity text.\n score:\n Optional score reflecting confidence of this link.\n metadata:\n Metadata of the attribute\n \"\"\"\n\n kb_name: Optional[str]\n kb_id: Optional[Any]\n kb_version: Optional[str]\n term: Optional[str]\n score: Optional[float]\n\n LABEL: ClassVar[str] = \"NORMALIZATION\"\n \"\"\"\n Label used for all normalization attributes\n \"\"\"\n\n def __init__(\n self,\n kb_name: Optional[str],\n kb_id: Optional[Any],\n kb_version: Optional[str] = None,\n term: Optional[str] = None,\n score: Optional[float] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if kb_id is None and term is None:\n raise ValueError(\"Must provide at least kb_id or term\")\n\n if kb_id is not None:\n if kb_name is not None:\n value = f\"{kb_name}:{kb_id}\"\n else:\n value = kb_id\n else:\n value = term\n\n super().__init__(label=self.LABEL, value=value, metadata=metadata, uid=uid)\n\n self.kb_name = kb_name\n self.kb_id = kb_id\n self.kb_version = kb_version\n self.term = term\n self.score = score\n\n def to_brat(self) -> str:\n return self.value\n\n def to_spacy(self) -> str:\n return self.value\n\n def to_dict(self) -> Dict[str, Any]:\n norm_dict = dict(\n uid=self.uid,\n label=self.label,\n kb_name=self.kb_name,\n kb_id=self.kb_id,\n kb_version=self.kb_version,\n term=self.term,\n score=self.score,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, norm_dict)\n return norm_dict\n\n @classmethod\n def from_dict(cls, data_dict: Dict[str, Any]) -> Self:\n return cls(\n uid=data_dict[\"uid\"],\n kb_name=data_dict[\"kb_name\"],\n kb_id=data_dict[\"kb_id\"],\n kb_version=data_dict[\"kb_version\"],\n term=data_dict[\"term\"],\n score=data_dict[\"score\"],\n metadata=data_dict[\"metadata\"],\n )" }, { "identifier": "ModifiedSpan", "path": "medkit/core/text/span.py", "snippet": "class ModifiedSpan(AnySpan):\n \"\"\"\n Slice of text not present in the original text\n\n Parameters\n ----------\n length:\n Number of characters\n replaced_spans:\n Slices of the original text that this span is replacing\n \"\"\"\n\n length: int\n replaced_spans: List[Span]\n\n def to_dict(self) -> Dict[str, Any]:\n replaced_spans = [s.to_dict() for s in self.replaced_spans]\n span_dict = dict(\n length=self.length,\n replaced_spans=replaced_spans,\n )\n dict_conv.add_class_name_to_data_dict(self, span_dict)\n return span_dict\n\n @classmethod\n def from_dict(cls, modified_span_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Modified from a dict\n\n Parameters\n ----------\n modified_span_dict: dict\n A dictionary from a serialized ModifiedSpan as generated by to_dict()\n \"\"\"\n\n replaced_spans = [Span.from_dict(s) for s in modified_span_dict[\"replaced_spans\"]]\n return cls(modified_span_dict[\"length\"], replaced_spans)" }, { "identifier": "Span", "path": "medkit/core/text/span.py", "snippet": "class Span(AnySpan):\n \"\"\"\n Slice of text extracted from the original text\n\n Parameters\n ----------\n start: int\n Index of the first character in the original text\n end: int\n Index of the last character in the original text, plus one\n \"\"\"\n\n start: int\n end: int\n\n @property\n def 
length(self):\n return self.end - self.start\n\n def to_dict(self) -> Dict[str, Any]:\n span_dict = dict(start=self.start, end=self.end)\n dict_conv.add_class_name_to_data_dict(self, span_dict)\n return span_dict\n\n def overlaps(self, other: Span):\n \"\"\"Test if 2 spans reference at least one character in common\"\"\"\n return (self.start < other.end) and (self.end > other.start)\n\n @classmethod\n def from_dict(cls, span_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Span from a dict\n\n Parameters\n ----------\n span_dict: dict\n A dictionary from a serialized span as generated by to_dict()\n \"\"\"\n return cls(start=span_dict[\"start\"], end=span_dict[\"end\"])" }, { "identifier": "UMLSNormAttribute", "path": "medkit/core/text/umls_norm_attribute.py", "snippet": "class UMLSNormAttribute(EntityNormAttribute):\n \"\"\"\n Normalization attribute linking an entity to a CUI in the UMLS knowledge base\n\n Attributes\n ----------\n uid:\n Identifier of the attribute\n label:\n The attribute label, always set to :attr:`EntityNormAttribute.LABEL\n <.core.text.EntityNormAttribute.LABEL>`\n value:\n CUI prefixed with \"umls:\" (ex: \"umls:C0011849\")\n kb_name:\n Name of the knowledge base. Always \"umls\"\n kb_id:\n CUI (Concept Unique Identifier) to which the annotation should be linked\n cui:\n Convenience alias of `kb_id`\n kb_version:\n Version of the UMLS database (ex: \"202AB\")\n umls_version:\n Convenience alias of `kb_version`\n term:\n Optional normalized version of the entity text\n score:\n Optional score reflecting confidence of this link\n sem_types:\n Optional IDs of semantic types of the CUI (ex: [\"T047\"])\n metadata:\n Metadata of the attribute\n \"\"\"\n\n sem_types: Optional[List[str]] = None\n\n def __init__(\n self,\n cui: str,\n umls_version: str,\n term: Optional[str] = None,\n score: Optional[float] = None,\n sem_types: Optional[List[str]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n super().__init__(\n kb_name=\"umls\",\n kb_id=cui,\n kb_version=umls_version,\n term=term,\n score=score,\n metadata=metadata,\n uid=uid,\n )\n self.sem_types = sem_types\n\n @property\n def cui(self):\n return self.kb_id\n\n @property\n def umls_version(self):\n return self.kb_version\n\n def to_dict(self) -> Dict[str, Any]:\n norm_dict = dict(\n uid=self.uid,\n cui=self.cui,\n umls_version=self.umls_version,\n term=self.term,\n score=self.score,\n sem_types=self.sem_types,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, norm_dict)\n return norm_dict\n\n @classmethod\n def from_dict(cls, data: Dict[str, Any]) -> Self:\n return cls(\n uid=data[\"uid\"],\n cui=data[\"cui\"],\n umls_version=data[\"umls_version\"],\n term=data[\"term\"],\n score=data[\"score\"],\n sem_types=data[\"sem_types\"],\n metadata=data[\"metadata\"],\n )" }, { "identifier": "BratAnnConfiguration", "path": "medkit/io/_brat_utils.py", "snippet": "class BratAnnConfiguration:\n \"\"\"A data structure to represent 'annotation.conf' in brat documents.\n This is necessary to generate a valid annotation project in brat.\n An 'annotation.conf' has four sections. 
The section 'events' is not\n supported in medkit, so the section is empty.\n \"\"\"\n\n def __init__(self, top_values_by_attr: int = 50):\n self._entity_types: Set[str] = set()\n # key: relation type\n self._rel_types_arg_1: Dict[str, Set[str]] = defaultdict(set)\n # key: relation type\n self._rel_types_arg_2: Dict[str, Set[str]] = defaultdict(set)\n # key: attribute type\n self._attr_entity_values: Dict[str, List[str]] = defaultdict(list)\n self._attr_relation_values: Dict[str, List[str]] = defaultdict(list)\n # 'n' most common values by attr to be included in the conf file\n self.top_values_by_attr = top_values_by_attr\n\n # return sorted version of BratAnnotationConfiguration\n @property\n def entity_types(self) -> List[str]:\n return sorted(self._entity_types)\n\n @property\n def rel_types_arg_1(self) -> Dict[str, List[str]]:\n rels = {}\n for rel_type, values in self._rel_types_arg_1.items():\n rels[rel_type] = sorted(values)\n return rels\n\n @property\n def rel_types_arg_2(self) -> Dict[str, List[str]]:\n rels = {}\n for rel_type, values in self._rel_types_arg_2.items():\n rels[rel_type] = sorted(values)\n return rels\n\n # as brat only allows defined values, certain data types\n # are not fully supported (e.g. int, float).\n # We limit the number of different values of an attribute\n # to show in the configuration.\n @property\n def attr_relation_values(self) -> Dict[str, List[str]]:\n attrs = {}\n for attr_type, values in self._attr_relation_values.items():\n # get the 'n' most common values in the attr\n most_common_values = Counter(values).most_common(self.top_values_by_attr)\n attrs[attr_type] = sorted(attr_value for attr_value, _ in most_common_values)\n return attrs\n\n @property\n def attr_entity_values(self) -> Dict[str, List[str]]:\n attrs = {}\n for attr_type, values in self._attr_entity_values.items():\n # get the 'n' most common values in the attr\n most_common_values = Counter(values).most_common(self.top_values_by_attr)\n attrs[attr_type] = sorted(attr_value for attr_value, _ in most_common_values)\n return attrs\n\n def add_entity_type(self, type: str):\n self._entity_types.add(type)\n\n def add_relation_type(self, relation_conf: RelationConf):\n self._rel_types_arg_1[relation_conf.type].add(relation_conf.arg1)\n self._rel_types_arg_2[relation_conf.type].add(relation_conf.arg2)\n\n def add_attribute_type(self, attr_conf: AttributeConf):\n if attr_conf.from_entity:\n self._attr_entity_values[attr_conf.type].append(attr_conf.value)\n else:\n self._attr_relation_values[attr_conf.type].append(attr_conf.value)\n\n def to_str(self) -> str:\n annotation_conf = (\n \"#Text-based definitions of entity types, relation types\\n\"\n \"#and attributes. 
This file was generated using medkit\\n\"\n \"#from the HeKa project\"\n )\n annotation_conf += \"\\n[entities]\\n\\n\"\n entity_section = \"\\n\".join(self.entity_types)\n annotation_conf += entity_section\n\n # add relations section\n annotation_conf += \"\\n[relations]\\n\\n\"\n annotation_conf += \"# This line enables entity overlapping\\n\"\n annotation_conf += \"<OVERLAP>\\tArg1:<ENTITY>, Arg2:<ENTITY>, <OVL-TYPE>:<ANY>\\n\"\n\n rel_types_arg_1 = self.rel_types_arg_1\n rel_types_arg_2 = self.rel_types_arg_2\n for type in rel_types_arg_1:\n arg_1_types = rel_types_arg_1[type]\n arg_2_types = rel_types_arg_2[type]\n relation_line = self._relation_to_str(type, arg_1_types, arg_2_types)\n annotation_conf += f\"{relation_line}\\n\"\n\n # add attributes section\n attr_entity_values = self.attr_entity_values\n annotation_conf += \"[attributes]\\n\\n\"\n for type, values in attr_entity_values.items():\n attr_line = self._attribute_to_str(type, values, True)\n annotation_conf += f\"{attr_line}\\n\"\n\n attr_relation_values = self.attr_relation_values\n for type, values in attr_relation_values.items():\n attr_line = self._attribute_to_str(type, values, False)\n annotation_conf += f\"{attr_line}\\n\"\n # add events section (empty)\n annotation_conf += \"[events]\\n\\n\"\n return annotation_conf\n\n @staticmethod\n def _attribute_to_str(type: str, values: List[str], from_entity: bool) -> str:\n arg = \"<ENTITY>\" if from_entity else \"<RELATION>\"\n values_str = \"|\".join(values)\n return f\"{type}\\tArg:{arg}\" if not values_str else f\"{type}\\tArg:{arg}, Value:{values_str}\"\n\n @staticmethod\n def _relation_to_str(type: str, arg_1_types: List[str], arg_2_types: List[str]) -> str:\n arg_1_str = \"|\".join(arg_1_types)\n arg_2_str = \"|\".join(arg_2_types)\n return f\"{type}\\tArg1:{arg_1_str}, Arg2:{arg_2_str}\"" }, { "identifier": "BratAttribute", "path": "medkit/io/_brat_utils.py", "snippet": "class BratAttribute:\n \"\"\"A simple attribute data structure.\"\"\"\n\n uid: str\n type: str\n target: str\n value: str = None # Only one value is possible\n\n def to_str(self) -> str:\n value = ensure_attr_value(self.value)\n value_str = f\" {value}\" if value else \"\"\n return f\"{self.uid}\\t{self.type} {self.target}{value_str}\\n\"" }, { "identifier": "BratEntity", "path": "medkit/io/_brat_utils.py", "snippet": "class BratEntity:\n \"\"\"A simple entity annotation data structure.\"\"\"\n\n uid: str\n type: str\n span: List[Tuple[int, int]]\n text: str\n\n @property\n def start(self) -> int:\n return self.span[0][0]\n\n @property\n def end(self) -> int:\n return self.span[-1][-1]\n\n def to_str(self) -> str:\n spans_str = \";\".join(f\"{span[0]} {span[1]}\" for span in self.span)\n return f\"{self.uid}\\t{self.type} {spans_str}\\t{self.text}\\n\"" }, { "identifier": "BratNote", "path": "medkit/io/_brat_utils.py", "snippet": "class BratNote:\n \"\"\"A simple note data structure.\"\"\"\n\n uid: str\n target: str\n value: str\n type: str = \"AnnotatorNotes\"\n\n def to_str(self) -> str:\n return f\"{self.uid}\\t{self.type} {self.target}\\t{self.value}\\n\"" }, { "identifier": "BratRelation", "path": "medkit/io/_brat_utils.py", "snippet": "class BratRelation:\n \"\"\"A simple relation data structure.\"\"\"\n\n uid: str\n type: str\n subj: str\n obj: str\n\n def to_str(self) -> str:\n return f\"{self.uid}\\t{self.type} Arg1:{self.subj} Arg2:{self.obj}\\n\"" }, { "identifier": "get_anns_by_type", "path": "medkit/io/_common.py", "snippet": "def get_anns_by_type(medkit_doc: TextDocument, anns_labels: 
Optional[List[str]] = None) -> Dict[str, TextAnnotation]:\n \"\"\"Filter annotations by labels and return a dictionary by type of annotation.\n\n Parameters\n ----------\n medkit_doc:\n Text document with annotations\n anns_labels:\n Labels to filter annotations.\n If not provided, all annotations will be in the dictionary\n\n Returns\n -------\n Dict[str, TextAnnotation]\n Annotations by type: 'entities', 'relations', and 'segments'.\n\n \"\"\"\n anns_by_type = {\"entities\": [], \"relations\": [], \"segments\": []}\n annotations = medkit_doc.anns.get()\n\n if anns_labels is not None:\n # filter annotations by label\n annotations = [ann for ann in annotations if ann.label in anns_labels]\n if anns_labels and annotations == []:\n # labels_anns were a list but none of the annotations\n # had a label of interest\n labels_str = \",\".join(anns_labels)\n logger.info(f\"No medkit annotations were included because none have '{labels_str}'\" \" as label.\")\n\n for ann in annotations:\n if isinstance(ann, Entity):\n anns_by_type[\"entities\"].append(ann)\n elif isinstance(ann, Relation):\n anns_by_type[\"relations\"].append(ann)\n elif isinstance(ann, Segment):\n anns_by_type[\"segments\"].append(ann)\n return anns_by_type" }, { "identifier": "BratOutputConverter", "path": "medkit/io/brat.py", "snippet": "class BratOutputConverter(OutputConverter):\n \"\"\"Class in charge of converting a list of TextDocuments into a\n brat collection file.\n\n .. hint::\n BRAT checks the coherence between span and text for each annotation.\n This converter adjusts the text and spans to get the right visualization\n and ensure compatibility.\n \"\"\"\n\n def __init__(\n self,\n anns_labels: Optional[List[str]] = None,\n attrs: Optional[List[str]] = None,\n notes_label: str = \"brat_note\",\n ignore_segments: bool = True,\n convert_cuis_to_notes: bool = True,\n create_config: bool = True,\n top_values_by_attr: int = 50,\n uid: Optional[str] = None,\n ):\n \"\"\"\n Initialize the Brat output converter\n\n Parameters\n ----------\n anns_labels:\n Labels of medkit annotations to convert into Brat annotations.\n If `None` (default) all the annotations will be converted\n attrs:\n Labels of medkit attributes to add in the annotations that will be included.\n If `None` (default) all medkit attributes found in the segments or relations\n will be converted to Brat attributes\n notes_label:\n Label of attributes that will be converted to annotator notes.\n ignore_segments:\n If `True` medkit segments will be ignored. Only entities, attributes and relations\n will be converted to Brat annotations. If `False` the medkit segments will be\n converted to Brat annotations as well.\n convert_cuis_to_notes:\n If `True`, UMLS normalization attributes will be converted to\n annotator notes rather than attributes. For entities with multiple\n UMLS attributes, CUIs will be separated by spaces (ex: \"C0011849 C0004096\").\n create_config:\n Whether to create a configuration file for the generated collection.\n This file defines the types of annotations generated, it is necessary for the correct\n visualization on Brat.\n top_values_by_attr:\n Defines the number of most common values by attribute to show in the configuration.\n This is useful when an attribute has a large number of values, only the 'top' ones\n will be in the config. 
By default, the top 50 of values by attr will be in the config.\n uid:\n Identifier of the converter\n \"\"\"\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.anns_labels = anns_labels\n self.attrs = attrs\n self.notes_label = notes_label\n self.ignore_segments = ignore_segments\n self.convert_cuis_to_notes = convert_cuis_to_notes\n self.create_config = create_config\n self.top_values_by_attr = top_values_by_attr\n\n @property\n def description(self) -> OperationDescription:\n config = dict(\n anns_labels=self.anns_labels,\n attrs=self.attrs,\n ignore_segments=self.ignore_segments,\n create_config=self.create_config,\n top_values_by_attr=self.top_values_by_attr,\n )\n return OperationDescription(uid=self.uid, class_name=self.__class__.__name__, config=config)\n\n def save(\n self,\n docs: List[TextDocument],\n dir_path: Union[str, Path],\n doc_names: Optional[List[str]] = None,\n ):\n \"\"\"Convert and save a collection or list of TextDocuments into a Brat collection.\n For each collection or list of documents, a folder is created with '.txt' and '.ann'\n files; an 'annotation.conf' is saved if required.\n\n Parameters\n ----------\n docs:\n List of medkit doc objects to convert\n dir_path:\n String or path object to save the generated files\n doc_names:\n Optional list with the names for the generated files. If 'None', 'uid' will\n be used as the name. Where 'uid.txt' has the raw text of the document and\n 'uid.ann' the Brat annotation file.\n \"\"\"\n\n if doc_names is not None:\n assert len(doc_names) == len(docs)\n\n dir_path = Path(dir_path)\n dir_path.mkdir(parents=True, exist_ok=True)\n config = BratAnnConfiguration(self.top_values_by_attr)\n\n for i, medkit_doc in enumerate(docs):\n text = medkit_doc.text\n doc_id = medkit_doc.uid if doc_names is None else doc_names[i]\n\n # convert medkit anns to brat format\n annotations = get_anns_by_type(medkit_doc, anns_labels=self.anns_labels)\n all_segments = annotations[\"entities\"]\n\n if not self.ignore_segments:\n # In brat only entities exists, in some cases\n # a medkit document could include segments\n # that may be exported as entities\n all_segments += annotations[\"segments\"]\n\n brat_anns = self._convert_medkit_anns_to_brat(\n segments=all_segments,\n relations=annotations[\"relations\"],\n config=config,\n raw_text=text,\n )\n\n # save text file\n text_path = dir_path / f\"{doc_id}{TEXT_EXT}\"\n text_path.write_text(text, encoding=\"utf-8\")\n # save ann file\n ann_path = dir_path / f\"{doc_id}{ANN_EXT}\"\n brat_str = \"\".join(f\"{brat_ann.to_str()}\" for brat_ann in brat_anns)\n ann_path.write_text(brat_str, encoding=\"utf-8\")\n\n if self.create_config:\n # save configuration file by collection or list of documents\n conf_path = dir_path / ANN_CONF_FILE\n conf_path.write_text(config.to_str(), encoding=\"utf-8\")\n\n def _convert_medkit_anns_to_brat(\n self,\n segments: List[Segment],\n relations: List[Relation],\n config: BratAnnConfiguration,\n raw_text: str,\n ) -> List[Union[BratEntity, BratAttribute, BratRelation, BratNote]]:\n \"\"\"\n Convert Segments, Relations and Attributes into brat data structures\n\n Parameters\n ----------\n segments:\n Medkit segments to convert\n relations:\n Medkit relations to convert\n config:\n Optional `BratAnnConfiguration` structure, this object is updated\n with the types of the generated Brat annotations.\n raw_text:\n Text of reference to get the original text of the annotations\n Returns\n -------\n List[Union[BratEntity, BratAttribute, BratRelation, BratNote]]\n A 
list of brat annotations\n \"\"\"\n nb_segment, nb_relation, nb_attribute, nb_note = 1, 1, 1, 1\n brat_entities_by_medkit_id = dict()\n brat_anns = []\n\n # First convert segments then relations including its attributes\n for medkit_segment in segments:\n brat_entity = self._convert_segment_to_brat(medkit_segment, nb_segment, raw_text)\n brat_anns.append(brat_entity)\n # store link between medkit id and brat entities\n # (needed for relations)\n brat_entities_by_medkit_id[medkit_segment.uid] = brat_entity\n config.add_entity_type(brat_entity.type)\n nb_segment += 1\n\n # include selected attributes\n if self.attrs is None:\n attrs = medkit_segment.attrs.get()\n else:\n attrs = [a for label in self.attrs for a in medkit_segment.attrs.get(label=label)]\n for attr in attrs:\n # skip UMLS attributes that will be converted to notes\n if self.convert_cuis_to_notes and isinstance(attr, UMLSNormAttribute):\n continue\n # skip attributes that will be converted to notes\n if attr.label == self.notes_label:\n continue\n\n value = attr.to_brat()\n\n if isinstance(value, bool) and not value:\n # in brat 'False' means the attributes does not exist\n continue\n\n try:\n brat_attr, attr_config = self._convert_attribute_to_brat(\n label=attr.label,\n value=value,\n nb_attribute=nb_attribute,\n target_brat_id=brat_entity.uid,\n is_from_entity=True,\n )\n brat_anns.append(brat_attr)\n config.add_attribute_type(attr_config)\n nb_attribute += 1\n\n except TypeError as err:\n logger.warning(f\"Ignore attribute {attr.uid}. {err}\")\n\n if self.convert_cuis_to_notes:\n cuis = [attr.kb_id for attr in attrs if isinstance(attr, UMLSNormAttribute)]\n if len(cuis):\n brat_note = self._convert_umls_attributes_to_brat_note(\n cuis=cuis,\n nb_note=nb_note,\n target_brat_id=brat_entity.uid,\n )\n brat_anns.append(brat_note)\n nb_note += 1\n\n note_attrs = medkit_segment.attrs.get(label=self.notes_label)\n if note_attrs:\n values = [a.to_brat() for a in note_attrs]\n brat_note = self._convert_attributes_to_brat_note(\n values=values,\n nb_note=nb_note,\n target_brat_id=brat_entity.uid,\n )\n brat_anns.append(brat_note)\n nb_note += 1\n\n for medkit_relation in relations:\n try:\n brat_relation, relation_config = self._convert_relation_to_brat(\n medkit_relation, nb_relation, brat_entities_by_medkit_id\n )\n brat_anns.append(brat_relation)\n config.add_relation_type(relation_config)\n nb_relation += 1\n except ValueError as err:\n logger.warning(f\"Ignore relation {medkit_relation.uid}. {err}\")\n continue\n\n # Note: it seems that brat does not support attributes for relations\n # include selected attributes\n if self.attrs is None:\n attrs = medkit_relation.attrs.get()\n else:\n attrs = [a for label in self.attrs for a in medkit_relation.attrs.get(label=label)]\n for attr in attrs:\n value = attr.to_brat()\n\n if isinstance(value, bool) and not value:\n continue\n\n try:\n brat_attr, attr_config = self._convert_attribute_to_brat(\n label=attr.label,\n value=value,\n nb_attribute=nb_attribute,\n target_brat_id=brat_relation.uid,\n is_from_entity=False,\n )\n brat_anns.append(brat_attr)\n config.add_attribute_type(attr_config)\n nb_attribute += 1\n except TypeError as err:\n logger.warning(f\"Ignore attribute {attr.uid}. 
{err}\")\n\n return brat_anns\n\n @staticmethod\n def _ensure_text_and_spans(segment: Segment, raw_text: str) -> Tuple[str, List[Tuple[int, int]]]:\n \"\"\"Ensure consistency between the segment and the raw text.\n The text of a BRAT annotation can't contain multiple white spaces (including a newline character).\n This method clean the text of the fragments and adjust its spans to point to the same\n location in the raw text.\n\n Parameters\n ----------\n segment:\n Segment to ensure\n raw_text:\n Text of reference\n\n Returns\n -------\n Tuple[str, List[Tuple[int, int]]]\n A tuple with the text cleaned and its spans\n \"\"\"\n pattern_to_clean = r\"(\\s*\\n+\\s*)\"\n segment_spans = span_utils.normalize_spans(segment.spans)\n texts_brat, spans_brat = [], []\n\n for fragment in segment_spans:\n text = raw_text[fragment.start : fragment.end]\n offset = fragment.start\n # remove leading spaces from text or multiple spaces\n text_stripped, start_text, end_text = utils.strip(text, offset)\n real_offset = offset + start_text\n\n # create text and spans without blank regions\n for match in re.finditer(pattern_to_clean, text_stripped):\n end_fragment = start_text + match.start()\n texts_brat.append(raw_text[start_text:end_fragment])\n spans_brat.append((start_text, end_fragment))\n start_text = match.end() + real_offset\n\n # add last fragment\n texts_brat.append(raw_text[start_text:end_text])\n spans_brat.append((start_text, end_text))\n\n text_brat = \" \".join(texts_brat)\n return text_brat, spans_brat\n\n def _convert_segment_to_brat(self, segment: Segment, nb_segment: int, raw_text: str) -> BratEntity:\n \"\"\"\n Get a brat entity from a medkit segment\n\n Parameters\n ----------\n segment:\n A medkit segment to convert into brat format\n nb_segment:\n The current counter of brat segments\n raw_text:\n Text of reference to get the original text of the segment\n Returns\n -------\n BratEntity\n The equivalent brat entity of the medkit segment\n \"\"\"\n assert nb_segment != 0\n brat_id = f\"T{nb_segment}\"\n # brat does not support spaces in labels\n type = segment.label.replace(\" \", \"_\")\n text, spans = self._ensure_text_and_spans(segment, raw_text)\n return BratEntity(brat_id, type, spans, text)\n\n @staticmethod\n def _convert_relation_to_brat(\n relation: Relation,\n nb_relation: int,\n brat_entities_by_segment_id: Dict[str, BratEntity],\n ) -> Tuple[BratRelation, RelationConf]:\n \"\"\"\n Get a brat relation from a medkit relation\n\n Parameters\n ----------\n relation:\n A medkit relation to convert into brat format\n nb_relation:\n The current counter of brat relations\n brat_entities_by_segment_id:\n A dict to map medkit ID to brat annotation\n\n Returns\n -------\n BratRelation\n The equivalent brat relation of the medkit relation\n RelationConf\n Configuration of the brat attribute\n\n Raises\n ------\n ValueError\n When the source or target was not found in the mapping object\n \"\"\"\n assert nb_relation != 0\n brat_id = f\"R{nb_relation}\"\n # brat does not support spaces in labels\n type = relation.label.replace(\" \", \"_\")\n subj = brat_entities_by_segment_id.get(relation.source_id)\n obj = brat_entities_by_segment_id.get(relation.target_id)\n\n if subj is None or obj is None:\n raise ValueError(\"Entity target/source was not found.\")\n\n relation_conf = RelationConf(type, arg1=subj.type, arg2=obj.type)\n return BratRelation(brat_id, type, subj.uid, obj.uid), relation_conf\n\n @staticmethod\n def _convert_attribute_to_brat(\n label: str,\n value: Union[str, None],\n 
nb_attribute: int,\n target_brat_id: str,\n is_from_entity: bool,\n ) -> Tuple[BratAttribute, AttributeConf]:\n \"\"\"\n Get a brat attribute from a medkit attribute\n\n Parameters\n ----------\n label:\n Attribute label to convert into brat format\n value:\n Attribute value\n nb_attribute:\n The current counter of brat attributes\n target_brat_id:\n Corresponding target brat ID\n\n Returns\n -------\n BratAttribute:\n The equivalent brat attribute of the medkit attribute\n AttributeConf:\n Configuration of the brat attribute\n \"\"\"\n assert nb_attribute != 0\n brat_id = f\"A{nb_attribute}\"\n type = label.replace(\" \", \"_\")\n\n value: str = brat_utils.ensure_attr_value(value)\n attr_conf = AttributeConf(from_entity=is_from_entity, type=type, value=value)\n return BratAttribute(brat_id, type, target_brat_id, value), attr_conf\n\n @staticmethod\n def _convert_umls_attributes_to_brat_note(\n cuis: List[str],\n nb_note: int,\n target_brat_id: str,\n ) -> BratNote:\n \"\"\"\n Get a brat note from a medkit umls norm attribute\n\n Parameters\n ----------\n cui:\n CUI to convert to brat note\n nb_note:\n The current counter of brat notes\n target_brat_id:\n Corresponding target brat ID\n\n Returns\n -------\n BratNote:\n The equivalent brat note of the medkit umls attribute\n \"\"\"\n assert nb_note != 0\n brat_id = f\"#{nb_note}\"\n return BratNote(uid=brat_id, target=target_brat_id, value=\" \".join(cuis))\n\n @staticmethod\n def _convert_attributes_to_brat_note(\n values: List[Any],\n nb_note: int,\n target_brat_id: str,\n ) -> BratNote:\n \"\"\"\n Get a brat note from medkit attribute values\n\n Parameters\n ----------\n values:\n Attribute values\n nb_note:\n The current counter of brat notes\n target_brat_id:\n Corresponding target brat ID\n\n Returns\n -------\n BratNote:\n The equivalent brat note of the medkit attribute values\n \"\"\"\n assert nb_note != 0\n brat_id = f\"#{nb_note}\"\n value = \"\\n\".join(str(v) for v in values if v is not None)\n return BratNote(uid=brat_id, target=target_brat_id, value=value)" } ]
from pathlib import Path
from medkit.core import Attribute
from medkit.core.text import (
    Entity,
    EntityNormAttribute,
    ModifiedSpan,
    Relation,
    Segment,
    Span,
    TextDocument,
    UMLSNormAttribute,
)
from medkit.io._brat_utils import (
    BratAnnConfiguration,
    BratAttribute,
    BratEntity,
    BratNote,
    BratRelation,
)
from medkit.io._common import get_anns_by_type
from medkit.io.brat import BratOutputConverter
import pytest
12,658
def _get_medkit_doc():
    text = "Le patient présente une douleur abdominale de grade 4, la douleur abdominale" " est sévère."
    doc = TextDocument(uid="doc_brat", text=text)
    medkit_anns = [
def _get_medkit_doc():
    text = "Le patient présente une douleur abdominale de grade 4, la douleur abdominale" " est sévère."
    doc = TextDocument(uid="doc_brat", text=text)
    medkit_anns = [
Entity(
1
2023-11-13 16:28:56+00:00
16k
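For orientation on the class exercised by the test file above: BratOutputConverter.save() takes a list of TextDocuments and writes Brat '.txt' and '.ann' files (plus an 'annotation.conf' when create_config=True). The sketch below is assembled only from the signatures quoted in the context snippets of this record; the entity label, span offsets, attribute value and output directory are illustrative assumptions, not values from the test itself.

# Minimal usage sketch of BratOutputConverter, under the assumptions stated above.
from medkit.core import Attribute
from medkit.core.text import Entity, Span, TextDocument
from medkit.io.brat import BratOutputConverter

doc = TextDocument(text="Le patient présente une douleur abdominale de grade 4.")
entity = Entity(
    label="maladie",
    text="douleur abdominale",
    spans=[Span(24, 42)],  # character offsets of the entity in the raw text
    attrs=[Attribute(label="severity", value="grade 4")],
)
doc.anns.add(entity)

# Writes doc_1.txt, doc_1.ann and annotation.conf into ./brat_out
converter = BratOutputConverter(attrs=None, ignore_segments=True, create_config=True)
converter.save([doc], dir_path="./brat_out", doc_names=["doc_1"])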
kampta/asic
train.py
[ { "identifier": "Logger", "path": "commons/logger.py", "snippet": "class Logger(SummaryWriter):\n\n def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):\n super().__init__(results_path)\n self.results_path = results_path\n self.log_to_tb = log_to_tb\n self.log_to_wandb = log_to_wandb\n\n def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),\n scale_each=False, nrow=None, **kwargs):\n nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow\n if type(images[0]) is torch.Tensor:\n ndarr = images2grid(images, return_as_PIL=True, nrow=nrow,\n normalize=True, value_range=range,\n scale_each=scale_each, **kwargs)\n grid = Image.fromarray(ndarr)\n grid.save(f\"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png\")\n if self.log_to_wandb:\n wandb.log({logging_name: wandb.Image(grid)}, step=itr)\n else:\n grid = concat_v(*images)\n grid.save(f\"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png\")\n if self.log_to_wandb:\n wandb.log({logging_name: [wandb.Image(im) for im in images]}, step=itr)\n\n if self.log_to_tb:\n self.add_image(f\"{prefix}/{logging_name}\", ndarr, itr,\n dataformats='HWC')\n\n def log_image_grid(self, images, logging_name, itr, imgs_to_show,\n log_mean_img=True, mean_range=None, range=(-1, 1),\n scale_each=False, num_heads=1, nrow=None, **kwargs):\n self._log_image_grid(images[:imgs_to_show], logging_name, \"grids\", itr,\n range=range, scale_each=scale_each, nrow=nrow, **kwargs)\n if log_mean_img: # Log average images:\n images = images.reshape(images.size(0) // num_heads, num_heads,\n *images.size()[1:])\n self._log_image_grid(images.mean(dim=0), f'mean_{logging_name}',\n \"means\", itr, range=mean_range,\n scale_each=True, nrow=nrow)\n\n def add_scalar(self, tag, scalar_value, global_step=None, **kwargs):\n if self.log_to_wandb:\n wandb.log({tag: scalar_value}, step=global_step)\n return super().add_scalar(tag, scalar_value, global_step, **kwargs)\n\n def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, **kwargs):\n if self.log_to_wandb:\n wandb.log(tag_scalar_dict, step=global_step)\n return super().add_scalars(main_tag, tag_scalar_dict, global_step, **kwargs)" }, { "identifier": "log_visuals", "path": "commons/logger.py", "snippet": "@torch.inference_mode()\ndef log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2,\n vis_denseres=32):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n pseudo_kps = dset.pseudo_kps\n parts = dset.parts\n vis_sample = min(vis_sample, len(dset))\n res = dset.img_size\n has_gt_kp = dset.kps is not None\n has_fixed_pairs = dset.fixed_pairs is not None # SPair\n\n # Run full test dataloader (assuming small dataset)\n all_imgs = dset.imgs\n all_masks = dset.masks\n all_kps = dset.kps\n all_flows, _ = stn(all_imgs)\n\n if has_gt_kp:\n kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()\n kps_cols = map_minmax(kps_cols, 0, 1, -1, 1).to(device).unsqueeze(0)\n\n parts_cols = torch.from_numpy(get_colors(dset.num_parts+1)).float()\n parts_cols = map_minmax(parts_cols, 0, 1, -1, 1).to(device)\n parts_cols[-1] = 0\n\n # Text logging\n text_kp, text_kp_col = load_text_points('CVPR')\n text_kp = text_kp.to(device).unsqueeze(0)\n text_kp_col = text_kp_col.to(device).unsqueeze(0)\n\n pairs = sample_tuples(len(dset), count=vis_sample, seed=0)\n src_idx, trg_idx = pairs[:, 0], pairs[:, 1]\n\n # Log only once during the training\n if train_idx == 0:\n # Log images and the mask\n writer.log_image_grid(all_imgs[:vis_sample], 'img', train_idx,\n 
vis_sample, nrow=vis_sample)\n writer.log_image_grid(all_imgs[:vis_sample]*all_masks[:vis_sample],\n 'img_mask', train_idx, vis_sample, nrow=vis_sample)\n\n # Log neural best buddies (sparse)\n kp1 = pseudo_kps[src_idx, trg_idx]\n kp2 = pseudo_kps[trg_idx, src_idx]\n kp_vis = kp1[..., -1] * kp2[..., -1]\n kp1, kp2 = kp1[..., :2], kp2[..., :2]\n colors = map_minmax(get_dense_colors(kp1), 0, 1, -1, 1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n\n writer.log_image_grid(stacked, 'kp_pseudo_gt', train_idx, 2*vis_sample,\n log_mean_img=False, nrow=2)\n\n # Log parts\n parts_img = parts_cols[parts[:vis_sample]].permute(0, 3, 1, 2)\n writer.log_image_grid(parts_img, 'parts', train_idx, vis_sample,\n nrow=vis_sample, log_mean_img=False)\n\n # Log groundtruth kp\n if has_gt_kp:\n kp1, kp2 = all_kps[src_idx], all_kps[trg_idx]\n kp_vis = kp1[..., -1] * kp2[..., -1]\n kp1, kp2 = kp1[..., :2], kp2[..., :2]\n\n colors = kps_cols.expand(vis_sample, -1, -1)\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n writer.log_image_grid(stacked, 'kp_gt', train_idx, 2*vis_sample,\n log_mean_img=False, nrow=2)\n\n # Log kp and top predictions by STN (if kp are available)\n if has_gt_kp:\n kp1 = all_kps[src_idx][..., :2]\n kp_vis = all_kps[src_idx][..., 2]\n\n kp_pred = stn.transfer_points(\n kp1, src_idx, trg_idx, all_flows, mask=all_masks, res=res, is_flow=True)\n colors = kps_cols.expand(vis_sample, -1, -1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp_pred.float(), sigma=3., opacity=1.0,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n writer.log_image_grid(stacked, 'kp_pred_sparse', train_idx,\n 2*vis_sample, log_mean_img=False, nrow=2)\n\n # Log current canon image\n canon_grid = canon.get_grid(vis_sample)\n if canon_grid.size(1) > 3:\n canon_grid = canon_grid[:, :3]\n scale_factor = res / canon_grid.size(-1)\n canon_grid = F.interpolate(\n canon_grid, scale_factor=scale_factor, mode='bilinear')\n writer.log_image_grid(canon_grid, 'canon', train_idx, 1, log_mean_img=False)\n\n # Log dense correspondences\n kp, kp_vis, kp_col_dense = load_fg_points(all_masks[src_idx],\n resolution=vis_denseres)\n kp_pred, kp_canon = stn.transfer_points(\n kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,\n return_canon=True, is_flow=True)\n colors = map_minmax(kp_col_dense, 0, 1, -1, 1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp, sigma=4., opacity=0.75,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n blend_trg = splat_points(\n all_imgs[trg_idx], kp_pred.float(), sigma=4., opacity=0.75,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n blend_canon = splat_points(\n torch.ones_like(canon_grid) * -1, kp_canon, sigma=1.3, opacity=1.0,\n colors=colors, 
alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\\\n flatten(0, 1)\n writer.log_image_grid(\n stacked, 'kp_pred_dense', train_idx, 3*vis_sample,\n log_mean_img=False, nrow=3)\n\n # # Log dense correspondences with text\n # text_kp = text_kp.expand(vis_sample, -1, -1)\n # text_kp_col = text_kp_col.expand(vis_sample, -1, -1)\n # kp_pred, kp_canon = stn.transfer_points(\n # text_kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,\n # return_canon=True, is_flow=True)\n\n # blend_src = splat_points(all_imgs[src_idx], text_kp, sigma=0.7, opacity=1.,\n # colors=text_kp_col)\n\n # blend_trg = splat_points(all_imgs[trg_idx], kp_pred.float(), sigma=0.7,\n # opacity=1., colors=text_kp_col)\n\n # blend_canon = splat_points(torch.ones_like(canon_grid) * -1, kp_canon,\n # sigma=0.7, opacity=1., colors=text_kp_col)\n\n # stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\\\n # flatten(0, 1)\n # writer.log_image_grid(\n # stacked, 'kp_pred_text', train_idx, 3*vis_sample,\n # log_mean_img=False, nrow=3)\n\n # Log dense mapping from canonical space to Image space\n wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)\n colors = wheel.expand(vis_sample, -1, -1, -1)\n flow, _ = stn(all_imgs[src_idx])\n colors = F.grid_sample(colors, flow, padding_mode='border',\n align_corners=True)\n colors = map_minmax(colors, 0, 1, -1, 1)\n alpha = 0.5\n blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \\\n (all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]\n blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])\n writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),\n log_mean_img=False, nrow=len(blend_img)//2)\n\n # Log keypoints from Image space to canonical space\n if has_gt_kp:\n canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)\n canon_corrs = stn.unnormalize(canon_corrs, res, res)\n canon_vis = all_kps[..., -1]\n num_kp = canon_vis.size(-1)\n N = canon_vis.size(0)\n colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)\n heatmaps = splat_points(\n torch.ones(num_kp, 3, res, res, device=device) * -1,\n canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,\n colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))\n writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,\n num_kp, padding=2, pad_value=1.)\n\n # Log parts from Image space to canonical space\n # Splat one part at a time to canonical\n # TODO: splat all at once\n num_parts = dset.num_parts\n part_kp_canons = []\n part_kp_vis = [] \n for part in range(num_parts):\n part_masks = (parts == part).float().unsqueeze(1)\n kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)\n kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)\n kp_canon = stn.unnormalize(kp_canon, res, res)\n part_kp_canons.append(kp_canon.reshape(-1, 2))\n part_kp_vis.append(kp_vis.reshape(-1))\n\n part_kp_canons = torch.stack(part_kp_canons)\n part_kp_vis = torch.stack(part_kp_vis)\n colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)\n heatmaps = splat_points(\n torch.ones(num_parts, 3, res, res, device=device) * -1,\n part_kp_canons, sigma=2., opacity=1.,\n colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))\n writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,\n num_parts, padding=2, pad_value=1.)\n\n # Compute PCKs\n N = all_imgs.size(0)\n transfer_fn = stn.transfer_points\n pck_pairs = 
None\n if has_gt_kp:\n # First compute PCK for all 2-pairs\n if has_fixed_pairs:\n tuples = dset.fixed_pairs\n if dset.thresholds is not None:\n thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]\n else:\n thresholds = None\n else:\n tuples = sample_tuples(N)\n thresholds = None\n print(f\"First computing 2-point PCK for {len(tuples)} pairs\")\n gt_corrs, pred_corrs, vis = pck_loop(\n tuples, all_kps, transfer_fn, all_flows, all_masks, res,\n return_canon=False, is_flow=True)\n pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,\n img_size=res)\n\n # Compute k-cycle PCK\n pck_cycles = []\n if not has_gt_kp:\n kp, kp_vis, kp_col_dense = load_fg_points(all_masks,\n resolution=vis_denseres)\n ignore_idx = kp_vis.sum(dim=0) == 0\n all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)\n ignore_interim = True\n else:\n ignore_interim = False\n\n for k in [2, 3, 4]:\n tuples = sample_tuples(N, k=k, count=200)\n if has_fixed_pairs and dset.thresholds is not None:\n thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])\n thresholds = thresholds.reshape(-1)\n else:\n thresholds = None\n print(f\"Next computing {k}-cycle PCK for {len(tuples)} tuples\")\n gt_corrs, pred_corrs, vis = pck_loop(\n tuples, all_kps, transfer_fn, all_flows, all_masks, res,\n return_canon=False, is_flow=True, ignore_interim=ignore_interim)\n pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)\n pck_cycles.append(pck)\n\n return pck_pairs, pck_cycles" }, { "identifier": "get_rank", "path": "commons/distributed.py", "snippet": "def get_rank():\n if not dist.is_available():\n return 0\n\n if not dist.is_initialized():\n return 0\n\n return dist.get_rank()" }, { "identifier": "setup_distributed", "path": "commons/distributed.py", "snippet": "def setup_distributed():\n local_rank = int(os.environ['LOCAL_RANK']) if 'LOCAL_RANK' in os.environ else 0\n n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n is_distributed = n_gpu > 1\n if is_distributed:\n torch.cuda.set_device(local_rank)\n dist.init_process_group(backend=\"nccl\", init_method=\"env://\")\n synchronize()\n return is_distributed" }, { "identifier": "reduce_loss_dict", "path": "commons/distributed.py", "snippet": "def reduce_loss_dict(loss_dict):\n world_size = get_world_size()\n\n if world_size < 2:\n return loss_dict\n\n with torch.no_grad():\n keys = []\n losses = []\n\n for k in sorted(loss_dict.keys()):\n keys.append(k)\n losses.append(loss_dict[k])\n\n losses = torch.stack(losses, 0)\n dist.reduce(losses, dst=0)\n\n if dist.get_rank() == 0:\n losses /= world_size\n\n reduced_losses = {k: v for k, v in zip(keys, losses)}\n\n return reduced_losses" }, { "identifier": "get_world_size", "path": "commons/distributed.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n\n if not dist.is_initialized():\n return 1\n\n return dist.get_world_size()" }, { "identifier": "primary", "path": "commons/distributed.py", "snippet": "def primary():\n if not dist.is_available():\n return True\n\n if not dist.is_initialized():\n return True\n\n return get_rank() == 0" }, { "identifier": "sample_tuples", "path": "commons/utils.py", "snippet": "def sample_tuples(N, k=1, count=None, seed=None):\n\n if seed is not None:\n np.random.seed(seed)\n\n if count is None: # return all possible (k+1) permutations\n # (N!/(N-k)!) 
x k array\n samples = np.array(list(permutations(range(N), k+1)))\n\n elif k == 1:\n p1 = np.random.choice(N, count)\n p2 = np.random.choice(N, count)\n return np.stack([p1, p2], axis=1)\n\n elif count == -1:\n samples = np.array(list(permutations(range(N), k)))\n samples = np.concatenate([samples, samples[:, 0].reshape(-1, 1)], axis=1)\n\n else: # sample count number of permutations\n # count x k array\n samples = np.zeros((count, k+1), dtype=int)\n for i in range(count):\n samples[i, :k] = np.random.choice(N, k, replace=False)\n # Force the last column to be same as the first column\n samples[:, k] = samples[:, 0]\n\n return samples" }, { "identifier": "CUBDataset", "path": "datasets/cub.py", "snippet": "class CUBDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, cls_idx=1,\n flow_dir=None, num_parts=0,\n mask_threshold=1, use_coseg_masks=False, padding_mode='border'):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cls_idx = cls_idx\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n self.fixed_pairs = None\n self.thresholds = None\n self.border = True if padding_mode=='border' else False\n\n os.makedirs(data_dir, exist_ok=True)\n download_cub(data_dir)\n download_cub_metadata(data_dir)\n\n self.files, self.bboxes, self.kps, self.masks = load_acsm_data(\n data_dir, size=img_size, split=split, cls_idx=cls_idx)\n\n imgs = []\n for i in range(len(self.files)):\n img = Image.open(self.files[i]).convert('RGB')\n img = cub_crop(img, self.img_size, self.bboxes[i], border=self.border)\n imgs.append(torch.from_numpy(np.array(img)).permute(2, 0, 1))\n self.imgs = torch.stack(imgs) / 127.5 - 1.0 # normalize (-1, 1)\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) > mask_threshold).float()\n\n self.parts = None\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n\n # Load pseudo keypoints\n self.pseudo_kps = None\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "InMemoryDataset", "path": "datasets/in_memory.py", "snippet": "class InMemoryDataset(Dataset):\n def __init__(self, data_dir, img_size=256, flow_dir=None,\n num_parts=0, mask_threshold=1, use_coseg_masks=False,\n every_k=1):\n\n self.img_size = img_size\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n transform = transforms.Compose([\n transforms.Resize(img_size),\n transforms.CenterCrop(img_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n files = []\n imgs = []\n for base_dir, dirnames, filenames in os.walk(data_dir):\n if len(dirnames) > 0:\n continue\n for f in sorted(filenames):\n if not f.lower().endswith(('.png', '.jpg', '.jpeg')):\n continue\n filename = Path(base_dir) / f\n files.append(filename)\n img = Image.open(filename).convert('RGB')\n imgs.append(transform(img))\n \n self.files = files[::every_k]\n self.imgs = torch.stack(imgs[::every_k])\n\n self.kps = None\n self.fixed_pairs = None\n self.thresholds = None\n self.pseudo_kps = None\n self.parts = None\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{self.files[i].stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) >= mask_threshold).float()\n\n # Load parts\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{self.files[i].stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n\n # Load pseudo keypoints\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "SpairDataset", "path": "datasets/spair.py", "snippet": "class SpairDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, spair_cat='cat',\n flow_dir=None, padding_mode='edge', num_parts=0,\n mask_threshold=1, use_coseg_masks=False):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cat = spair_cat\n self.padding_mode = padding_mode\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n transform = transforms.Compose([\n SquarePad(padding_mode),\n transforms.Resize(img_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n os.makedirs(data_dir, exist_ok=True)\n spair_dir = download_spair(data_dir)\n\n self.files, self.kps, fixed_pairs, thresholds = load_spair_data(\n spair_dir, size=img_size, split=split, category=spair_cat)\n imgs = [transform(Image.open(self.files[i]).convert('RGB'))\n for i in range(len(self))]\n self.imgs = torch.stack(imgs)\n self.fixed_pairs = np.array(fixed_pairs)\n self.thresholds = np.array(thresholds)\n\n self.masks = torch.ones(len(self), 1, img_size, img_size)\n self.pseudo_kps = None\n self.parts = None\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) >= mask_threshold).float()\n\n # Load parts\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n \n # Load pseudo keypoints\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "Augmentor", "path": "datasets/utils.py", "snippet": "class Augmentor(nn.Module):\n def __init__(self, jitter=[0.4, 0.4, 0.2, 0.1], jitter_prob=0.8,\n gray_prob=0.2, solar_prob=0.2, tps_scale=0.4):\n super().__init__()\n self.color_transform = K.AugmentationSequential(\n # https://github.com/facebookresearch/dino/blob/main/main_dino.py#L424\n K.ColorJitter(brightness=jitter[0], contrast=jitter[1],\n saturation=jitter[2], hue=jitter[3], p=jitter_prob),\n K.RandomGrayscale(p=gray_prob),\n K.RandomGaussianBlur((3, 3), (0.1, 2.0), p=0.1),\n K.RandomSolarize(0.1, 0.1, p=solar_prob),\n )\n\n self.perspective_transform = K.RandomPerspective(0.5, p=1.)\n self.affine_transform = K.RandomAffine(30, scale=(0.7, 1.1),\n padding_mode='border', p=1.0)\n self.elastic_transform = K.RandomElasticTransform(\n p=1.0, sigma=(16., 16.), alpha=(3, 3), padding_mode='border')\n\n # TPS doesn't support transforming points\n # Using it only for dense equivariance loss\n self.tps_transform = K.RandomThinPlateSpline(scale=tps_scale, p=1.)\n\n def forward(self, x):\n pass\n\n @torch.no_grad()\n def forward_color(self, img):\n return self.color_transform(img)\n\n @torch.no_grad()\n def forward_tps(self, img, fixed=False):\n if fixed:\n img_t = self.tps_transform(img, params=self.tps_transform._params)\n else:\n img_t = self.tps_transform(img)\n return img_t\n \n @torch.no_grad()\n def forward_geom(self, img, fixed=False):\n if fixed:\n img_t = self.elastic_transform(\n self.affine_transform(img, params=self.affine_transform._params),\n params=self.elastic_transform._params)\n else:\n img_t = self.elastic_transform(self.affine_transform(img))\n return img_t\n\n\n @torch.no_grad()\n def forward_perspective(self, img, fixed=False):\n if fixed:\n img_t = self.perspective_transform(img, params=self.perspective_transform._params)\n else:\n img_t = self.perspective_transform(img)\n return img_t\n\n @torch.no_grad()\n def forward_perspective_kp(self, kp):\n return kornia.geometry.transform_points(\n self.perspective_transform.transform_matrix, kp)" }, { "identifier": "accumulate", "path": "models/utils.py", "snippet": "def accumulate(model1, model2, decay=0.999):\n par1 = dict(model1.named_parameters())\n par2 = dict(model2.named_parameters())\n\n for k in par1.keys():\n par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)" }, { "identifier": "requires_grad", "path": "models/utils.py", "snippet": "def requires_grad(model, flag=True):\n for p in model.parameters():\n p.requires_grad = flag" }, { "identifier": "Canonical", "path": "models/canonical.py", "snippet": "class Canonical(nn.Module):\n def __init__(self, size, std=0.1, clamp=True):\n super().__init__()\n mean = torch.zeros(size)\n std = torch.ones(size) * std\n self.grid = nn.Parameter(torch.normal(mean=mean, std=std),\n requires_grad=True)\n norm_class = Normalize()\n norm_class.apply(self.grid)\n if clamp:\n clamp_class = Clamp()\n clamp_class.apply(self.grid)\n\n def get_grid(self, N):\n return self.grid.expand(N, -1, -1, -1)\n\n def unwarp(self, flow, sample_res=256):\n N = flow.size(0)\n if sample_res is not None and sample_res != flow.size(1):\n scale_factor = sample_res / flow.size(1)\n sample_flow = F.interpolate(\n flow.permute(0, 3, 1, 2), scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n else:\n sample_flow = flow\n warped_img = F.grid_sample(\n self.get_grid(N), sample_flow,\n padding_mode='border', align_corners=True)\n return 
warped_img\n\n def forward(self, x):\n return x" }, { "identifier": "CanonicalMLP", "path": "models/canonical.py", "snippet": "class CanonicalMLP(nn.Module):\n def __init__(self, input_dim=2, output_dim=3, hidden_dim=256,\n use_positional=True, positional_dim=10,\n skip_layers=[4, 7], num_layers=8, resolution=256,\n use_tanh=True, apply_softmax=False):\n super().__init__()\n self.use_tanh = use_tanh\n self.resolution = resolution\n self.apply_softmax = apply_softmax\n self.output_dim = output_dim\n if apply_softmax:\n self.softmax= nn.Softmax()\n if use_positional:\n encoding_dimensions = 2 * input_dim * positional_dim\n self.b = nn.Parameter(\n torch.tensor([(2 ** j) * np.pi\n for j in range(positional_dim)], requires_grad = False))\n else:\n encoding_dimensions = input_dim\n\n self.hidden = nn.ModuleList()\n for i in range(num_layers):\n if i == 0:\n input_dims = encoding_dimensions\n elif i in skip_layers:\n input_dims = hidden_dim + encoding_dimensions\n else:\n input_dims = hidden_dim\n\n if i == num_layers - 1:\n # last layer\n self.hidden.append(nn.Linear(input_dims, output_dim, bias=True))\n else:\n self.hidden.append(nn.Linear(input_dims, hidden_dim, bias=True))\n\n self.skip_layers = skip_layers\n self.num_layers = num_layers\n\n self.positional_dim = positional_dim\n self.use_positional = use_positional\n\n def get_grid(self, N, device='cuda'):\n resolution = self.resolution\n indsy = torch.linspace(0, resolution-1, resolution, device=device)\n indsx = torch.linspace(0, resolution-1, resolution, device=device)\n\n # Keep (x, y) indexing to make it consistent with the flow\n points = torch.stack(\n torch.meshgrid(indsx, indsy, indexing='xy'), dim=-1).reshape(-1, 2)\n\n with torch.no_grad():\n grid = self(points)\n\n grid = grid.reshape(1, resolution, resolution, self.output_dim)\n grid = grid.permute(0, 3, 1, 2)\n return grid.expand(N, -1, -1, -1)\n\n def unwarp(self, flow, sample_res=256):\n N = flow.size(0)\n # Output of flow model is usually normalized between -1 and 1\n # So we need to first scale it up to self.resolution\n flow = map_minmax(flow, -1, 1, 0, self.resolution-1)\n\n # Resize flow if computed at a lower resolution\n if sample_res is not None and sample_res != flow.size(1):\n scale_factor = sample_res / flow.size(1)\n sample_flow = F.interpolate(\n flow.permute(0, 3, 1, 2), scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n else:\n sample_flow = flow\n\n # Unwarp\n warped_img = self(sample_flow.reshape(-1, 2))\n warped_img = warped_img.reshape(N, sample_res, sample_res, -1)\n warped_img = warped_img.permute(0, 3, 1, 2)\n return warped_img\n\n def forward(self, x):\n if self.use_positional:\n if self.b.device != x.device:\n self.b = self.b.to(x.device)\n pos = positionalEncoding_vec(x, self.b)\n x = pos\n\n input = x.detach().clone()\n for i, layer in enumerate(self.hidden):\n if i > 0:\n x = F.relu(x)\n if i in self.skip_layers:\n x = torch.cat((x, input), 1)\n x = layer(x)\n\n if self.use_tanh:\n x = torch.tanh(x)\n\n if self.apply_softmax:\n x = self.softmax(x)\n return x" }, { "identifier": "Asic", "path": "models/asic.py", "snippet": "class Asic(nn.Module):\n def __init__(self, in_ch, in_size, mf=1., bilinear=False,\n padding_mode='zeros', use_tanh=False):\n super().__init__()\n self.model = UNet(in_ch, 2, mf=mf, bilinear=bilinear)\n self.size = in_size\n self.register_buffer('identity_flow', self.get_identity_flow())\n self.padding_mode = padding_mode\n self.use_tanh = use_tanh\n\n def get_identity_flow(self):\n return F.affine_grid(\n 
torch.eye(2, 3).unsqueeze(0), (1, 1, self.size, self.size),\n align_corners=True).permute(0, 3, 1, 2).contiguous()\n\n def forward(self, x):\n if self.use_tanh:\n flow = torch.tanh(self.model(x))\n delta_flow = flow - self.identity_flow\n else:\n delta_flow = self.model(x) # (N, 2, H, W)\n flow = self.identity_flow + delta_flow\n\n flow = flow.permute(0, 2, 3, 1)\n delta_flow = delta_flow.permute(0, 2, 3, 1)\n return flow, delta_flow\n\n @torch.no_grad()\n def transfer_points(self, src_kps, src_idx, trg_idx, img, mask=None,\n res=None, return_canon=False, is_flow=False):\n # src_kps are N x P x 2 (in xy format)\n\n # Compute flow from images\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n\n # Step 1: Map the points in src to the canonical space\n max_batch_size = 2\n if src_kps.size(0) > max_batch_size:\n N = len(src_kps)\n points_canon = []\n for start_idx in range(0, N, max_batch_size):\n end_idx = min(start_idx+max_batch_size, N)\n\n points_canon_batch = self.transfer_forward(\n flow[src_idx[start_idx:end_idx]],\n src_kps[start_idx:end_idx], res=res, is_flow=True)\n points_canon.append(points_canon_batch)\n points_canon = torch.cat(points_canon, dim=0)\n else:\n points_canon = self.transfer_forward(flow[src_idx], src_kps,\n res=res, is_flow=True)\n # points_canon = torch.clamp(points_canon, min=-1, max=1)\n\n # Step 2: Map the points in the canonical space to trg\n # This is a memory intensive step, so do a single image at a time\n # if the number of points are large\n if src_kps.size(1) > 256 or src_kps.size(0) > max_batch_size:\n N = len(src_kps)\n points_transfered = []\n for start_idx in range(0, N, max_batch_size):\n end_idx = min(start_idx+max_batch_size, N)\n points_transfered_single = self.transfer_reverse(\n flow[[trg_idx[start_idx:end_idx]]],\n points_canon[start_idx:end_idx], res=res,\n mask=mask[trg_idx[start_idx:end_idx]], is_flow=True)\n points_transfered.append(points_transfered_single)\n points_transfered = torch.cat(points_transfered, dim=0)\n else:\n points_transfered = self.transfer_reverse(\n flow[trg_idx], points_canon, res=res, mask=mask[trg_idx],\n is_flow=True)\n\n if return_canon:\n points_canon = self.unnormalize(points_canon, res, res)\n return points_transfered, points_canon\n else:\n return points_transfered\n\n def transfer_forward(self, img, points, res=None, is_flow=False):\n\n # TODO: currently points generated by load_fg_points are not\n # scaled properly. 
Take a look\n # TODO: Also double check normalize and unnormalize logic\n # points are N x P x 2 (in xy format)\n # assume that the flow is also xy format\n points = self.normalize(points, res, res)\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n flow_grid = flow.permute(0, 3, 1, 2)\n points_transfered = F.grid_sample(\n flow_grid, points.unsqueeze(2).float(),\n padding_mode='border', align_corners=True)\n points_transfered = points_transfered.squeeze(3).permute(0, 2, 1)\n\n return points_transfered\n\n def transfer_reverse(self, img, points, res=None, mask=None, is_flow=False):\n N = points.size(0)\n num_points = points.size(1)\n # points are N x P x 2 (in xy format)\n points = points\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n if flow.size(1) != res:\n scale_factor = res/flow.size(1)\n flow = F.interpolate(\n flow.permute(0, 3, 1, 2),\n scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n # From (N, H, W, 2) to (N, H, W, 1, 1, 2)\n flow_reshaped = flow.unsqueeze(-2).unsqueeze(-2)\n\n # From (N, num_points, 2) to (N, 1, 1, num_points, 2, 1)\n points = points.unsqueeze(1).unsqueeze(1).unsqueeze(-1)\n\n # (N, H, W, num_points)\n similarities = (flow_reshaped @ points)[..., 0, 0]\n distances = points.pow(2).squeeze(-1).sum(dim=-1) + \\\n flow_reshaped.pow(2).sum(dim=-1).squeeze(-1) - 2 * similarities\n\n if mask is not None:\n distances[mask.squeeze(1)<0.1] = float('inf')\n\n nearest_neighbors = distances.reshape(\n N, flow_reshaped.size(1) * flow_reshaped.size(2),\n num_points).argmin(dim=1)\n points_transfered = unravel_index(\n nearest_neighbors, (flow_reshaped.size(1), flow_reshaped.size(2)))\n return points_transfered\n\n @staticmethod\n def normalize(points, res, out_res):\n return points.div(out_res - 1).add(-0.5).mul(2).mul((res - 1) / res)\n\n @staticmethod\n def unnormalize(points, res, out_res):\n return points.div((res - 1) / res).div(2).add(0.5).mul(out_res - 1)" }, { "identifier": "total_variation_loss", "path": "losses/reg_losses.py", "snippet": "def total_variation_loss(delta_flow, reduce_batch=True):\n # flow should be size (N, H, W, 2)\n reduce_dims = (0, 1, 2, 3) if reduce_batch else (1, 2, 3)\n distance_fn = lambda a: torch.where(a <= 1.0, 0.5 * a.pow(2), a - 0.5).mean(dim=reduce_dims)\n # assert delta_flow.size(-1) == 2\n diff_y = distance_fn((delta_flow[:, :-1, :, :] - delta_flow[:, 1:, :, :]).abs())\n diff_x = distance_fn((delta_flow[:, :, :-1, :] - delta_flow[:, :, 1:, :]).abs())\n loss = diff_x + diff_y\n return loss" }, { "identifier": "get_perceptual_loss", "path": "thirdparty/lpips/lpips.py", "snippet": "def get_perceptual_loss(loss_fn, device):\n if loss_fn == 'vgg_ssl':\n download_model('simclr_vgg_phase150') # Download the weights\n loss_fn_vgg = LPIPS(net='vgg', lpips=False, pnet_rand=True, pretrained_weights='pretrained/simclr_vgg_phase150.pt').to(device)\n loss_fn = lambda x,y: loss_fn_vgg(x, y) / 18.0\n elif loss_fn == 'lpips':\n download_lpips() # Download LPIPS weights\n loss_fn = LPIPS(net='vgg').to(device)\n else:\n raise NotImplementedError\n return loss_fn" }, { "identifier": "LossCorrsSparse", "path": "losses/matching_losses.py", "snippet": "class LossCorrsSparse(nn.Module):\n def __init__(self, extractor=None, flow_size=256, T=1.0):\n super().__init__()\n self.extractor = extractor\n self.flow_size = flow_size\n self.T = T\n self.dist_fn = nn.PairwiseDistance(p=2)\n self.loss_fn = nn.CrossEntropyLoss(reduction='none')\n\n def forward(self, src_flow, trg_flow, src_kp, trg_kp, kp_vis, kp_wt):\n N = 
src_flow.size(0)\n res = src_flow.size(1)\n top_k = kp_vis.shape[1]\n # bb1_canon - N x 2 x top_k x 1\n # bb2_canon - N x 2 x 1 x top_k\n # Sample flow values using the pseudo GT from the flow_grid\n src_kp_canon = F.grid_sample(\n src_flow.permute(0, 3, 1, 2),\n map_minmax(src_kp.unsqueeze(2), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n trg_kp_canon = F.grid_sample(\n trg_flow.permute(0, 3, 1, 2),\n map_minmax(trg_kp.unsqueeze(1), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n\n # dists - N x top_k x top_k\n dists1 = self.dist_fn(src_kp_canon, trg_kp_canon.detach()) * (-1/self.T)\n dists2 = self.dist_fn(src_kp_canon.detach(), trg_kp_canon) * (-1/self.T)\n labels = torch.arange(top_k, dtype=torch.long, device='cuda')\n labels = labels.unsqueeze(0).repeat(N, 1)\n labels[~kp_vis] = -100\n \n loss = self.loss_fn(dists1, labels) + self.loss_fn(dists2, labels)\n loss *= kp_wt\n return loss.sum() / kp_vis.sum()\n\n def forward_eq(self, src_flow, trg_flow, src_kp, trg_kp, kp_vis):\n N = src_flow.size(0)\n res = src_flow.size(1)\n top_k = kp_vis.shape[1]\n # bb1_canon - N x 2 x top_k x 1\n # bb2_canon - N x 2 x 1 x top_k\n # Sample flow values using the pseudo GT from the flow_grid\n src_kp_canon = F.grid_sample(\n src_flow.permute(0, 3, 1, 2),\n map_minmax(src_kp.unsqueeze(2), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n trg_kp_canon = F.grid_sample(\n trg_flow.permute(0, 3, 1, 2),\n map_minmax(trg_kp.unsqueeze(1), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n\n # dists - N x top_k x top_k\n dists1 = self.dist_fn(src_kp_canon, trg_kp_canon.detach()) * (-1/self.T)\n dists2 = self.dist_fn(src_kp_canon.detach(), trg_kp_canon) * (-1/self.T)\n labels = torch.arange(top_k, dtype=torch.long, device='cuda')\n labels = labels.unsqueeze(0).repeat(N, 1)\n labels[~kp_vis] = -100\n return self.loss_fn(dists1, labels).mean() + self.loss_fn(dists2, labels).mean()" }, { "identifier": "DecayingCosineAnnealingWarmRestarts", "path": "thirdparty/gangealing/annealing.py", "snippet": "class DecayingCosineAnnealingWarmRestarts(_LRScheduler):\n r\"\"\"Set the learning rate of each parameter group using a cosine annealing\n schedule, where :math:`\\eta_{max}` is set to the initial lr,\n :math:`T_{cur}` is the number of epochs since the last restart and\n :math:`T_{i}` is the number of epochs between two warm restarts in SGDR:\n .. math::\n \\eta_t = \\eta_{min} + \\frac{1}{2}(\\eta_{max} - \\eta_{min})\\left(1 +\n \\cos\\left(\\frac{T_{cur}}{T_{i}}\\pi\\right)\\right)\n When :math:`T_{cur}=T_{i}`, set :math:`\\eta_t = \\eta_{min}`.\n When :math:`T_{cur}=0` after restart, set :math:`\\eta_t=\\eta_{max}`.\n It has been proposed in\n `SGDR: Stochastic Gradient Descent with Warm Restarts`_.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n T_0 (int): Number of iterations for the first restart.\n T_mult (int, optional): A factor increases :math:`T_{i}` after a\n restart. Default: 1.\n eta_min (float, optional): Minimum learning rate. Default: 0.\n last_epoch (int, optional): The index of last epoch. Default: -1.\n .. 
_SGDR\\: Stochastic Gradient Descent with Warm Restarts:\n https://arxiv.org/abs/1608.03983\n \"\"\"\n\n def __init__(self, optimizer, T_0, decay=0.9, T_mult=1, eta_min=0,\n last_epoch=-1):\n if T_0 <= 0 or not isinstance(T_0, int):\n raise ValueError(f\"Expected positive integer T_0, but got {T_0}\")\n if T_mult < 1 or not isinstance(T_mult, int):\n raise ValueError(f\"Expected integer T_mult >= 1, but got {T_mult}\")\n self.T_0 = T_0\n self.T_i = T_0\n self.T_mult = T_mult\n self.eta_min = eta_min\n self.decay = decay\n self.cur_decay = 1.0\n\n super(DecayingCosineAnnealingWarmRestarts, self).__init__(optimizer,\n last_epoch)\n\n self.T_cur = self.last_epoch\n\n def get_lr(self):\n if not self._get_lr_called_within_step:\n warnings.warn(\"To get the last learning rate computed by the \"\n \"scheduler, use `get_last_lr()`.\", UserWarning)\n\n return [self.cur_decay * (self.eta_min + (base_lr - self.eta_min) *\n (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2)\n for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n \"\"\"Step could be called after every batch update\"\"\"\n\n if epoch is None and self.last_epoch < 0:\n epoch = 0\n\n if epoch is None:\n epoch = self.last_epoch + 1\n self.T_cur = self.T_cur + 1\n if self.T_cur >= self.T_i:\n self.T_cur = self.T_cur - self.T_i\n self.T_i = self.T_i * self.T_mult\n else:\n if epoch < 0:\n raise ValueError(f\"Expected non-negative epoch, got {epoch}\")\n if epoch >= self.T_0:\n if self.T_mult == 1:\n self.T_cur = epoch % self.T_0\n n = int(epoch // self.T_0)\n else:\n n = int(math.log((epoch / self.T_0 * (self.T_mult - 1)\n + 1), self.T_mult))\n self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / \\\n (self.T_mult - 1)\n self.T_i = self.T_0 * self.T_mult ** (n)\n else:\n self.T_i = self.T_0\n self.T_cur = epoch\n n = 0\n self.cur_decay = self.decay ** n\n self.last_epoch = math.floor(epoch)\n\n class _enable_get_lr_call:\n\n def __init__(self, o):\n self.o = o\n\n def __enter__(self):\n self.o._get_lr_called_within_step = True\n return self\n\n def __exit__(self, type, value, traceback):\n self.o._get_lr_called_within_step = False\n return self\n\n with _enable_get_lr_call(self):\n for param_group, lr in zip(self.optimizer.param_groups,\n self.get_lr()):\n param_group['lr'] = lr\n\n self._last_lr = [group['lr'] for group in self.optimizer.param_groups]" }, { "identifier": "lr_cycle_iters", "path": "thirdparty/gangealing/annealing.py", "snippet": "def lr_cycle_iters(anneal_psi, period, iter, tm):\n zero_lr_iters = [anneal_psi - 1]\n num_cycles = int(math.log((iter - anneal_psi) / period, tm))\n for n in range(num_cycles):\n step = zero_lr_iters[-1] + period * tm ** n\n zero_lr_iters.append(int(step))\n print(f'Learning Rate Cycles: {zero_lr_iters}')\n return zero_lr_iters" } ]
import argparse import torch import numpy as np import json import os import torch.nn.functional as F import wandb from torch import nn, optim from tqdm import tqdm from pathlib import Path from commons.logger import Logger, log_visuals from commons.distributed import get_rank, setup_distributed, reduce_loss_dict,\ get_world_size, primary from commons.utils import sample_tuples from datasets.cub import CUBDataset from datasets.in_memory import InMemoryDataset from datasets.spair import SpairDataset from datasets.utils import Augmentor from models.utils import accumulate, requires_grad from models.canonical import Canonical, CanonicalMLP from models.asic import Asic from losses.reg_losses import total_variation_loss from thirdparty.lpips.lpips import get_perceptual_loss from losses.matching_losses import LossCorrsSparse from thirdparty.gangealing.annealing import DecayingCosineAnnealingWarmRestarts,\ lr_cycle_iters
14383
def save_state_dict(ckpt_name, c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, step, add_step_to_name=False): ckpt_dict = { "canon": c_module.state_dict(), "t": t_module.state_dict(), "c_ema": c_ema.state_dict(), "t_ema": t_ema.state_dict(), "t_optim": t_optim.state_dict(), "t_sched": t_sched.state_dict(), "canon_optim": canon_optim.state_dict() if canon_optim is not None else None, "canon_sched": canon_sched.state_dict() if canon_sched is not None else None, "args": args, "iter": step } torch.save(ckpt_dict, f'{results_path}/{ckpt_name}.pt') if add_step_to_name: torch.save(ckpt_dict, f'{results_path}/{ckpt_name}_{step:07d}.pt') def count_parameters(model):
def save_state_dict(ckpt_name, c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, step, add_step_to_name=False): ckpt_dict = { "canon": c_module.state_dict(), "t": t_module.state_dict(), "c_ema": c_ema.state_dict(), "t_ema": t_ema.state_dict(), "t_optim": t_optim.state_dict(), "t_sched": t_sched.state_dict(), "canon_optim": canon_optim.state_dict() if canon_optim is not None else None, "canon_sched": canon_sched.state_dict() if canon_sched is not None else None, "args": args, "iter": step } torch.save(ckpt_dict, f'{results_path}/{ckpt_name}.pt') if add_step_to_name: torch.save(ckpt_dict, f'{results_path}/{ckpt_name}_{step:07d}.pt') def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
13
2023-11-14 16:43:16+00:00
16k
doodledood/chat-flock
chatflock/use_cases/bshr.py
[ { "identifier": "InMemoryChatDataBackingStore", "path": "chatflock/backing_stores/in_memory.py", "snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __init__(\n self, messages: Optional[List[ChatMessage]] = None, participants: Optional[List[ChatParticipant]] = None\n ):\n self.messages = messages or []\n self.participants = {participant.name: participant for participant in (participants or [])}\n self.last_message_id = None if len(self.messages) == 0 else self.messages[-1].id\n\n def get_messages(self) -> List[ChatMessage]:\n return self.messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n self.last_message_id = self.last_message_id + 1 if self.last_message_id is not None else 1\n\n message = ChatMessage(\n id=self.last_message_id,\n sender_name=sender_name,\n content=content,\n timestamp=timestamp or datetime.datetime.now(),\n )\n\n self.messages.append(message)\n\n return message\n\n def clear_messages(self):\n self.messages = []\n self.last_message_id = None\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n participants = list(self.participants.values())\n active_participants = [\n participant for participant in participants if isinstance(participant, ActiveChatParticipant)\n ]\n\n return active_participants\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n participants = list(self.participants.values())\n participants = [\n participant for participant in participants if not isinstance(participant, ActiveChatParticipant)\n ]\n\n return participants\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if not isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if participant.name in self.participants:\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.participants[participant.name] = participant\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n if participant.name not in self.participants:\n raise ChatParticipantNotJoinedToChatError(participant.name)\n\n self.participants.pop(participant.name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return isinstance(participant, ActiveChatParticipant)\n\n return False\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return not isinstance(participant, ActiveChatParticipant)\n\n return False" }, { "identifier": "LangChainMemoryBasedChatDataBackingStore", "path": "chatflock/backing_stores/langchain.py", "snippet": "class LangChainMemoryBasedChatDataBackingStore(InMemoryChatDataBackingStore):\n no_output_message: str = \"##NO_OUTPUT##\"\n\n def __init__(\n self,\n memory: BaseChatMemory,\n 
memory_key_getter: Optional[Callable[[BaseChatMemory], str]] = None,\n messages: Optional[List[ChatMessage]] = None,\n include_timestamp_in_messages: bool = False,\n participants: Optional[List[ChatParticipant]] = None,\n ):\n super().__init__(participants=participants)\n\n self.memory = memory\n self.include_timestamp_in_messages = include_timestamp_in_messages\n\n if memory_key_getter is None:\n\n def default_memory_key_getter(memory: BaseChatMemory) -> str:\n if hasattr(memory, \"memory_key\"):\n return str(memory.memory_key)\n\n return self.memory.output_key or \"history\"\n\n self.memory_key_getter: Callable[[BaseChatMemory], str] = default_memory_key_getter\n else:\n self.memory_key_getter = memory_key_getter\n\n def get_messages(self) -> List[ChatMessage]:\n prev_return_messages = self.memory.return_messages\n\n self.memory.return_messages = True\n\n memory_key = self.memory_key_getter(self.memory)\n base_messages = self.memory.load_memory_variables({})[memory_key]\n chat_messages = [\n base_message_to_chat_message(base_message)\n for base_message in base_messages\n if base_message.content != self.no_output_message\n ]\n\n self.memory.return_messages = prev_return_messages\n\n return chat_messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n message = super().add_message(sender_name=sender_name, content=content)\n\n prefix = \"\"\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n prefix = f\"[{pretty_datetime}] \"\n\n self.memory.save_context(\n {\"input\": f\"{prefix}{message.id}. {message.sender_name}: {message.content}\"},\n {\"output\": self.no_output_message},\n )\n\n return message\n\n def clear_messages(self):\n super().clear_messages()\n\n self.memory.clear()" }, { "identifier": "Chat", "path": "chatflock/base.py", "snippet": "class Chat:\n backing_store: ChatDataBackingStore\n renderer: ChatRenderer\n name: Optional[str] = None\n max_total_messages: Optional[int] = None\n hide_messages: bool = False\n\n def __init__(\n self,\n backing_store: ChatDataBackingStore,\n renderer: ChatRenderer,\n initial_participants: Optional[Sequence[ChatParticipant]] = None,\n name: Optional[str] = None,\n max_total_messages: Optional[int] = None,\n hide_messages: bool = False,\n ):\n if max_total_messages is not None and max_total_messages <= 0:\n raise ValueError(\"Max total messages must be None or greater than 0.\")\n\n self.backing_store = backing_store\n self.renderer = renderer\n self.name = name\n self.hide_messages = hide_messages\n self.max_total_messages = max_total_messages\n\n for i, participant in enumerate(initial_participants or []):\n self.add_participant(participant)\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(\n participant.name\n ):\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.backing_store.add_participant(participant)\n\n all_participants = (\n self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()\n )\n for participant in all_participants:\n participant.on_participant_joined_chat(chat=self, participant=participant)\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n self.backing_store.remove_participant(participant)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = 
self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_participant_left_chat(chat=self, participant=participant)\n\n def add_message(self, sender_name: str, content: str) -> None:\n sender = self.backing_store.get_active_participant_by_name(sender_name)\n if sender is None:\n raise ChatParticipantNotJoinedToChatError(sender_name)\n\n message = self.backing_store.add_message(sender_name=sender_name, content=content)\n\n self.renderer.render_new_chat_message(chat=self, message=message)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_new_chat_message(chat=self, message=message)\n\n def get_messages(self) -> List[ChatMessage]:\n return self.backing_store.get_messages()\n\n def clear_messages(self):\n self.backing_store.clear_messages()\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n return self.backing_store.get_active_participants()\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n return self.backing_store.get_non_active_participants()\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n return self.backing_store.get_active_participant_by_name(name=name)\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n return self.backing_store.get_non_active_participant_by_name(name=name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_active_participant_with_name(participant_name=participant_name)\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)\n\n @property\n def active_participants_str(self):\n return \"\\n\\n\".join([participant.detailed_str() for participant in self.get_active_participants()])" }, { "identifier": "ChatDataBackingStore", "path": "chatflock/base.py", "snippet": "class ChatDataBackingStore(abc.ABC):\n @abc.abstractmethod\n def get_messages(self) -> List[ChatMessage]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime] = None) -> ChatMessage:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def clear_messages(self) -> None:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_non_active_participants(self) -> List[ChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def add_participant(self, participant: ChatParticipant) -> None:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def remove_participant(self, participant: ChatParticipant) -> None:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n raise 
NotImplementedError()\n\n @abc.abstractmethod\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n raise NotImplementedError()" }, { "identifier": "RoundRobinChatConductor", "path": "chatflock/conductors/round_robin.py", "snippet": "class RoundRobinChatConductor(ChatConductor):\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n active_participants = chat.get_active_participants()\n if len(active_participants) <= 0:\n return None\n\n messages = chat.get_messages()\n last_message = messages[-1] if len(messages) > 0 else None\n\n if last_message is not None and self.is_termination_message(last_message):\n return None\n\n last_speaker = last_message.sender_name if last_message is not None else None\n if last_speaker is None:\n return next(iter(active_participants))\n\n # Rotate to the next participant in the list.\n participant_names = [participant.name for participant in active_participants]\n\n if last_speaker not in participant_names:\n next_speaker_name = participant_names[0]\n else:\n last_speaker_index = participant_names.index(last_speaker)\n next_speaker_index = (last_speaker_index + 1) % len(participant_names)\n next_speaker_name = participant_names[next_speaker_index]\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None or not isinstance(next_speaker, ActiveChatParticipant):\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n return next_speaker\n\n def get_chat_result(self, chat: \"Chat\") -> str:\n result = super().get_chat_result(chat=chat)\n\n try:\n idx = result.rindex(\"TERMINATE\")\n result = result[:idx].strip()\n except ValueError:\n result = result.strip()\n\n return result\n\n def is_termination_message(self, message: ChatMessage) -> bool:\n return message.content.strip().endswith(\"TERMINATE\")" }, { "identifier": "chat_messages_to_pydantic", "path": "chatflock/parsing_utils.py", "snippet": "def chat_messages_to_pydantic(\n chat_messages: Sequence[ChatMessage],\n chat_model: BaseChatModel,\n output_schema: Type[TOutputSchema],\n spinner: Optional[Halo] = None,\n n_tries: int = 3,\n hide_message: bool = True,\n) -> TOutputSchema:\n text_to_json_ai = LangChainBasedAIChatParticipant(\n chat_model=chat_model,\n name=\"Jason\",\n role=\"JSON Converter\",\n symbol=\"📄\",\n personal_mission=\"Your only purpose is to convert the previous chat messages (usually the last one)\"\n \"to a valid and logical JSON that follows the JSON SCHEMA provided. Your message should \"\n \"include only correct JSON. 
No fluff.\",\n other_prompt_sections=[Section(name=\"JSON SCHEMA\", text=str(pydantic_to_json_schema(output_schema)))],\n ignore_group_chat_environment=True,\n spinner=spinner,\n )\n json_parser = JSONOutputParserChatParticipant(output_schema=output_schema)\n\n # Remove TERMINATE if present so the chat conductor doesn't end the chat prematurely\n if len(chat_messages) > 0:\n chat_messages = list(chat_messages).copy()\n last_message = chat_messages[-1]\n\n try:\n # Chop the content at the last instance of the word TERMINATE in the content\n idx = last_message.content.rindex(\"TERMINATE\")\n new_content = last_message.content[:idx].strip()\n\n last_message = ChatMessage(id=last_message.id, sender_name=last_message.sender_name, content=new_content)\n\n chat_messages[-1] = last_message\n except ValueError:\n pass\n\n parser_chat = Chat(\n backing_store=InMemoryChatDataBackingStore(messages=list(chat_messages)),\n renderer=NoChatRenderer(),\n initial_participants=[text_to_json_ai, json_parser],\n hide_messages=hide_message,\n max_total_messages=len(chat_messages) + 1 + (n_tries - 1) * 2,\n )\n conductor = RoundRobinChatConductor()\n\n _ = conductor.initiate_dialog(chat=parser_chat)\n\n if json_parser.output is None:\n raise MessageCouldNotBeParsedError(\"An output could not be parsed from the chat messages.\")\n\n return json_parser.output" }, { "identifier": "LangChainBasedAIChatParticipant", "path": "chatflock/participants/langchain.py", "snippet": "class LangChainBasedAIChatParticipant(ActiveChatParticipant):\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self,\n name: str,\n chat_model: BaseChatModel,\n symbol: str = \"🤖\",\n role: str = \"AI Assistant\",\n personal_mission: str = \"Be a helpful AI assistant.\",\n other_prompt_sections: Optional[List[Section]] = None,\n retriever: Optional[BaseRetriever] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n spinner: Optional[Halo] = None,\n ignore_group_chat_environment: bool = False,\n include_timestamp_in_messages: bool = False,\n **kwargs: Any,\n ):\n super().__init__(name=name, symbol=symbol, **kwargs)\n\n self.role = role\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.other_prompt_sections = other_prompt_sections or []\n self.ignore_group_chat_environment = ignore_group_chat_environment\n self.include_timestamp_in_messages = include_timestamp_in_messages\n self.retriever = retriever\n self.tools = tools\n self.spinner = spinner\n self.personal_mission = personal_mission\n\n def create_system_message(self, chat: \"Chat\", relevant_docs: Sequence[Document]) -> str:\n now = datetime.now()\n pretty_datetime = now.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n base_sections = [\n Section(name=\"Current Time\", text=pretty_datetime),\n Section(name=\"Name\", text=self.name),\n Section(name=\"Role\", text=self.role),\n Section(name=\"Personal Mission\", text=self.personal_mission),\n Section(\n name=\"Additional Context for Response\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your response, only use \"\n \"them for context for a better response, if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n Section(\n name=\"Response Message Format\",\n list=[\n \"Your response should be the message you want to send to the group chat as your own name, \"\n \"role, and personal mission.\",\n 
\"Must not include any prefix (e.g., timestamp, sender name, etc.).\",\n \"Response must be a message as will be shown in the chat (timestamp and sender name are \"\n \"system-generated for you).\",\n ],\n sub_sections=[\n Section(name=\"Well-Formatted Chat Response Examples\", list=['\"Hello, how are you?\"']),\n Section(\n name=\"Badly-Formatted Chat Response Examples\",\n list=[\n (\n '\"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else '\"John: Hello, how are you?\"'\n ),\n ],\n ),\n ],\n ),\n ]\n\n active_participants = chat.get_active_participants()\n if self.ignore_group_chat_environment:\n system_message = StructuredString(sections=[*base_sections, *self.other_prompt_sections])\n else:\n system_message = StructuredString(\n sections=[\n *base_sections,\n Section(\n name=\"Chat\",\n sub_sections=[\n Section(name=\"Name\", text=chat.name or \"No name provided. Just a general chat.\"),\n Section(\n name=\"Participants\",\n text=\"\\n\".join(\n [\n f'- {str(p)}{\" -> This is you.\" if p.name == self.name else \"\"}'\n for p in active_participants\n ]\n ),\n ),\n Section(\n name=\"Guidelines\",\n list=[\n \"Your personal mission is the most important thing to you. You should always \"\n \"prioritize it.\",\n \"If a chat goal is provided, you should still follow your personal mission but \"\n \"in a way that helps the group achieve the chat goal.\",\n \"If you are the only participant in the chat, you should act as if the chat is now \"\n \"a scratch pad for you to write down your thoughts, ideas, and work on your \"\n \"mission by yourself. \"\n \"In the messages do not refer to another entity, but rather to yourself \"\n \"(I instead of You); the messages should read and sound like \"\n \"your internal thoughts and should be succinct, unless they are concrete work \"\n \"(for example, implementing something, calculating things, etc.). \"\n \"You have all the time in the world to build your thoughts, ideas, and do the \"\n \"work needed. The chat is now your place to think and iterate on your mission and \"\n \" achieve it.\",\n ],\n ),\n Section(\n name=\"Rules\",\n list=[\n \"You do not have to respond directly to the one who sent you a message. You can respond \"\n \"to anyone in the group chat.\",\n \"You cannot have private conversations with other participants. Everyone can see all \"\n \"messages sent by all other participants.\",\n ],\n ),\n Section(\n name=\"Previous Chat Messages\",\n list=[\n \"Messages are prefixed by a timestamp and the sender's name (could also be everyone). \",\n \"The prefix is for context only; it's not actually part of the message they sent. \",\n (\n 'Example: \"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else 'Example: \"John: Hello, how are you?\"'\n ),\n \"Some messages could have been sent by participants who are no longer a part of this \"\n \"conversation. 
Use their contents for context only; do not talk to them.\",\n \"In your response only include the message without the prefix.\",\n \"If you are the only participant in the chat, the previous chat messages are your \"\n \" memories or internal thoughts instead.\",\n ],\n ),\n ],\n ),\n *self.other_prompt_sections,\n ]\n )\n\n return str(system_message)\n\n def chat_messages_to_chat_model_messages(\n self, chat_messages: Sequence[ChatMessage], active_participants: Sequence[ActiveChatParticipant]\n ) -> List[BaseMessage]:\n messages: List[BaseMessage] = []\n for i, message in enumerate(chat_messages):\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n content = f\"[{pretty_datetime}] \"\n else:\n content = \"\"\n\n if self.ignore_group_chat_environment:\n content += f\"{message.sender_name}: {message.content}\"\n else:\n content += message.content\n\n if message.sender_name == self.name:\n if len(active_participants) > 1 or i == len(active_participants) - 1:\n messages.append(AIMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n\n if len(messages) == 0:\n messages.append(HumanMessage(content=f\"SYSTEM: The chat has started.\"))\n\n return messages\n\n def respond_to_chat(self, chat: Chat) -> str:\n if self.spinner is not None:\n self.spinner.start(text=f\"{str(self)} is thinking...\")\n\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = self.create_system_message(chat=chat, relevant_docs=relevant_docs)\n\n active_participants = chat.get_active_participants()\n all_messages = self.chat_messages_to_chat_model_messages(chat_messages, active_participants)\n all_messages = [SystemMessage(content=system_message), *all_messages]\n\n message_content = self.execute_messages(messages=all_messages)\n\n if self.spinner is not None:\n self.spinner.stop()\n\n potential_prefix = f\"{self.name}:\"\n if message_content.startswith(potential_prefix):\n message_content = message_content[len(potential_prefix) :].strip()\n\n return message_content\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n\n tool_names = \", \".join([tool.name for tool in self.tools or []])\n if tool_names == \"\":\n tool_names = \"None\"\n\n return (\n f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\\n\"\n f'{prefix} Personal Mission: \"{self.personal_mission}\"\\n{prefix} Tools: {tool_names}'\n )" }, { "identifier": "UserChatParticipant", "path": "chatflock/participants/user.py", "snippet": "class UserChatParticipant(ActiveChatParticipant):\n def __init__(self, name: str = \"User\", role: str = \"User\", symbol: str = \"👤\", **kwargs: Any):\n super().__init__(name, messages_hidden=True, **kwargs)\n\n self.role = role\n self.symbol 
= symbol\n\n def respond_to_chat(self, chat: Chat) -> str:\n return input(f\"{self.symbol} ({self.name}): \")\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n return f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\"" }, { "identifier": "TerminalChatRenderer", "path": "chatflock/renderers/terminal.py", "snippet": "class TerminalChatRenderer(ChatRenderer):\n def __init__(self, print_timestamps: bool = False):\n self.print_timestamps = print_timestamps\n\n def render_new_chat_message(self, chat: Chat, message: ChatMessage) -> None:\n if chat.hide_messages:\n return\n\n pretty_timestamp_with_date = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n sender = chat.get_active_participant_by_name(message.sender_name)\n if sender is None:\n symbol = \"❓\"\n\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {symbol} {message.sender_name}: {message.content}\")\n else:\n print(f\"{symbol} {message.sender_name}: {message.content}\")\n else:\n if sender.messages_hidden:\n return\n\n if chat.name is None:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {str(sender)}: {message.content}\")\n else:\n print(f\"{str(sender)}: {message.content}\")\n else:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {chat.name} > {str(sender)}: {message.content}\")\n else:\n print(f\"{chat.name} > {str(sender)}: {message.content}\")" }, { "identifier": "SequentialProcess", "path": "chatflock/sequencial_process/sequential_process.py", "snippet": "class SequentialProcess(Generic[T]):\n def __init__(self, steps: Sequence[Step[T]], initial_state: T, save_state: Callable[[T], None]):\n self.steps = steps\n self.state = initial_state\n self.save_state_func = save_state\n\n def save_state(self, state: T) -> None:\n self.save_state_func(state)\n\n def run(self) -> T:\n for step in self.steps:\n if step.on_step_start:\n step.on_step_start(self.state)\n\n try:\n for new_state in step.run(self.state) or []:\n self.state = new_state\n self.save_state(state=self.state)\n\n self.save_state(state=self.state)\n\n if step.on_step_completed:\n step.on_step_completed(self.state)\n except Exception as e:\n if step.on_step_failed:\n step.on_step_failed(self.state)\n\n raise e\n\n return self.state" }, { "identifier": "Step", "path": "chatflock/sequencial_process/sequential_process.py", "snippet": "class Step(Generic[T]):\n def __init__(\n self,\n name: str,\n func: Callable[[T, Any], Optional[Generator[T, None, None]]],\n on_step_start: Optional[Callable[[T], None]] = None,\n on_step_completed: Optional[Callable[[T], None]] = None,\n on_step_failed: Optional[Callable[[T], None]] = None,\n ):\n self.name = name\n self.func = func\n self.on_step_start = on_step_start\n self.on_step_completed = on_step_completed\n self.on_step_failed = on_step_failed\n\n def run(self, state: T, **kwargs: Any) -> Generator[T, None, None]:\n res = self.func(state, **kwargs) # type: ignore\n if res is None:\n return\n\n for new_state in res:\n yield new_state" }, { "identifier": "Section", "path": "chatflock/structured_string.py", "snippet": "class Section:\n name: str\n text: Optional[str] = None\n list: Optional[List[str]] = None\n sub_sections: Optional[List[\"Section\"]] = None\n list_item_prefix: Optional[str] = \"-\"\n uppercase_name: bool = True\n\n def to_text(self, level: int = 0) -> str:\n result = f'{\"#\" * (level + 1)} 
{self.name.upper() if self.uppercase_name else self.name}'\n\n if self.text is not None:\n result += \"\\n\" + self.text\n\n if self.list is not None:\n result += \"\\n\" + \"\\n\".join(\n [\n f'{self.list_item_prefix if self.list_item_prefix else str(i + 1) + \".\"} {item}'\n for i, item in enumerate(self.list)\n ]\n )\n\n if self.sub_sections is not None:\n for sub_section in self.sub_sections:\n result += \"\\n\\n\" + sub_section.to_text(level + 1)\n\n return result" }, { "identifier": "StructuredString", "path": "chatflock/structured_string.py", "snippet": "class StructuredString:\n sections: List[Section]\n\n def __getitem__(self, item: str) -> Section:\n if not isinstance(item, str):\n raise TypeError(f\"Item must be of type str, not {type(item)}.\")\n\n relevant_sections = [section for section in self.sections if section.name == item]\n if len(relevant_sections) == 0:\n raise KeyError(f\"No section with name {item} exists.\")\n\n return relevant_sections[0]\n\n def __setitem__(self, key: str, value: Section) -> None:\n if not isinstance(key, str):\n raise TypeError(f\"Key must be of type str, not {type(key)}.\")\n\n if not isinstance(value, Section):\n raise TypeError(f\"Value must be of type Section, not {type(value)}.\")\n\n try:\n section = self[key]\n\n # Remove old section and replace with new one, in the same place\n self.sections.insert(self.sections.index(section), value)\n self.sections.remove(section)\n except KeyError:\n self.sections.append(value)\n\n def __str__(self) -> str:\n result = \"\"\n for section in self.sections:\n result += section.to_text() + \"\\n\\n\"\n\n return result\n\n def __repr__(self) -> str:\n return self.__str__()" }, { "identifier": "get_response", "path": "chatflock/use_cases/request_response.py", "snippet": "def get_response(\n query: str,\n answerer: ActiveChatParticipant,\n backing_store: Optional[ChatDataBackingStore] = None,\n renderer: Optional[ChatRenderer] = None,\n) -> Tuple[str, Chat]:\n user = UserChatParticipant(name=\"User\")\n participants = [user, answerer]\n\n chat = Chat(\n backing_store=backing_store or InMemoryChatDataBackingStore(),\n renderer=renderer or NoChatRenderer(),\n initial_participants=participants,\n max_total_messages=2,\n )\n\n chat_conductor = RoundRobinChatConductor()\n answer = chat_conductor.initiate_dialog(chat=chat, initial_message=query, from_participant=user)\n\n return answer, chat" }, { "identifier": "WebSearch", "path": "chatflock/web_research/web_research.py", "snippet": "class WebSearch:\n def __init__(\n self,\n chat_model: BaseChatModel,\n search_results_provider: SearchResultsProvider,\n page_query_analyzer: PageQueryAnalyzer,\n skip_results_if_answer_snippet_found: bool = True,\n ):\n self.chat_model = chat_model\n self.search_results_provider = search_results_provider\n self.page_query_analyzer = page_query_analyzer\n self.skip_results_if_answer_snippet_found = skip_results_if_answer_snippet_found\n\n def get_answer(\n self, query: str, n_results: int = 3, urls: Optional[List[str]] = None, spinner: Optional[Halo] = None\n ) -> Tuple[bool, str]:\n original_spinner_text = None if spinner is None else spinner.text\n qna = []\n\n if urls is None:\n if spinner is not None:\n spinner.start(f'Getting search results for \"{query}\"...')\n\n try:\n search_results = self.search_results_provider.search(query=query, n_results=n_results)\n except (TransientHTTPError, NonTransientHTTPError) as e:\n return False, f'Failed to get search results for \"{query}\" because of an error: {e}'\n\n if spinner is not 
None:\n spinner.succeed(f'Got search results for \"{query}\".')\n\n if len(search_results.organic_results) == 0 and search_results.answer_snippet is None:\n return False, \"Nothing was found on the web for this query.\"\n\n if search_results.knowledge_graph_description is not None:\n qna.append({\"answer\": search_results.knowledge_graph_description, \"source\": \"Knowledge Graph\"})\n\n if search_results.answer_snippet is not None:\n qna.append({\"answer\": search_results.answer_snippet, \"source\": \"Answer Snippet\"})\n\n if not self.skip_results_if_answer_snippet_found or search_results.answer_snippet is None:\n for result in search_results.organic_results:\n if url_unsupported(result.link):\n continue\n\n if spinner is not None:\n spinner.start(f'Reading & analyzing #{result.position} result \"{result.title}\"')\n\n try:\n page_result = self.page_query_analyzer.analyze(\n url=result.link, title=result.title, query=query, spinner=spinner\n )\n answer = page_result.answer\n\n if spinner is not None:\n spinner.succeed(f'Read & analyzed #{result.position} result \"{result.title}\".')\n except Exception as e:\n if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):\n if spinner is not None:\n spinner.warn(\n f'Failed to read & analyze #{result.position} result \"{result.title}\", moving on.'\n )\n\n answer = \"Unable to answer query because the page could not be read.\"\n else:\n raise\n\n qna.append({\"answer\": answer, \"source\": result.link})\n else:\n # Urls were provided, search in those urls instead of searching using a search engine\n for url in urls:\n if url_unsupported(url):\n continue\n\n if spinner is not None:\n spinner.start(f'Reading & analyzing URL \"{url}\"')\n\n try:\n page_result = self.page_query_analyzer.analyze(\n url=url, title=\"Unknown\", query=query, spinner=spinner\n )\n answer = page_result.answer\n\n if spinner is not None:\n spinner.succeed(f'Read & analyzed URL \"{url}\".')\n except Exception as e:\n if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):\n if spinner is not None:\n spinner.warn(f'Failed to read & analyze URL \"{url}\", moving on.')\n\n answer = \"Unable to answer query because the page could not be read.\"\n else:\n raise\n\n qna.append({\"answer\": answer, \"source\": url})\n\n if spinner is not None:\n spinner.start(f\"Processing results...\")\n\n formatted_answers = \"\\n\".join([f'{i + 1}. 
{q[\"answer\"]}; Source: {q[\"source\"]}' for i, q in enumerate(qna)])\n\n chat = Chat(\n backing_store=InMemoryChatDataBackingStore(),\n renderer=NoChatRenderer(),\n initial_participants=[\n UserChatParticipant(),\n LangChainBasedAIChatParticipant(\n name=\"Query Answer Aggregator\",\n role=\"Query Answer Aggregator\",\n personal_mission=\"Analyze query answers, discard unlikely ones, and provide an aggregated final response.\",\n chat_model=self.chat_model,\n other_prompt_sections=[\n Section(\n name=\"Aggregating Query Answers\",\n sub_sections=[\n Section(\n name=\"Process\",\n list=[\n \"Receive query and answers with sources.\",\n \"Analyze answers, discard unlikely or minority ones.\",\n \"Formulate final answer based on most likely answers.\",\n 'If no data found, respond \"The answer could not be found.\"',\n ],\n list_item_prefix=None,\n ),\n Section(\n name=\"Aggregation\",\n list=[\n \"Base final answer on sources.\",\n \"Incorporate sources as inline citations in Markdown format.\",\n 'Example: \"Person 1 was [elected president in 2012](https://...).\"',\n \"Only include sources from provided answers.\",\n \"If part of an answer is used, use the same links inline.\",\n ],\n ),\n Section(\n name=\"Final Answer Notes\",\n list=[\n \"Do not fabricate information. Stick to provided data.\",\n \"You will be given the top search results from a search engine, there is a reason they are the top results. You should pay attention to all of them and think about the query intent.\"\n \"If the answer is not found in the page data, state it clearly.\",\n \"Should be formatted in Markdown with inline citations.\",\n ],\n ),\n ],\n )\n ],\n ),\n ],\n max_total_messages=2,\n )\n chat_conductor = RoundRobinChatConductor()\n final_answer = chat_conductor.initiate_dialog(\n chat=chat,\n initial_message=str(\n StructuredString(\n sections=[Section(name=\"Query\", text=query), Section(name=\"Answers\", text=formatted_answers)]\n )\n ),\n )\n\n if spinner is not None:\n spinner.succeed(f\"Done searching the web.\")\n\n if original_spinner_text is not None:\n spinner.start(original_spinner_text)\n\n return True, final_answer" }, { "identifier": "WebResearchTool", "path": "chatflock/web_research/web_research.py", "snippet": "class WebResearchTool(BaseTool):\n web_search: WebSearch\n n_results: int = 3\n spinner: Optional[Halo] = None\n name: str = \"web_search\"\n description: str = \"Research the web. Use that to get an answer for a query you don't know or unsure of the answer to, for recent events, or if the user asks you to. This will evaluate answer snippets, knowledge graphs, and the top N results from google and aggregate a result.\"\n args_schema: Type[BaseModel] = WebSearchToolArgs\n progress_text: str = \"Searching the web...\"\n\n def _run(\n self,\n query: str,\n urls: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n return self.web_search.get_answer(query=query, n_results=self.n_results, urls=urls, spinner=self.spinner)[1]" } ]
from typing import Any, Dict, Generator, Generic, List, Optional, Type, TypeVar from functools import partial from halo import Halo from langchain.callbacks.manager import CallbackManagerForToolRun from langchain.chat_models.base import BaseChatModel from langchain.llms.openai import OpenAI from langchain.memory import ConversationSummaryBufferMemory from langchain.tools import BaseTool from pydantic import BaseModel, Field from chatflock.backing_stores import InMemoryChatDataBackingStore from chatflock.backing_stores.langchain import LangChainMemoryBasedChatDataBackingStore from chatflock.base import Chat, ChatDataBackingStore from chatflock.conductors import RoundRobinChatConductor from chatflock.parsing_utils import chat_messages_to_pydantic from chatflock.participants.langchain import LangChainBasedAIChatParticipant from chatflock.participants.user import UserChatParticipant from chatflock.renderers import TerminalChatRenderer from chatflock.sequencial_process import SequentialProcess, Step from chatflock.structured_string import Section, StructuredString from chatflock.use_cases.request_response import get_response from chatflock.web_research import WebSearch from chatflock.web_research.web_research import WebResearchTool import datetime import json import questionary
10936
class BHSRState(BaseModel): information_need: Optional[str] = None queries_to_run: Optional[List[str]] = None answers_to_queries: Optional[Dict[str, str]] = None current_hypothesis: Optional[str] = None proposed_hypothesis: Optional[str] = None feedback: Optional[str] = None is_satisficed: Optional[bool] = None def save_state(state: BHSRState, state_file: Optional[str]) -> None: if state_file is None: return data = state.model_dump() with open(state_file, "w") as f: json.dump(data, f, indent=2) def load_state(state_file: Optional[str]) -> Optional[BHSRState]: if state_file is None: return None try: with open(state_file) as f: data = json.load(f) return BHSRState.model_validate(data) except FileNotFoundError: return None class QueryGenerationResult(BaseModel): information_need: str = Field(description="Information need as requested by the user.") queries: List[str] = Field(description="Set of queries to run.") class HypothesisGenerationResult(BaseModel): hypothesis: str = Field( description="A new or updated hypothesis based on the materials provided. Rich formatting using Markdown. Should include all relevant citations inline." ) class SatisficationCheckResult(BaseModel): feedback: str = Field( description="If not satisficed yet, feedback on why not satisfied and what to think about next. If satisficed, feedback can be empty." ) is_satisficed: bool = Field(description="Whether or not the information need has been satisficed.") def generate_queries( state: BHSRState, chat_model: BaseChatModel, interactive_user: bool = True, max_queries: int = 5, shared_sections: Optional[List[Section]] = None, web_search_tool: Optional[BaseTool] = None, spinner: Optional[Halo] = None, ) -> None: if state.queries_to_run is not None and len(state.queries_to_run) > 0: # Means we are continuing a previous session return if shared_sections is None: shared_sections = [] query_generator = LangChainBasedAIChatParticipant( name="Search Query Generator", role="Search Query Generator", personal_mission="You will be given a specific query or problem by the user and you are to generate a list of " f"AT MOST {max_queries} search queries that will be used to search the internet. Make sure you " f"generate comprehensive, counterfactual, and maximally orthogonal search queries. " "Employ everything you know about " "information foraging and information literacy to generate the best possible questions. " "Use a step-by-step approach and think about the information need and the information " "domain before generating the queries. Order the queries by their importance and relevance " "to the main information need of the user.", other_prompt_sections=shared_sections + [ Section( name="Unclear Information Need", text=( "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or ask the user for clarification." if interactive_user else "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or make a best guess. The user will not be available to " "respond back." ), ), Section( name="Refine Queries", text='You might be given a first-pass information need with "None" previous queries and answers, ' "in which case you will do the best you" 'can to generate "naive queries" (uninformed search queries). However the USER might also ' "give you previous search queries or other background information such as accumulated notes. 
" 'If these materials are present, you are to generate "informed queries" - more specific ' "search queries that aim to zero in on the correct information domain. Do not duplicate " "previously asked questions. Use the notes and other information presented to create " "targeted queries and/or to cast a wider net.", ), Section( name="Termination", text="Once you generate a new set of queries to run, you should terminate the chat immediately by " "ending your message with TERMINATE", ), ], tools=[web_search_tool] if web_search_tool is not None else None, ignore_group_chat_environment=True, chat_model=chat_model, spinner=spinner, ) user = UserChatParticipant() participants = [user, query_generator] try: memory = ConversationSummaryBufferMemory( llm=chat_model, max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore )
# Based directly on David Shaprio's BSHR Loop: https://github.com/daveshap/BSHR_Loop class BHSRState(BaseModel): information_need: Optional[str] = None queries_to_run: Optional[List[str]] = None answers_to_queries: Optional[Dict[str, str]] = None current_hypothesis: Optional[str] = None proposed_hypothesis: Optional[str] = None feedback: Optional[str] = None is_satisficed: Optional[bool] = None def save_state(state: BHSRState, state_file: Optional[str]) -> None: if state_file is None: return data = state.model_dump() with open(state_file, "w") as f: json.dump(data, f, indent=2) def load_state(state_file: Optional[str]) -> Optional[BHSRState]: if state_file is None: return None try: with open(state_file) as f: data = json.load(f) return BHSRState.model_validate(data) except FileNotFoundError: return None class QueryGenerationResult(BaseModel): information_need: str = Field(description="Information need as requested by the user.") queries: List[str] = Field(description="Set of queries to run.") class HypothesisGenerationResult(BaseModel): hypothesis: str = Field( description="A new or updated hypothesis based on the materials provided. Rich formatting using Markdown. Should include all relevant citations inline." ) class SatisficationCheckResult(BaseModel): feedback: str = Field( description="If not satisficed yet, feedback on why not satisfied and what to think about next. If satisficed, feedback can be empty." ) is_satisficed: bool = Field(description="Whether or not the information need has been satisficed.") def generate_queries( state: BHSRState, chat_model: BaseChatModel, interactive_user: bool = True, max_queries: int = 5, shared_sections: Optional[List[Section]] = None, web_search_tool: Optional[BaseTool] = None, spinner: Optional[Halo] = None, ) -> None: if state.queries_to_run is not None and len(state.queries_to_run) > 0: # Means we are continuing a previous session return if shared_sections is None: shared_sections = [] query_generator = LangChainBasedAIChatParticipant( name="Search Query Generator", role="Search Query Generator", personal_mission="You will be given a specific query or problem by the user and you are to generate a list of " f"AT MOST {max_queries} search queries that will be used to search the internet. Make sure you " f"generate comprehensive, counterfactual, and maximally orthogonal search queries. " "Employ everything you know about " "information foraging and information literacy to generate the best possible questions. " "Use a step-by-step approach and think about the information need and the information " "domain before generating the queries. Order the queries by their importance and relevance " "to the main information need of the user.", other_prompt_sections=shared_sections + [ Section( name="Unclear Information Need", text=( "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or ask the user for clarification." if interactive_user else "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or make a best guess. The user will not be available to " "respond back." ), ), Section( name="Refine Queries", text='You might be given a first-pass information need with "None" previous queries and answers, ' "in which case you will do the best you" 'can to generate "naive queries" (uninformed search queries). However the USER might also ' "give you previous search queries or other background information such as accumulated notes. 
" 'If these materials are present, you are to generate "informed queries" - more specific ' "search queries that aim to zero in on the correct information domain. Do not duplicate " "previously asked questions. Use the notes and other information presented to create " "targeted queries and/or to cast a wider net.", ), Section( name="Termination", text="Once you generate a new set of queries to run, you should terminate the chat immediately by " "ending your message with TERMINATE", ), ], tools=[web_search_tool] if web_search_tool is not None else None, ignore_group_chat_environment=True, chat_model=chat_model, spinner=spinner, ) user = UserChatParticipant() participants = [user, query_generator] try: memory = ConversationSummaryBufferMemory( llm=chat_model, max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore )
backing_store: ChatDataBackingStore = LangChainMemoryBasedChatDataBackingStore(memory=memory)
3
2023-11-12 11:10:58+00:00
16k
atlantic-quantum/Shipyard
shipyard/passes/semantic_analysis/semantic_analyzer.py
[ { "identifier": "ErrorCode", "path": "shipyard/compiler_error.py", "snippet": "class ErrorCode(Enum):\n \"\"\"Class to enumerate error codes of the shipyard\"\"\"\n\n ID_NOT_FOUND = \"Identifier not found\"\n DUPLICATE_ID = \"Duplicate id found\"\n NOT_IN_GLOBAL_SCOPE = \"Not in global scope\"\n INVALID_DEFCAL_ARGUMENT = \"Invalid defcal argument\"\n EXPRESSION_IN_DEFCAL = \"Expression in defcal signature, unhandled\"\n INVALID_GATECALL_ARGUMENT = \"Invalid gatecall argument\"\n UNHANDLED = \"Unhandled case\"\n UNDETERMINED_CALL = \"Unable to determine a unique function for function call\"\n NO_SEQC_STATEMENT = \"No equivalent SEQC statement\"\n COMPILE_OUT = \"Statement should be compiled out before printing SEQC code\"\n PORT_NOT_FOUND = \"Port was not found within setup\"\n INSTRUMENT_NOT_FOUND = \"Instrument was not found within setup\"\n INPUT_NOT_FOUND = \"Input value was not found\"\n OUTPUT_NOT_SUPPORTED = \"Output type not supported\"\n INPUT_TYPE_NOT_SUPPORTED = \"Input type not supported\"\n INVALID_ARGUMENT = \"Invalid argument\"\n INVALID_WAVEFORM = \"Waveform does not meet timing constraints\"\n INCLUDE_ERROR = \"Error in include statement\"" }, { "identifier": "SemanticError", "path": "shipyard/compiler_error.py", "snippet": "class SemanticError(Error):\n \"\"\"Error class for semantic errors, raised by SemanticAnalyser\"\"\"" }, { "identifier": "LOGGER", "path": "shipyard/logger.py", "snippet": "LOGGER = logging.getLogger(\"Compiler\")" }, { "identifier": "Mangler", "path": "shipyard/mangle.py", "snippet": "class Mangler(LiteralVisitor, TypeVisitor, GenericVisitor):\n \"\"\"\n QASMVisitor that visits CalibrationDefinition or QuantumGate nodes to gather\n the iformation required to mangle function definition signatures and function calls\n \"\"\"\n\n def __init__(\n self, node: ast.CalibrationDefinition | ast.QuantumGate = None\n ) -> None:\n super().__init__()\n self.name = None\n self.qubits = None\n self.arguments = None\n self.return_type = None\n if node:\n self.visit(node)\n\n def signature(self) -> FunctionSignature:\n \"\"\"Converts instances of Mangler class to FunctionSignature objects\n\n Returns:\n FunctionSignature:\n with name, params qubits and return_type from the Mangler class instance\n \"\"\"\n return FunctionSignature(\n name=self.name,\n params=self.arguments,\n qubits=self.qubits,\n return_type=self.return_type,\n )\n\n # pylint: disable=C0103\n # disable snake_case naming style\n # these functions are of the form \"visit_{QASMNode class name}\"\n def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition):\n \"\"\"\n CalibrationDefinition node visitor\n Extracts name, arguments, qubits and return_type from the node\n and makes them usable for mangling\n\n Args:\n node (ast.CalibrationDefinition):\n openQASM defcal statement to visit\n \"\"\"\n self.name = self.visit(node.name)\n self.arguments = [self.visit(arg) for arg in node.arguments]\n self.qubits = [self.visit(qubit) for qubit in node.qubits]\n self.return_type = self.visit(node.return_type) if node.return_type else \"\"\n\n def visit_QuantumGate(self, node: ast.QuantumGate):\n \"\"\"\n QuantumGate node visitor\n Extracts name, arguments, qubits and return_type from the node\n and makes them usable for mangling\n\n Args:\n node (ast.QuantumGate):\n openQASM quantum gate call node to visit\n \"\"\"\n self.name = self.visit(node.name)\n self.arguments = [self.visit(arg) for arg in node.arguments]\n self.qubits = [self.visit(qubit) for qubit in node.qubits]\n self.return_type = \"\"\n\n 
def visit_QuantumReset(self, node: ast.QuantumReset):\n \"\"\"\n QuantumReset node visitor\n Extracts qubits from the node.\n To be usable for mangling the following operations are performed\n name is set to \"reset\"\n arguments are set to empty ([])\n return_type is set to empty string (\"\")\n\n Args:\n node (ast.QuantumReset):\n openQASM quantum reset node to visit\n \"\"\"\n match node:\n case ast.QuantumReset(ast.Identifier(q)):\n self.name = \"reset\"\n self.arguments = []\n self.qubits = [q]\n self.return_type = \"\"\n case ast.QuantumReset(ast.IndexedIdentifier()):\n raise NotImplementedError\n case _:\n raise NotImplementedError # this should not happen on correct trees\n\n def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement):\n \"\"\"\n QuantumMeasurement node visitor\n Extracts qubits from the node.\n To be usable for mangling the following operations are performed\n name is set to \"measure\"\n arguments are set to empty ([])\n return_type is set \"BIT\"\n\n Args:\n node (ast.QuantumMeasurement):\n openQASM quantum measurement node to visit\n \"\"\"\n match node:\n case ast.QuantumMeasurement(ast.Identifier(q)):\n self.name = \"measure\"\n self.arguments = []\n self.qubits = [q]\n self.return_type = \"BIT\"\n case ast.QuantumMeasurement(ast.IndexedIdentifier()):\n raise NotImplementedError\n case _:\n raise NotImplementedError # this should not happen on correct trees\n\n def visit_Identifier(self, node: ast.Identifier) -> str:\n \"\"\"\n Identifier node visitor\n\n Args:\n node (ast.Identifier):\n openQASM identifier node to visit\n\n Returns:\n str: the name of the identifier\n \"\"\"\n return node.name\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument) -> str:\n \"\"\"\n ClassicalArgument node visitor\n\n Args:\n node (ast.ClassicalArgument):\n openQASM classical argument node to visit\n\n Returns:\n str: the type of the classical argument\n \"\"\"\n return self.visit(node.type)\n\n # pylint: enable=C0103" }, { "identifier": "ScopeContext", "path": "shipyard/utilities.py", "snippet": "class ScopeContext(Enum):\n \"\"\"\n Class for keeping track of the current scope of a openQASM program\n\n detailed discussion can be found at:\n https://openqasm.com/language/scope.html\n\n With additional discussion regarding the scope of calibration definitions at:\n https://openqasm.com/language/pulses.html#inline-calibration-blocks\n \"\"\"\n\n GLOBAL = \"GLOBAL\"\n LOCAL = \"LOCAL\"\n SUBROUTINE = \"SUBROUTINE\"\n DEFCAL = \"DEFCAL\"" }, { "identifier": "GenericVisitor", "path": "shipyard/visitors/generic_visitor.py", "snippet": "class GenericVisitor(QASMVisitor):\n def _visit_list(\n self, nodes: list[ast.QASMNode], visit_function: callable, context=None\n ):\n [visit_function(node) for node in nodes]\n\n def visit_Program(self, node: ast.Program, context=None):\n \"\"\"\n An entire OpenQASM 3 program represented by a list of top level statements\n \"\"\"\n self._visit_list(node.statements, self.visit)\n\n def visit_Annotation(self, node: ast.Annotation, context=None):\n \"\"\"An annotation applied to a statment.\"\"\"\n\n def visit_Statement(self, node: ast.Statement, context=None):\n \"\"\"A statement: anything that can appear on its own line\"\"\"\n self._visit_list(node.annotations, self.visit)\n\n def visit_Include(\n self, node: ast.Include, context=None\n ) -> ast.Include | list[ast.Statement]:\n \"\"\"\n An include statement\n \"\"\"\n self.visit_Statement(node)\n\n def visit_ExpressionStatement(self, node: ast.ExpressionStatement, context=None):\n 
\"\"\"A statement that contains a single expression\"\"\"\n self.visit_Statement(node)\n self.visit(node.expression)\n\n # Note that QubitDeclaration is not a valid QuantumStatement, because qubits\n # can only be declared in global scopes, not in gates.\n def visit_QubitDeclaration(self, node: ast.QubitDeclaration, context=None):\n \"\"\"\n Global qubit declaration\n\n Example::\n\n qubit q;\n qubit[4] q;\n\n q // <- qubit\n 4 // <- size\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.qubit)\n if node.size:\n self.visit(node.size)\n\n def visit_QuantumGateDefinition(\n self, node: ast.QuantumGateDefinition, context=None\n ):\n \"\"\"\n Define a new quantum gate\n\n Example::\n\n gate cx c, t {\n ctrl @ unitary(pi, 0, pi) c, t;\n }\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit_Identifier)\n self._visit_list(node.qubits, self.visit_Identifier)\n self._visit_list(node.body, self.visit)\n\n def visit_QuantumStatement(self, node: ast.QuantumStatement, context=None):\n \"\"\"Statements that may appear inside a gate declaration\"\"\"\n self.visit_Statement(node)\n\n def visit_ExternDeclaration(self, node: ast.ExternDeclaration, context=None):\n \"\"\"\n A extern declaration\n\n Example::\n\n extern get_pauli(int[prec], context=None) -> bit[2 * n];\n\n get_pauli // <- name\n int[prec] // <- classical type\n bit[2 * n] // <- return type\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_Expression(self, node: ast.Expression, context=None):\n \"\"\"An expression: anything that returns a value\"\"\"\n\n def visit_Identifier(self, node: ast.Identifier, context=None):\n \"\"\"\n An identifier\n\n Example::\n\n q1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_UnaryExpression(self, node: ast.UnaryExpression, context=None):\n \"\"\"\n A unary expression\n\n Example::\n\n ~b\n !bool\n -i\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BinaryExpression(self, node: ast.BinaryExpression, context=None):\n \"\"\"\n A binary expression\n\n Example::\n\n q1 || q2\n\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral, context=None):\n \"\"\"\n An integer literal\n\n Example::\n\n 1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_FloatLiteral(self, node: ast.FloatLiteral, context=None):\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral, context=None):\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1im\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral, context=None):\n \"\"\"\n A boolean expression\n\n Example::\n\n true\n false\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral, context=None):\n \"\"\"A literal bitstring value. 
The ``value`` is the numerical value of the\n bitstring, and the ``width`` is the number of digits given.\"\"\"\n self.visit_Expression(node)\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral, context=None):\n \"\"\"\n A duration literal\n\n Example::\n\n 1.0ns\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_ArrayLiteral(self, node: ast.ArrayLiteral, context=None):\n \"\"\"Array literal, used to initialise declared arrays.\n\n For example::\n\n array[uint[8], 2] row{1, 2};\n array[uint[8], 2, 2] my_array{{1, 2}, {3, 4}};\n array[uint[8], 2, 2] my_array{row, row};\n \"\"\"\n self.visit_Expression(node)\n self._visit_list(node.values, self.visit)\n\n def visit_FunctionCall(self, node: ast.FunctionCall, context=None):\n \"\"\"\n A function call expression\n\n Example::\n\n foo(1)\n\n foo // <- name\n\n \"\"\"\n self.visit_Expression(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n\n def visit_Cast(self, node: ast.Cast, context=None):\n \"\"\"\n A cast call expression\n\n Example::\n\n counts += int[1](b);\n\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.type)\n self.visit(node.argument)\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet, context=None):\n \"\"\"\n A set of discrete values. This can be used for the values in a ``for``\n loop, or to index certain values out of a register::\n\n for i in {1, 2, 3} {}\n let aliasqubits[{2, 3, 4}];\n \"\"\"\n self._visit_list(node.values, self.visit)\n\n def visit_RangeDefinition(self, node: ast.RangeDefinition, context=None):\n \"\"\"\n Range definition.\n\n Example::\n\n 1:2\n 1:1:10\n :\n \"\"\"\n if node.start:\n self.visit(node.start)\n if node.end:\n self.visit(node.end)\n if node.step:\n self.visit(node.step)\n\n IndexElement = ast.DiscreteSet | list[ast.Expression | ast.RangeDefinition]\n\n def _visit_IndexElement(self, node: IndexElement, context=None):\n if isinstance(node, list):\n return self._visit_list(node, self.visit)\n return self.visit(node)\n\n def visit_IndexExpression(self, node: ast.IndexExpression, context=None):\n \"\"\"\n An index expression.\n\n Example::\n\n q[1]\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.collection)\n self._visit_IndexElement(node.index)\n\n def visit_IndexedIdentifier(self, node: ast.IndexedIdentifier, context=None):\n \"\"\"An indentifier with index operators, such that it can be used as an\n lvalue. The list of indices is subsequent index brackets, so in::\n\n a[{1, 2, 3}][0:1, 0:1]\n\n the list of indices will have two elements. 
The first will be a\n :class:`.DiscreteSet`, and the second will be a list of two\n :class:`.RangeDefinition`\\\\ s.\n \"\"\"\n self.visit_Identifier(node.name)\n self._visit_list(node.indices, self._visit_IndexElement)\n\n def visit_Concatenation(self, node: ast.Concatenation, context=None):\n \"\"\"\n Concatenation of two registers, for example::\n\n a ++ b\n a[2:3] ++ a[0:1]\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_QuantumGate(self, node: ast.QuantumGate, context=None):\n \"\"\"\n Invoking a quantum gate\n\n Example::\n cx[dur] 0, 1;\n\n or\n\n ctrl @ p(λ) a, b;\n\n ctrl @ // <- quantumGateModifier\n p // <- quantumGateName\n λ // <- argument\n a, b // <- qubit\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.modifiers, self.visit_QuantumGateModifier)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n self._visit_list(node.qubits, self.visit)\n if node.duration:\n self.visit(node.duration)\n\n def visit_QuantumGateModifier(self, node: ast.QuantumGateModifier, context=None):\n \"\"\"\n A quantum gate modifier\n\n Attributes:\n modifier: 'inv', 'pow', or 'ctrl'\n expression: only pow modifier has expression.\n\n Example::\n\n inv @\n pow(1/2)\n ctrl\n \"\"\"\n if node.argument:\n self.visit(node.argument)\n\n def visit_QuantumPhase(self, node: ast.QuantumPhase, context=None):\n \"\"\"\n A quantum phase instruction\n\n Example::\n\n ctrl @ gphase(λ) a;\n\n ctrl @ // <- quantumGateModifier\n λ // <- argument\n a // <- qubit\n\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.modifiers, self.visit_QuantumGateModifier)\n self.visit(node.argument)\n self._visit_list(node.qubits, self.visit)\n\n # Not a full expression because it can only be used in limited contexts.\n def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement, context=None):\n \"\"\"\n A quantum measurement instruction\n\n Example::\n\n measure q;\n \"\"\"\n self.visit(node.qubit)\n\n # Note that this is not a QuantumStatement because it involves access to\n # classical bits.\n def visit_QuantumMeasurementStatement(\n self, node: ast.QuantumMeasurementStatement, context=None\n ):\n \"\"\"Stand-alone statement of a quantum measurement, potentially assigning the\n result to a classical variable. 
This is not the only statement that\n `measure` can appear in (it can also be in classical declaration statements\n and returns).\"\"\"\n self.visit_Statement(node)\n self.visit_QuantumMeasurement(node.measure)\n if node.target:\n self.visit(node.target)\n\n def visit_QuantumBarrier(self, node: ast.QuantumBarrier, context=None):\n \"\"\"\n A quantum barrier instruction\n\n Example::\n\n barrier q;\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.qubits, self.visit)\n\n def visit_QuantumReset(self, node: ast.QuantumReset, context=None):\n \"\"\"\n A reset instruction.\n\n Example::\n\n reset q;\n \"\"\"\n\n self.visit_QuantumStatement(node)\n self.visit(node.qubits)\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument, context=None):\n \"\"\"\n Classical argument for a gate or subroutine declaration\n \"\"\"\n self.visit(node.type)\n self.visit_Identifier(node.name)\n\n def visit_ExternArgument(self, node: ast.ExternArgument, context=None):\n \"\"\"Classical argument for an extern declaration.\"\"\"\n\n self.visit(node.type)\n\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration, context=None):\n \"\"\"\n Classical variable declaration\n\n Example::\n\n bit c;\n \"\"\"\n\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n if node.init_expression:\n self.visit(node.init_expression)\n\n def visit_IODeclaration(self, node: ast.IODeclaration, context=None):\n \"\"\"\n Input/output variable declaration\n\n Exampe::\n\n input angle[16] theta;\n output bit select;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n\n def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration, context=None):\n \"\"\"\n Constant declaration\n\n Example::\n\n const int[16] n10;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n self.visit(node.init_expression)\n\n def visit_ClassicalType(self, node: ast.ClassicalType, context=None):\n \"\"\"\n Base class for classical type\n \"\"\"\n\n def visit_IntType(self, node: ast.IntType, context=None):\n \"\"\"\n Node representing a classical ``int`` (signed integer) type, with an\n optional precision.\n\n Example:\n\n int[8]\n int[16]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_UintType(self, node: ast.UintType, context=None):\n \"\"\"\n Node representing a classical ``uint`` (unsigned integer) type, with an\n optional precision.\n\n Example:\n\n uint[8]\n uint[16]\n \"\"\"\n\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_FloatType(self, node: ast.FloatType, context=None):\n \"\"\"\n Node representing the classical ``float`` type, with the particular IEEE-754\n floating-point size optionally specified.\n\n Example:\n\n float[16]\n float[64]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_ComplexType(self, node: ast.ComplexType, context=None):\n \"\"\"\n Complex ClassicalType. 
Its real and imaginary parts are based on other\n classical types.\n\n Example::\n\n complex[float]\n complex[float[32]]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.base_type:\n self.visit(node.base_type)\n\n def visit_AngleType(self, node: ast.AngleType, context=None):\n \"\"\"\n Node representing the classical ``angle`` type, with an optional precision.\n\n Example::\n\n angle[8]\n angle[16]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_BitType(self, node: ast.BitType, context=None):\n \"\"\"\n Node representing the classical ``bit`` type, with an optional size.\n\n Example::\n\n bit[8]\n creg[8]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_BoolType(self, node: ast.BoolType, context=None):\n \"\"\"\n Leaf node representing the Boolean classical type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_ArrayType(self, node: ast.ArrayType, context=None):\n \"\"\"Type of arrays that include allocation of the storage.\n\n This is generally any array declared as a standard statement, but not\n arrays declared by being arguments to subroutines.\n \"\"\"\n self.visit_ClassicalType(node)\n self.visit(node.base_type)\n self._visit_list(node.dimensions, self.visit)\n\n def visit_ArrayReferenceType(self, node: ast.ArrayReferenceType, context=None):\n \"\"\"Type of arrays that are a reference to an array with allocated storage.\n\n This is generally any array declared as a subroutine argument. The\n dimensions can be either a list of expressions (one for each dimension), or\n a single expression, which is the number of dimensions.\n\n For example::\n\n // `a` will have dimensions `[IntegerLiteral(2)]` (with a list), because\n // it is a 1D array, with a length of 2.\n def f(const array[uint[8], 2] a) {}\n // `b` will have dimension `IntegerLiteral(3)` (no list), because it is\n // a 3D array, but we don't know the lengths of its dimensions.\n def f(const array[uint[8], #dim=3] b) {}\n \"\"\"\n\n self.visit_ClassicalType(node)\n self.visit(node.base_type)\n if isinstance(node.dimensions, list):\n self._visit_list(node.dimensions, self.visit)\n else:\n self.visit(node.dimensions)\n\n def visit_DurationType(self, node: ast.DurationType, context=None):\n \"\"\"\n Leaf node representing the ``duration`` type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_StretchType(self, node: ast.StretchType, context=None) -> ast.StretchType:\n \"\"\"\n Leaf node representing the ``stretch`` type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_CalibrationGrammarDeclaration(\n self, node: ast.CalibrationGrammarDeclaration, context=None\n ):\n \"\"\"\n Calibration grammar declaration\n\n Example::\n\n defcalgrammar \"openpulse\";\n \"\"\"\n\n def visit_CalibrationStatement(self, node: ast.CalibrationStatement, context=None):\n \"\"\"An inline ``cal`` statement for embedded pulse-grammar interactions.\n\n Example::\n\n cal {\n shift_phase(drive($0), theta);\n }\n \"\"\"\n self.visit_Statement(node)\n self._visit_list(node.body, self.visit)\n\n def visit_CalibrationBlock(self, node: ast.CalibrationBlock, context=None):\n self._visit_list(node.body, self.visit)\n\n def visit_CalibrationDefinition(\n self, node: ast.CalibrationDefinition, context=None\n ):\n \"\"\"\n Calibration definition\n\n Example::\n\n defcal rz(angle[20] theta) q {\n shift_phase drive(q), -theta;\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n 
self._visit_list(node.qubits, self.visit_Identifier)\n self._visit_list(node.body, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition, context=None):\n \"\"\"\n Subroutine definition\n\n Example::\n\n def measure(qubit q, context=None) -> bit {\n s q;\n h q;\n return measure q;\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n self._visit_list(node.body, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_QuantumArgument(self, node: ast.QuantumArgument, context=None):\n \"\"\"\n Quantum argument for a subroutine declaration\n \"\"\"\n self.visit_Identifier(node.name)\n if node.size:\n self.visit(node.size)\n\n def visit_ReturnStatement(self, node: ast.ReturnStatement, context=None):\n \"\"\"\n Classical or quantum return statement\n\n Example::\n\n return measure q;\n\n return a + b\n\n \"\"\"\n self.visit_Statement(node)\n if node.expression:\n self.visit(node.expression)\n\n def visit_BreakStatement(self, node: ast.BreakStatement, context=None):\n \"\"\"\n Break statement\n\n Example::\n\n break;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_ContinueStatement(self, node: ast.ContinueStatement, context=None):\n \"\"\"\n Continue statement\n\n Example::\n\n continue;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_EndStatement(self, node: ast.EndStatement, context=None):\n \"\"\"\n End statement\n\n Example::\n\n end;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_BranchingStatement(self, node: ast.BranchingStatement, context=None):\n \"\"\"\n Branch (``if``) statement\n\n Example::\n\n if (temp == 1) {\n ry(-pi / 2) scratch[0];\n } else continue;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.condition)\n self._visit_list(node.if_block, self.visit)\n self._visit_list(node.else_block, self.visit)\n\n def visit_WhileLoop(self, node: ast.WhileLoop, context=None):\n \"\"\"\n While loop\n\n Example::\n\n while(~success) {\n reset magic;\n ry(pi / 4) magic;\n successdistill(magic, scratch);\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.while_condition)\n self._visit_list(node.block, self.visit)\n\n def visit_ForInLoop(self, node: ast.ForInLoop, context=None):\n \"\"\"\n For in loop\n\n Example::\n\n for i in [0: 2] {\n majority a[i], b[i + 1], a[i + 1];\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n self.visit(node.set_declaration)\n self._visit_list(node.block, self.visit)\n\n def visit_DelayInstruction(self, node: ast.DelayInstruction, context=None):\n \"\"\"\n Delay instruction\n\n Example::\n\n delay[start_stretch] $0;\n \"\"\"\n self.visit_QuantumStatement(node)\n self.visit(node.duration)\n self._visit_list(node.qubits, self.visit)\n\n def visit_Box(self, node: ast.Box, context=None):\n \"\"\"\n Timing box\n\n Example::\n\n box [maxdur] {\n delay[start_stretch] $0;\n x $0;\n }\n \"\"\"\n self.visit_QuantumStatement(node)\n if node.duration:\n self.visit(node.duration)\n self._visit_list(node.body, self.visit)\n\n def visit_DurationOf(self, node: ast.DurationOf, context=None):\n \"\"\"\n Duration Of\n\n Example::\n\n durationof({x $0;})\n \"\"\"\n self.visit_Expression(node)\n self._visit_list(node.target, self.visit)\n\n def visit_SizeOf(self, node: ast.SizeOf, context=None):\n \"\"\"``sizeof`` an array's dimensions.\"\"\"\n self.visit_Expression(node)\n self.visit(node.target)\n if node.index:\n 
self.visit(node.index)\n\n def visit_AliasStatement(self, node: ast.AliasStatement, context=None):\n \"\"\"\n Alias statement\n\n Example::\n\n let aqubits[0];\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.target)\n self.visit(node.value)\n\n def visit_ClassicalAssignment(self, node: ast.ClassicalAssignment, context=None):\n \"\"\"\n Classical assignment\n\n Example::\n\n a[0]1;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.lvalue)\n self.visit(node.rvalue)\n\n def visit_Pragma(self, node: ast.Pragma, context=None):\n \"\"\"\n Pragma\n Example::\n\n #pragma val1 val2 val3\n \"\"\"\n\n def visit_WaveformType(self, node: ast.WaveformType, context=None):\n self.visit_ClassicalType(node)\n\n def visit_PortType(self, node: ast.PortType, context=None):\n self.visit_ClassicalType(node)\n\n def visit_FrameType(self, node: ast.FrameType, context=None):\n self.visit_ClassicalType(node)" }, { "identifier": "LiteralVisitor", "path": "shipyard/visitors/literal_visitor.py", "snippet": "class LiteralVisitor:\n \"\"\"Class defining methods for visiting openQASM literal-nodes\"\"\"\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral) -> str:\n \"\"\"\n BitstringLiteral node visitor:\n\n Args:\n node (ast.BitstringLiteral):\n openQASM bitstring literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n value = bin(node.value)[2:]\n if len(value) < node.width:\n value = \"0\" * (node.width - len(value)) + value\n return f'\"{value}\"'\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> str:\n \"\"\"\n IntegerLiteral node visitor:\n\n Args:\n node (ast.IntegerLiteral):\n openQASM integer literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return str(node.value)\n\n def visit_FloatLiteral(self, node: ast.IntegerLiteral) -> str:\n \"\"\"\n FloatLiteral node visitor:\n\n Args:\n node (ast.FloatLiteral):\n openQASM float literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return str(node.value)\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral) -> str:\n \"\"\"\n ImaginaryLiteral node visitor:\n\n Args:\n node (ast.ImaginaryLiteral):\n openQASM imaginary literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return str(node.value) + \"im\"\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> str:\n \"\"\"\n BooleanLiteral node visitor:\n\n Args:\n node (ast.BooleanLiteral):\n openQASM boolean literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return \"true\" if node.value else \"false\"\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> str:\n \"\"\"\n DurationLiteral node visitor:\n\n Args:\n node (ast.DurationLiteral):\n openQASM duration literal node to visit\n\n Returns:\n str: string representation of the node value\n \"\"\"\n return f\"{node.value}{node.unit.name}\"\n\n # def visit_ArrayLiteral(self, node: ast.ArrayLiteral) -> None:\n # self._visit_sequence(node.values, context, start=\"{\", end=\"}\", separator=\", \")" }, { "identifier": "TypeVisitor", "path": "shipyard/visitors/type_visitor.py", "snippet": "class TypeVisitor:\n \"\"\"Class defining methods for visiting openQASM type-nodes\"\"\"\n\n def _visit_type_node(self, node: ast.ClassicalType) -> str:\n \"\"\"\n type node visitor:\n Returns the name of a Type node\n Example:\n node:ast.FloatType -> 'FLOAT'\n\n Args:\n node (ast.ClassicalType): node that is a 
subclass of ClassicalType\n\n Returns:\n str: name of the node type\n \"\"\"\n return str(node.__class__.__name__).upper().split(\"TYPE\", maxsplit=1)[0]\n\n def _visit_type_node_wrapper(self, node: ast.ClassicalType):\n return self._visit_type_node(node)\n\n visit_IntType = _visit_type_node_wrapper\n visit_UintType = _visit_type_node_wrapper\n visit_FloatType = _visit_type_node_wrapper\n visit_ComplexType = _visit_type_node_wrapper # todo expand to indicate base type\n visit_AngleType = _visit_type_node_wrapper\n visit_BitType = _visit_type_node_wrapper\n visit_BoolType = _visit_type_node_wrapper\n visit_ArrayType = (\n _visit_type_node_wrapper # todo expand to indicate type+size of array\n )\n\n def visit_ArrayReferenceType(self, node: ast.ArrayReferenceType) -> None:\n \"\"\"\n ToDo\n \"\"\"\n raise NotImplementedError\n\n visit_DurationType = _visit_type_node_wrapper\n visit_StretchType = _visit_type_node_wrapper\n\n visit_PortType = _visit_type_node_wrapper\n visit_FrameType = _visit_type_node_wrapper\n visit_WaveformType = _visit_type_node_wrapper" }, { "identifier": "CalScopedSymbolTable", "path": "shipyard/passes/semantic_analysis/scoped_symbol_table.py", "snippet": "class CalScopedSymbolTable(ScopedSymbolTable):\n \"\"\"\n Scoped Symbol Table for openPulse code, used when in 'cal' and 'defcal' blocks\n in openQASM programs and using the openPulse defcalgrammar\n \"\"\"\n\n _builtin_cal_symbol_lists = [BUILTIN_CAL_TYPES, BUILTIN_OPENPULSE, BUILTIN_ZI_WFM]\n\n def __init__(\n self,\n scope_name: str,\n enclosing_scope: \"ScopedSymbolTable\" = None,\n init_cal: bool = False,\n ) -> None:\n super().__init__(scope_name, enclosing_scope)\n if init_cal:\n self._init_cal_builtins()\n\n def _init_cal_builtins(self):\n for symbol_list in self._builtin_cal_symbol_lists:\n for symbol in symbol_list:\n self.insert(symbol)" }, { "identifier": "ScopedSymbolTable", "path": "shipyard/passes/semantic_analysis/scoped_symbol_table.py", "snippet": "class ScopedSymbolTable:\n \"\"\"\n Symbol Table for keeping track of symbols, defined in openQASM programs,\n and their scope.\n\n Used during Semantic Analysis of openQASM programs\n\n The symbol table is a managed dictionary, which should not be interacted with\n directly but rather using the 'insert' and 'lookup' methods of the class.\n\n Todo consider implementing __getitem__, __setitem__, items() and values() methods\n \"\"\"\n\n _builtin_symbol_lists = [BUILTIN_TYPES, BUILTIN_ZI_EXP, BUILTIN_ZI_FUNC]\n\n _builtin_functions = []\n _builtin_gates = [\"measure\"] # todo is this built in?\n\n def __init__(\n self,\n scope_name: str,\n enclosing_scope: \"ScopedSymbolTable\" = None,\n ) -> None:\n self._symbols: dict[str, Symbol] = {}\n self.scope_name = scope_name\n self.enclosing_scope: \"ScopedSymbolTable\" = enclosing_scope\n LOGGER.debug(\"Created scope named: %s\", self.scope_name)\n if enclosing_scope is None:\n self._init_builtins()\n\n def _init_builtins(self):\n for symbol_list in self._builtin_symbol_lists:\n for symbol in symbol_list:\n self.insert(symbol)\n\n def __str__(self) -> str:\n header1 = \"SCOPE (SCOPED SYMBOL TABLE)\"\n lines = [\"\\n\", header1, \"=\" * len(header1)]\n for header_name, header_value in (\n (\"Scope name\", self.scope_name),\n (\n \"Enclosing scope\",\n self.enclosing_scope.scope_name if self.enclosing_scope else None,\n ),\n ):\n lines.append(f\"{header_name:<16}: {header_value}\")\n header2 = \"Scope (Scoped symbol table) contents\"\n lines.extend([header2, \"-\" * len(header2)])\n lines.extend(\n f\"{key:>16}: 
{value.__repr__()}\" for key, value in self._symbols.items()\n )\n lines.append(\"\\n\")\n symbol_table_string = \"\\n\".join(lines)\n return symbol_table_string\n\n __repr__ = __str__\n\n def insert(self, symbol: Symbol):\n \"\"\"Inserts a symbol into the symbol table\n\n Args:\n symbol (Symbol): Symbol to insert into the table\n \"\"\"\n LOGGER.debug(\"Insert into %s: %s\", self.scope_name, symbol)\n self._symbols[symbol.name] = symbol\n\n def lookup(self, name: str, current_scope_only: bool = False) -> Symbol:\n \"\"\"looks up a symbol by name in the symbol table\n\n Args:\n name\n (str): the name of the symbol to look up in the symbol table\n current_scope_only (bool, optional):\n If True a symbol is only looked up in the current scope.\n Else, if it is not found within the current symbol table,\n it is looked up in any enclosing scopes\n\n Returns:\n Symbol:\n A Symbol with name matching the name being looked up,\n None:\n If a symbol with the name is not found\n \"\"\"\n LOGGER.debug(\"Lookup: %s. (Scope name: %s\", name, self.scope_name)\n # 'symbol' is either an instance of the Symbol class or None\n symbol = self._symbols.get(name, None)\n\n if symbol is not None:\n return symbol\n\n if current_scope_only:\n return None\n\n # recursively go up the chain and lookup the name\n if self.enclosing_scope is not None:\n return self.enclosing_scope.lookup(name)\n return None\n\n def keys(self, current_scope_only=False) -> list[str]:\n \"\"\"returns the name of all symbols in scope\n\n Args:\n current_scope_only (bool, optional):\n If true only returns the names of symbols in current scope.\n Defaults to False.\n\n Returns:\n list[str]: names of all the symbols in scope\n \"\"\"\n symbol_names = list(self._symbols.keys())\n if current_scope_only:\n return symbol_names\n if self.enclosing_scope is not None:\n symbol_names.extend(\n [\n name\n for name in self.enclosing_scope.keys()\n if name not in symbol_names\n ]\n )\n return symbol_names" }, { "identifier": "AliasSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class AliasSymbol(Symbol):\n \"\"\"A symbol that represents an alias of another symbol\"\"\"" }, { "identifier": "ClassicalSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class ClassicalSymbol(Symbol):\n \"\"\"A symbol that represents a classical variable\n\n the kind of the symbol should be the name of a builtin classical symbol\n (i.e. BuiltinSymbol/BuiltinCalSymbol but not QUBIT)\n \"\"\"\n\n _validate_classical = validator(\"kind\", allow_reuse=True)(\n kind_must_be_name_of_classical_type\n )" }, { "identifier": "ConstantSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class ConstantSymbol(Symbol):\n \"\"\"A symbol that represents a classical compile time constant\n\n the kind of the symbol should be the name of a builtin classical symbol\n (i.e. 
BuiltinSymbol/BuiltinCalSymbol but not QUBIT)\n \"\"\"\n\n _validate_classical = validator(\"kind\", allow_reuse=True)(\n kind_must_be_name_of_classical_type\n )" }, { "identifier": "DefcalSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class DefcalSymbol(GateSymbol):\n \"\"\"A symbol representing a calibration definition of an operation\n\n e.g., the physical pulses used to perfrom a gate operation\n or a measurement on a qubit\n\n for further reading\n https://openqasm.com/language/pulses.html\n \"\"\"" }, { "identifier": "ExternSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class ExternSymbol(SubroutineSymbol):\n \"\"\"A symbol representing external functions or ports,\n\n for further reading\n https://openqasm.com/language/classical.html#extern-function-calls\n \"\"\"" }, { "identifier": "GateSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class GateSymbol(SubroutineSymbol):\n \"\"\"A symbol representing a quantum gate operation\n\n a quantum gate represents the unitary quantum operation\n\n for further reading\n https://openqasm.com/language/gates.html\n \"\"\"\n\n qubits: list[QuantumSymbol] = Field(default_factory=lambda: [])" }, { "identifier": "IOSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class IOSymbol(Symbol):\n \"\"\"A symbol that represents Input/Output of a script,\n i.e. a value that will be provided at runtime or a value that will be returned\n from running the script.\n\n This behaviour is not currently implemented in our pipeline\n\n for further reading\n https://openqasm.com/language/directives.html#input-output\n \"\"\"\n\n _validate_classical = validator(\"kind\", allow_reuse=True)(\n kind_must_be_name_of_classical_type\n )" }, { "identifier": "LiteralSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class LiteralSymbol(ClassicalSymbol):\n \"\"\"A symbol that represents a Literal\"\"\"" }, { "identifier": "QuantumSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class QuantumSymbol(Symbol):\n \"\"\"\n A symbol representing quantum objects, i.e., either a qubit or a qubit register\n \"\"\"\n\n @validator(\"kind\")\n def kind_must_be_name_of_quantum_type(cls, kind: str) -> str:\n \"\"\"if the input string is a name of a quantum type it is returned else a\n validation error is raised\n\n Args:\n kind (str): should be the name of a quantum type\n\n Returns:\n str: input string if it is a name of a quantum type\n \"\"\"\n assert kind in _BUILTIN_QUANTUM_SYMBOL_NAMES\n return kind" }, { "identifier": "SubroutineSymbol", "path": "shipyard/passes/semantic_analysis/symbols.py", "snippet": "class SubroutineSymbol(Symbol):\n \"\"\"A symbol representing subroutines\n\n for further reading\n https://openqasm.com/language/subroutines.html\n \"\"\"\n\n params: list[Symbol] = Field(default_factory=lambda: [])\n return_type: str = None\n\n @validator(\"return_type\")\n def return_classical_or_none(cls, return_type: str):\n \"\"\"If the return type is a classical type or an array it is returned\n in upper case format, else a ValidationError is raised\n\n Args:\n return_type (str): should be a name of a valid classical type or 'array'\n\n Returns:\n str: uppercase input string if valid classical type or 'ARRAY'\n \"\"\"\n if return_type is not None:\n return_type = return_type.upper()\n assert return_type in _BUILTIN_CLASSICAL_SYMBOL_NAMES + [\"ARRAY\"]\n return return_type" }, { "identifier": "Symbol", "path": 
"shipyard/passes/semantic_analysis/symbols.py", "snippet": "class Symbol(BaseModel):\n \"\"\"Base class for Symbols\"\"\"\n\n name: str\n kind: str = None\n\n @validator(\"kind\")\n def force_kind_uppercase(cls, kind: str) -> str:\n \"\"\"If the string 'kind' is not None make it uppercase\n\n Args:\n kind (str): a string (or None)\n\n Returns:\n str: the same string but uppercase (returns None if 'kind' is None)\n \"\"\"\n if kind is not None:\n return kind.upper()\n return kind" } ]
from contextlib import contextmanager
from openpulse import ast
from ...compiler_error import ErrorCode, SemanticError
from ...logger import LOGGER
from ...mangle import Mangler
from ...utilities import ScopeContext
from ...visitors import GenericVisitor, LiteralVisitor, TypeVisitor
from .scoped_symbol_table import CalScopedSymbolTable, ScopedSymbolTable
from .symbols import (
    AliasSymbol,
    ClassicalSymbol,
    ConstantSymbol,
    DefcalSymbol,
    ExternSymbol,
    GateSymbol,
    IOSymbol,
    LiteralSymbol,
    QuantumSymbol,
    SubroutineSymbol,
    Symbol,
)
11,471
""" Module that host the SemanticAnalyser QASMVisitor class that can be used to perform semantic analysis on openQASM Abstract Syntax Trees. """ # pylint: disable=R0904: # Too many public methods class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor): """ QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree usage: qasm_ast = openpulse.parse(qasm_program_string) sa = SemanticAnalyser() sa.visit(qasm_ast) """ def __init__(self) -> None:
""" Module that host the SemanticAnalyser QASMVisitor class that can be used to perform semantic analysis on openQASM Abstract Syntax Trees. """ # pylint: disable=R0904: # Too many public methods class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor): """ QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree usage: qasm_ast = openpulse.parse(qasm_program_string) sa = SemanticAnalyser() sa.visit(qasm_ast) """ def __init__(self) -> None:
self.current_scope: ScopedSymbolTable = None
9
2023-11-16 17:37:29+00:00
16k
raphaelreme/koft
src/experiments/track.py
[ { "identifier": "FakeDetector", "path": "src/detector.py", "snippet": "class FakeDetector(byotrack.Detector): # TODO: include weight\n def __init__(self, mu: torch.Tensor, noise=1.0, fpr=0.1, fnr=0.2, generate_outside_particles=True):\n self.noise = noise\n self.fpr = fpr\n self.fnr = fnr\n self.mu = mu\n self.n_particles = mu.shape[1]\n self.generate_outside_particles = generate_outside_particles\n\n def run(self, video: Iterable[np.ndarray]) -> Collection[byotrack.Detections]:\n detections_sequence = []\n\n for k, frame in enumerate(tqdm.tqdm(video)):\n frame = frame[..., 0] # Drop channel\n shape = torch.tensor(frame.shape)\n\n detected = torch.rand(self.n_particles) >= self.fnr # Miss some particles (randomly)\n positions = self.mu[k, detected] + torch.randn((detected.sum(), 2)) * self.noise\n positions = positions[(positions > 0).all(dim=-1)]\n positions = positions[(positions < shape - 1).all(dim=-1)]\n\n # Create fake detections\n # 1- Quickly compute the background mask\n mask = torch.tensor(cv2.GaussianBlur(frame, (33, 33), 15) > 0.2)\n mask_proportion = mask.sum().item() / mask.numel()\n\n # 2- Scale fpr by the mask proportion\n n_fake = int(len(positions) * (self.fpr + torch.randn(1).item() * self.fpr / 10) / mask_proportion)\n false_alarm = torch.rand(n_fake, 2) * (shape - 1)\n\n if not self.generate_outside_particles: # Filter fake detections outside the mask\n false_alarm = false_alarm[mask[false_alarm.long()[:, 0], false_alarm.long()[:, 1]]]\n\n positions = torch.cat((positions, false_alarm))\n\n # bbox = torch.cat((positions - 1, torch.zeros_like(positions) + 3), dim=-1)\n detections_sequence.append(\n byotrack.Detections(\n {\n \"position\": positions,\n # \"bbox\": bbox.round().to(torch.int32),\n \"shape\": shape,\n },\n frame_id=k,\n )\n )\n\n return detections_sequence" }, { "identifier": "DetectionMetric", "path": "src/metrics/detections.py", "snippet": "class DetectionMetric:\n \"\"\"\"\"\"\n\n def __init__(self, dist_thresh: float, greedy=True) -> None:\n self.dist_thresh = dist_thresh\n self.greedy = greedy\n self.lap_solver = pylapy.LapSolver()\n\n def compute_at(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n prob_thresh=0.0,\n weight_thresh=0.0,\n ) -> Dict[str, float]:\n \"\"\"Compute the precision, recall and f1 at a given probability and weight thresholds\"\"\"\n if true_weight is not None:\n true_position = true_position[true_weight > weight_thresh]\n\n predicted_position = detections.position[detections.confidence > prob_thresh]\n\n dist = torch.cdist(predicted_position, true_position)\n\n if self.greedy:\n dist[dist > self.dist_thresh] = torch.inf\n tp = self.lap_solver.solve(dist.numpy()).shape[0]\n else:\n tp = self.lap_solver.solve(dist.numpy(), self.dist_thresh).shape[0]\n\n n_pred = len(predicted_position)\n n_true = len(true_position)\n precision = tp / n_pred if n_pred else 1.0\n recall = tp / n_true if n_true else 1.0\n f1 = 2 * tp / (n_true + n_pred) if n_pred + n_true else 1.0\n\n return {\n \"precision\": precision,\n \"recall\": recall,\n \"f1\": f1,\n \"n_pred\": n_pred,\n \"n_true\": n_true,\n \"tp\": tp,\n }\n\n def average_precision_weight(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n prob_thresh=0.0,\n ) -> float:\n recalls = []\n precisions = []\n\n for weight_thresh in torch.linspace(0, 2.0, 201):\n metrics = self.compute_at(detections, true_position, true_weight, prob_thresh, 
weight_thresh.item())\n recalls.append(metrics[\"recall\"])\n precisions.append(metrics[\"precision\"])\n\n return compute_ap(recalls, precisions)\n\n def average_precision_prob(\n self,\n detections: byotrack.Detections,\n true_position: torch.Tensor,\n true_weight: Optional[torch.Tensor] = None,\n weight_thresh=0.0,\n ) -> float:\n recalls = []\n precisions = []\n\n for prob_thresh in torch.linspace(1.0, 0.0, 101):\n metrics = self.compute_at(detections, true_position, true_weight, prob_thresh.item(), weight_thresh)\n recalls.append(metrics[\"recall\"])\n precisions.append(metrics[\"precision\"])\n\n return compute_ap(recalls, precisions)" }, { "identifier": "compute_tracking_metrics", "path": "src/metrics/tracking.py", "snippet": "def compute_tracking_metrics(\n tracks: Collection[byotrack.Track], ground_truth: Dict[str, torch.Tensor]\n) -> Dict[str, float]:\n \"\"\"Compute [email protected] (consider that gt matches with pred if dist < 1.5 pixels)\n\n Also returns localization errors when matching at 4.5 pixels.\n\n We choose not to aggregate the HOTA performances at different thresholds, but rather choose one,\n and use LocA to measure localization errors. (Converted in pixels)\n\n Keys:\n HOTA: HOTA at 1.5 pixels\n DetA: Jacquard of detections\n DetPr: Precision of detections\n DetRe: Recall of detections\n AssA: Jacquard of associations\n AssPr: Precision of associations\n AssRe: Recall of associations\n Loca: Localization errors (but at 4.5 pixels)\n \"\"\"\n gt_data = simulator_to_eval(ground_truth[\"mu\"], ground_truth[\"weight\"])\n track_data = tracks_to_eval(tracks)\n data = {**gt_data, **track_data}\n add_similarity(data)\n\n metric = trackeval.metrics.hota.HOTA()\n metrics = metric.eval_sequence(data)\n\n # -6 => 0.7 similarity => 1 - 1.5 / 5\n return {\n \"HOTA\": float(metrics[\"HOTA\"][-6]),\n \"DetA\": float(metrics[\"DetA\"][-6]),\n \"DetPr\": float(metrics[\"DetPr\"][-6]),\n \"DetRe\": float(metrics[\"DetRe\"][-6]),\n \"AssA\": float(metrics[\"AssA\"][-6]),\n \"AssPr\": float(metrics[\"AssPr\"][-6]),\n \"AssRe\": float(metrics[\"AssRe\"][-6]),\n \"Loca\": 5 - 5 * float(metrics[\"LocA\"][1]), # Mean of pixel errors for TP associations\n }" }, { "identifier": "constant_kalman_filter", "path": "src/skt.py", "snippet": "def constant_kalman_filter(measurement_std: torch.Tensor, process_std: torch.Tensor, dim=2, order=1) -> KalmanFilter:\n \"\"\"Create a constant Velocity/Acceleration/Jerk Kalman Filter\n\n Create a kalman filter with a state containing the positions on each dimension (x, y, z, ...)\n with their derivatives up to `order`. The order-th derivatives are supposed constant.\n\n Let x be the positions for each dim and x^i the i-th derivatives of these positions\n Prediction follows:\n x^i_{t+1} = x^i_t + x^{i+1}_t, for i < order\n x^order_{t+1} = x^order_t\n\n Args:\n measurement_std (torch.Tensor): Std of the measurements\n 99.7% of measurements should fall within 3 std of the true position\n Shape: Broadcastable to dim, dtype: float64\n process_std (torch.Tensor): Process noise, a typical value is maximum diff between two consecutive\n order-th derivative. 
(Eg: for constant velocity -> Maximum acceleration between two frames)\n Shape: Broadcastable to dim, dtype: float64\n dim (int): Dimension of the motion (1d, 2d, 3d, ...)\n Default: 2\n order (int): Order of the filer (The order-th derivatives are constants)\n Default: 1 (Constant velocity)\n\n \"\"\"\n measurement_std = torch.broadcast_to(measurement_std, (dim,))\n process_std = torch.broadcast_to(process_std, (dim,))\n\n state_dim = (order + 1) * dim\n\n # Measurement model\n # We only measure the positions\n # Noise is independent and can have a different value in each direction\n measurement_matrix = torch.eye(dim, state_dim)\n measurement_noise = torch.eye(dim) * measurement_std**2\n\n # Process\n # Constant model\n # Noise in velocity estimation (which induce a noise in position estimation)\n process_matrix = torch.eye(state_dim) + torch.tensor(np.eye(state_dim, k=dim)).to(torch.float32)\n process_noise = torch.tensor(\n filterpy.common.Q_discrete_white_noise(order + 1, block_size=dim, order_by_dim=False)\n ).to(torch.float32) * torch.cat([process_std**2] * (order + 1))\n\n return KalmanFilter(process_matrix, measurement_matrix, process_noise, measurement_noise)" }, { "identifier": "Dist", "path": "src/skt.py", "snippet": "class Dist(enum.Enum):\n MAHALANOBIS = \"mahalanobis\"\n EUCLIDIAN = \"euclidian\"\n LIKELIHOOD = \"likelihood\"" }, { "identifier": "Method", "path": "src/skt.py", "snippet": "class Method(enum.Enum):\n \"\"\"Matching methods\n\n Opt: GDM with Jonker-volgenant algorithm (Linear assignement solver)\n Can be smooth thresholding or hard\n Greedy: Takes the best matches iteratively\n \"\"\"\n\n OPT_SMOOTH = \"opt_smooth\"\n OPT_HARD = \"opt_hard\"\n GREEDY = \"greedy\"" }, { "identifier": "MatchingConfig", "path": "src/skt.py", "snippet": "class MatchingConfig:\n thresh: float\n dist: Dist = Dist.MAHALANOBIS\n method: Method = Method.OPT_SMOOTH" }, { "identifier": "SimpleKalmanTracker", "path": "src/skt.py", "snippet": "class SimpleKalmanTracker(byotrack.Linker):\n \"\"\"Simple Kalman tracker (SKT)\"\"\"\n\n def __init__(self, kalman_filter: KalmanFilter, match_cfg: MatchingConfig) -> None:\n super().__init__()\n self.kalman_filter = kalman_filter\n self.tracks: List[PartialTrack] = []\n self.active_tracks: List[PartialTrack] = []\n self.state = GaussianState( # Current state of active tracks\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n )\n\n self.match_cfg = match_cfg\n\n def run(\n self, video: Iterable[np.ndarray], detections_sequence: Collection[byotrack.Detections]\n ) -> Collection[byotrack.Track]:\n # Reset tracks and states\n self.tracks = []\n self.active_tracks = []\n self.state = GaussianState(\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n ) # The first iteration will predict and associate with 0 tracks, leading to no association\n # Thus creating tracks for all detections in the first frame\n\n for detections in tqdm.tqdm(detections_sequence):\n self.update(detections)\n\n tracks = []\n for track in self.tracks + self.active_tracks:\n if track.track_state in (track.TrackState.DELETED, track.TrackState.INITIATED):\n continue # Ignore unconfirmed tracks\n tracks.append(\n byotrack.Track(\n track.start,\n track.points,\n track.track_id,\n )\n )\n return tracks\n\n def match(self, projection: GaussianState, measures: torch.Tensor) -> torch.Tensor:\n \"\"\"Match projection with 
measures using positions\n\n If velocity measure (KOFT) is available, we do not use it here (Even if it could be better)\n\n Args:\n projection (GaussianState): Projection for all tracks. Only supports 2D (dim_z = 2 or 4\n if velocities are included). Mean: (n, dim_z, 1), Cov: (n, dim_z, dim_z)\n measures (torch.Tensor): Measures to match with tracks. Only supports 2D. Measures can\n include velocities but it won't be used for matching. (Though could be an easy upgrade)\n Shape: (m, 2, 1) or (m, 4 ,1), dtype: float32\n\n Returns:\n torch.Tensor: Links between tracks and measures\n Shape: (L, 2), dtype: int32\n \"\"\"\n dist: torch.Tensor\n thresh: float\n\n if self.match_cfg.dist in (Dist.MAHALANOBIS, Dist.LIKELIHOOD):\n if projection.precision is None:\n # Register in case someone needs it afterwards (like kf.update)\n projection.precision = projection.covariance.inverse().contiguous()\n\n precision = projection.precision[:, None, :2, :2] # Handle 4d projection with speed. (n, 1, 2, 2)\n # We noticed that it is more efficient to use inv(cov)[:2, :2] rather than inv(cov[:2, :2])...\n # Need more investigatation but: This solution is equivalent to consider than the speed prediction\n # is perfect and using covariance between speed and position to quantify the errors on positions\n # precision != torch.linalg.inv(projection.covariance[:, None, :2, :2])\n\n diff = projection.mean[:, None, :2] - measures[None, :, :2] # Shape: (n, m, 2, 1)\n dist = diff.mT @ precision @ diff # Shape: (n, m, 1, 1)\n if self.match_cfg.dist == Dist.MAHALANOBIS:\n dist = dist[..., 0, 0]\n thresh = self.match_cfg.thresh**2 # No need to take the sqrt, let's compare to the sq thresh\n else: # likelihood\n log_det = torch.log(torch.det(projection.covariance))[:, None] # Shape (N, 1)\n # Dist = - log likelihood\n dist = 0.5 * (diff.shape[2] * torch.log(2 * torch.tensor(torch.pi)) + log_det + dist[..., 0, 0])\n thresh = -torch.log(torch.tensor(self.match_cfg.thresh)).item()\n else: # Euclidian\n dist = torch.cdist(projection.mean[:, :2, 0], measures[:, :2, 0])\n thresh = self.match_cfg.thresh\n\n if self.match_cfg.method == Method.GREEDY:\n links = greedy_assignment_solver(dist.numpy(), thresh)\n else:\n dist[dist > thresh] = torch.inf\n links = pylapy.LapSolver().solve(\n dist.numpy(),\n float(\"inf\") if self.match_cfg.method == Method.OPT_HARD else thresh,\n )\n\n return torch.tensor(links.astype(np.int32))\n\n def update(self, detections: byotrack.Detections):\n prior = self.kalman_filter.predict(self.state)\n projection = self.kalman_filter.project(prior)\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Association\n links = self.match(projection, positions)\n\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n GaussianState(prior.mean[links[:, 0]], prior.covariance[links[:, 0]]),\n positions[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0]],\n projection.covariance[links[:, 0]],\n projection.precision[links[:, 0]] if projection.precision is not None else None,\n ),\n )\n\n # Take prior by default if non-linked\n prior.mean[links[:, 0]] = posterior.mean\n prior.covariance[links[:, 0]] = posterior.covariance\n posterior = prior\n\n self._handle_tracks(posterior, positions, links, detections.frame_id)\n\n def _handle_tracks(\n self, posterior: GaussianState, measures: torch.Tensor, links: torch.Tensor, frame_id: int\n ) -> None:\n \"\"\"Handle tracks to save track data, start new tracks and delete lost ones\n\n Args:\n posterior (GaussianState): Posterior 
for all active tracks.\n Mean: (n, dim_x, 1), Cov: (n, dim_x, dim_x)\n measures (torch.Tensor): Measures (Only supports 2D). Measures can include velocities (KOFT)\n Shape: (m, 2, 1) or (m, 4 ,1), dtype: float32\n links (torch.Tensor): Links between tracks and measures\n Shape: (L, 2), dtype: int32\n frame_id (int): Current frame id\n\n \"\"\"\n\n # Save both state and measurement in partial tracks.\n i_to_j = torch.full((len(self.active_tracks),), -1, dtype=torch.int32)\n i_to_j[links[:, 0]] = links[:, 1]\n active_mask = torch.full((len(self.active_tracks),), False)\n still_active = []\n for i, track in enumerate(self.active_tracks):\n j = i_to_j[i]\n if j == -1:\n track.update(posterior.mean[i], posterior.covariance[i], None)\n else:\n track.update(posterior.mean[i], posterior.covariance[i], measures[j])\n\n if track.is_active():\n still_active.append(track)\n active_mask[i] = True\n else:\n self.tracks.append(track)\n\n # Restrict posterior states to active tracks\n posterior = GaussianState(posterior.mean[active_mask], posterior.covariance[active_mask])\n\n # Create new track for every unmatch detection\n measures[links[:, 1]] = torch.nan\n unmatched_measures = measures[~torch.isnan(measures).squeeze().any(dim=-1)]\n\n if not unmatched_measures.numel():\n self.state = posterior\n self.active_tracks = still_active\n return\n\n # Initial state at measures,. Unmeasured state ([velocity, ]acceleration, jerk) are initialize at 0\n # Variance for unmeasured state is the process_noise\n # Variance for measured state is the measurement_noise\n unmatched_state = GaussianState(\n torch.zeros((unmatched_measures.shape[0], self.kalman_filter.state_dim, 1)),\n torch.cat([self.kalman_filter.process_noise[None]] * unmatched_measures.shape[0]),\n )\n unmatched_state.mean[:, : unmatched_measures.shape[1]] = unmatched_measures\n unmatched_state.covariance[\n :, : unmatched_measures.shape[1], : unmatched_measures.shape[1]\n ] = self.kalman_filter.measurement_noise\n\n # Create a new active track for each new state created\n for i in range(unmatched_measures.shape[0]):\n still_active.append(\n PartialTrack(\n len(self.tracks) + len(still_active),\n frame_id,\n unmatched_state.mean[i],\n unmatched_state.covariance[i],\n unmatched_measures[i],\n )\n )\n\n # State is the posterior for all active tracks (concatenation of new tracks with old kept ones)\n self.active_tracks = still_active\n self.state = GaussianState(\n torch.cat((posterior.mean, unmatched_state.mean)),\n torch.cat((posterior.covariance, unmatched_state.covariance)),\n )" }, { "identifier": "PartialTrack", "path": "src/skt.py", "snippet": "class PartialTrack:\n \"\"\"Partial track class\n\n Partial tracks are created for each unlinked detections, and then updated with following detections.\n It requires CONFIRMED_AT consecutive detections to confirm the tracks (INITIATED => CONFIRMED). 
If a miss detection\n occurs, it deletes it (INITIATED => DELETED).\n\n Once confirmed, it is resilient to miss detections, waiting MAX_NON_MEASURE frames before ending the track\n (CONFIRMED => ENDED)\n\n Will also store the kalman data for analysis.\n \"\"\"\n\n MAX_NON_MEASURE = 3\n CONFIRMED_AT = 3\n\n class TrackState(enum.IntEnum):\n INITIATED = 0\n CONFIRMED = 1\n ENDED = 2\n DELETED = 3\n\n def __init__(\n self,\n track_id: int,\n start: int,\n mean: torch.Tensor,\n covariance: torch.Tensor,\n measure: torch.Tensor,\n points=(0, 1),\n ) -> None:\n self._points = points # Points data in state\n self.track_id = track_id\n self.start = start\n self.track_state = PartialTrack.TrackState.INITIATED\n self.last_measurement = 0\n self._mean = [mean.clone()]\n self._covariance = [covariance.clone()]\n self._measure = [measure.clone()]\n\n def __len__(self) -> int:\n return len(self._mean) - self.last_measurement\n\n def is_active(self) -> bool:\n return self.track_state < 2\n\n def update(self, mean: torch.Tensor, covariance: torch.Tensor, measure: Optional[torch.Tensor]) -> None:\n \"\"\"Should be called only if the track is active\"\"\"\n self._mean.append(mean.clone())\n self._covariance.append(covariance.clone())\n\n if measure is None: # Not associated with a measure\n self._measure.append(torch.full_like(self._measure[-1], torch.nan))\n self.last_measurement += 1\n\n if self.track_state == PartialTrack.TrackState.INITIATED:\n self.track_state = PartialTrack.TrackState.DELETED\n\n elif self.last_measurement >= self.MAX_NON_MEASURE: # Could also check the width of the state covariance\n self.track_state = PartialTrack.TrackState.ENDED\n\n return\n\n self._measure.append(measure.clone())\n self.last_measurement = 0\n\n if self.track_state == PartialTrack.TrackState.INITIATED:\n if len(self) >= self.CONFIRMED_AT:\n self.track_state = PartialTrack.TrackState.CONFIRMED\n\n @property\n def points(self) -> torch.Tensor:\n return torch.cat([mean[None, self._points, 0] for mean in self._mean[: len(self)]])" }, { "identifier": "constant_koft_filter", "path": "src/koft.py", "snippet": "def constant_koft_filter(\n pos_std: torch.Tensor, vel_std: torch.Tensor, process_std: torch.Tensor, dim=2, order=1\n) -> KalmanFilter:\n \"\"\"Create a constant Velocity/Acceleration/Jerk Kalman Filter with pos and velocity measurements\n\n Create a kalman filter with a state containing the positions on each dimension (x, y, z, ...)\n with their derivatives up to `order`. The order-th derivatives are supposed constant.\n\n Let x be the positions for each dim and x^i the i-th derivatives of these positions\n Prediction follows:\n x^i_{t+1} = x^i_t + x^{i+1}_t, for i < order\n x^order_{t+1} = x^order_t\n\n Args:\n measurement_std (torch.Tensor): Std of the measurements\n 99.7% of measurements should fall within 3 std of the true position\n Shape: Broadcastable to dim, dtype: float64\n process_std (torch.Tensor): Process noise, a typical value is maximum diff between two consecutive\n order-th derivative. 
(Eg: for constant velocity -> Maximum acceleration between two frames)\n Shape: Broadcastable to dim, dtype: float64\n dim (int): Dimension of the motion (1d, 2d, 3d, ...)\n Default: 2\n order (int): Order of the filer (The order-th derivatives are constants)\n Default: 1 (Constant velocity)\n\n \"\"\"\n\n assert order >= 1, \"Velocity is measured and has to be set\"\n\n measurement_std = torch.cat((torch.broadcast_to(pos_std, (dim,)), torch.broadcast_to(vel_std, (dim,))))\n process_std = torch.broadcast_to(process_std, (dim,))\n\n measure_dim = 2 * dim\n state_dim = (order + 1) * dim\n\n # Measurement model\n # We measure position and velocity\n # Noise is independent and can have a different value in each direction\n measurement_matrix = torch.eye(measure_dim, state_dim)\n measurement_noise = torch.eye(measure_dim) * measurement_std**2\n\n # Process\n # Constant model\n # Noise in velocity estimation (which induce a noise in position estimation)\n process_matrix = torch.eye(state_dim) + torch.tensor(np.eye(state_dim, k=dim)).to(torch.float32)\n process_noise = torch.tensor(\n filterpy.common.Q_discrete_white_noise(order + 1, block_size=dim, order_by_dim=False)\n ).to(torch.float32) * torch.cat([process_std**2] * (order + 1))\n\n return KalmanFilter(process_matrix, measurement_matrix, process_noise, measurement_noise)" }, { "identifier": "OptFlowExtraction", "path": "src/koft.py", "snippet": "class OptFlowExtraction(enum.Enum):\n \"\"\"Extraction of optical flow from different positions\"\"\"\n\n DETECTED = 0\n POSTERIOR = 1\n PRIOR = 2" }, { "identifier": "SingleUpdateKOFTracker", "path": "src/koft.py", "snippet": "class SingleUpdateKOFTracker(SimpleKalmanTracker):\n \"\"\"Kalman and Optical Flow tracker with a single update\n\n Update velocities only for matched tracks and measyre velocity from detected positions\n \"\"\"\n\n __ALWAYS_UPDATE_VEL = False\n\n def __init__(self, kalman_filter: KalmanFilter, opt_flow: OptFlow, match_cfg: MatchingConfig) -> None:\n super().__init__(kalman_filter, match_cfg)\n self.opt_flow = opt_flow\n self.flow = np.zeros((1, 1, 2))\n\n def run(\n self, video: Iterable[np.ndarray], detections_sequence: Collection[byotrack.Detections]\n ) -> Collection[byotrack.Track]:\n assert isinstance(video, Sequence), \"Only indexable videos are supported\"\n\n # Reset tracks and states\n self.tracks = []\n self.active_tracks = []\n self.state = GaussianState(\n torch.zeros((0, self.kalman_filter.state_dim, 1)),\n torch.zeros((0, self.kalman_filter.state_dim, self.kalman_filter.state_dim)),\n )\n\n # Extract initial frame and prepare for optflow\n frame = video[next(iter(detections_sequence)).frame_id][..., 0]\n src = self.opt_flow.prepare(frame)\n\n for detections in tqdm.tqdm(detections_sequence):\n try:\n # We could compute flow from t-1 to t, or t-1 to t+1\n # But it is much better to compute flow from\n # frame = video[max(detections.frame_id - 1, 0)]\n # src = self.opt_flow.prepare(frame)\n # frame = video[detections.frame_id][..., 0]\n frame = video[detections.frame_id + 1][..., 0]\n except IndexError:\n pass\n\n dest = self.opt_flow.prepare(frame)\n self.flow = self.opt_flow.calc(src, dest) # / 2 if computed from t-1 to t+1\n\n self.update(detections)\n\n src = dest\n\n tracks = []\n for track in self.tracks + self.active_tracks:\n if track.track_state in (track.TrackState.DELETED, track.TrackState.INITIATED):\n continue # Ignore unconfirmed tracks\n tracks.append(\n byotrack.Track(\n track.start,\n track.points,\n track.track_id,\n )\n )\n return tracks\n\n def 
update(self, detections: byotrack.Detections):\n prior = self.kalman_filter.predict(self.state)\n projection = self.kalman_filter.project(prior)\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Measures = positions + velocities\n velocities = self.opt_flow.flow_at(self.flow, positions[..., 0].numpy().astype(np.float64), self.opt_flow.scale)\n measures = torch.cat([positions, torch.tensor(velocities[..., None]).to(torch.float32)], dim=1)\n\n # Association\n links = self.match(projection, measures)\n\n if self.__ALWAYS_UPDATE_VEL: # Single update for everyone even unmatched tracks (updated with inf pos cov)\n # Add measures for unlinked state\n prior_velocities = self.opt_flow.flow_at(\n self.flow, prior.mean[:, :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n all_measures = torch.cat(\n [prior.mean[:, :2], torch.tensor(prior_velocities[..., None]).to(torch.float32)], dim=1\n )\n all_measures[links[:, 0]] = measures[links[:, 1]]\n\n # For unmatched tracks, uncertainty on measurements (which is the prior here) is set to inf\n # Note that dropping this helps => Future investigation here\n cov = projection.covariance.clone()\n projection.covariance[:, 0, 0] = torch.inf\n projection.covariance[:, 1, 1] = torch.inf\n projection.covariance[links[:, 0]] = cov[links[:, 0]]\n projection.precision = None\n\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n prior,\n all_measures,\n projection,\n )\n else: # Classic single update\n # Update linked kalman filter\n posterior = self.kalman_filter.update(\n GaussianState(prior.mean[links[:, 0]], prior.covariance[links[:, 0]]),\n measures[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0]],\n projection.covariance[links[:, 0]],\n projection.precision[links[:, 0]] if projection.precision is not None else None,\n ),\n )\n\n # Take prior by default if non-linked\n prior.mean[links[:, 0]] = posterior.mean\n prior.covariance[links[:, 0]] = posterior.covariance\n posterior = prior\n\n self._handle_tracks(posterior, measures, links, detections.frame_id)" }, { "identifier": "TwoUpdateKOFTracker", "path": "src/koft.py", "snippet": "class TwoUpdateKOFTracker(SingleUpdateKOFTracker):\n \"\"\"Kalman and Optical Flow tracker\"\"\"\n\n def __init__(\n self,\n kalman_filter: KalmanFilter,\n opt_flow: OptFlow,\n match_cfg: MatchingConfig,\n opt_flow_at=OptFlowExtraction.POSTERIOR,\n always_update_vel=True,\n ) -> None:\n super().__init__(kalman_filter, opt_flow, match_cfg)\n self.opt_flow_at = opt_flow_at\n self.always_update_vel = always_update_vel\n\n def update(self, detections: byotrack.Detections):\n projection = self.kalman_filter.project(\n self.state,\n # self.kalman_filter.measurement_matrix[:2], # Let's also project velocity (useful for matching)\n # self.kalman_filter.measurement_noise[:2, :2],\n )\n\n positions = detections.position[..., None].clone() # Shape m, d, 1\n\n # Association\n links = self.match(projection, positions)\n\n # First update (Update with associate detections positions)\n posterior = self.kalman_filter.update(\n GaussianState(self.state.mean[links[:, 0]], self.state.covariance[links[:, 0]]),\n positions[links[:, 1]],\n GaussianState(\n projection.mean[links[:, 0], :2],\n projection.covariance[links[:, 0], :2, :2],\n None, # /!\\ inv(cov[:2,:2]) != inv(cov)[:2, :2]\n ),\n self.kalman_filter.measurement_matrix[:2],\n self.kalman_filter.measurement_noise[:2, :2],\n )\n\n # Compute velocities\n velocities_measured = torch.tensor( # Measured velocities\n 
self.opt_flow.flow_at(self.flow, positions[..., 0].numpy().astype(np.float64), self.opt_flow.scale)\n )[..., None].to(torch.float32)\n\n if self.opt_flow_at == OptFlowExtraction.DETECTED:\n velocities = velocities_measured[links[:, 1]]\n elif self.opt_flow_at == OptFlowExtraction.POSTERIOR:\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, posterior.mean[..., :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n velocities_measured[links[:, 1]] = velocities\n else: # Prior\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, projection.mean[links[:, 0], :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n velocities_measured[links[:, 1]] = velocities\n\n # Update matched tracks with velocities\n posterior = self.kalman_filter.update(\n posterior,\n velocities,\n None,\n self.kalman_filter.measurement_matrix[2:],\n self.kalman_filter.measurement_noise[2:, 2:],\n )\n\n measures = torch.cat([positions, velocities_measured], dim=1)\n\n if self.always_update_vel:\n velocities = torch.tensor(\n self.opt_flow.flow_at(\n self.flow, projection.mean[:, :2, 0].numpy().astype(np.float64), self.opt_flow.scale\n )\n )[..., None].to(torch.float32)\n self.state = self.kalman_filter.update( # Update unmatched tracks with velocities\n self.state,\n velocities,\n None,\n self.kalman_filter.measurement_matrix[2:],\n self.kalman_filter.measurement_noise[2:, 2:],\n )\n\n # Take prior by default if non-linked, else posterior\n self.state.mean[links[:, 0]] = posterior.mean\n self.state.covariance[links[:, 0]] = posterior.covariance\n\n self._handle_tracks(self.state, measures, links, detections.frame_id)\n\n self.state = self.kalman_filter.predict(self.state)" }, { "identifier": "farneback", "path": "src/optical_flow.py", "snippet": "class OptFlow:\n def __init__(self, method: Callable[[np.ndarray, np.ndarray], np.ndarray], threshs=(0.0, 1.0), scale=2, blur=0.0):\n def prepare(self, frame: np.ndarray) -> np.ndarray:\n def calc(self, source: np.ndarray, destination: np.ndarray) -> np.ndarray:\n def flow_at(flow: np.ndarray, points: np.ndarray, scale: int) -> np.ndarray:\n def transform(self, flow: np.ndarray, points: np.ndarray) -> np.ndarray:" }, { "identifier": "enforce_all_seeds", "path": "src/utils.py", "snippet": "def enforce_all_seeds(seed: int, strict=True):\n \"\"\"Enforce all the seeds\n\n If strict you may have to define the following env variable:\n CUBLAS_WORKSPACE_CONFIG=:4096:8 (Increase a bit the memory foot print ~25Mo)\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n if strict:\n torch.backends.cudnn.benchmark = False # By default should already be to False\n torch.use_deterministic_algorithms(True)" } ]
import dataclasses
import enum
import pathlib
import dacite
import torch
import tqdm  # type: ignore
import yaml  # type: ignore
import byotrack
from typing import Collection, List
from byotrack.implementation.detector.wavelet import WaveletDetector
from byotrack.implementation.linker.icy_emht import EMHTParameters, IcyEMHTLinker, Motion
from byotrack.implementation.linker.trackmate.trackmate import TrackMateLinker, TrackMateParameters
from byotrack.implementation.refiner.interpolater import ForwardBackwardInterpolater
from ..detector import FakeDetector
from ..metrics.detections import DetectionMetric
from ..metrics.tracking import compute_tracking_metrics
from ..skt import constant_kalman_filter, Dist, Method, MatchingConfig, SimpleKalmanTracker, PartialTrack
from ..koft import constant_koft_filter, OptFlowExtraction, SingleUpdateKOFTracker, TwoUpdateKOFTracker
from ..optical_flow import farneback
from ..utils import enforce_all_seeds
10,804
gap_closing_max_distance=thresh * 1.5, kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None, ), ) if self.tracking_method is TrackingMethod.SKT: kalman_filter = constant_kalman_filter( torch.tensor(self.kalman.detection_noise), torch.tensor(self.kalman.process_noise), self.kalman.dim, self.kalman.order, ) return SimpleKalmanTracker( kalman_filter, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method) ) # self.tracking_method is TrackingMethod.KOFT: kalman_filter = constant_koft_filter( torch.tensor(self.kalman.detection_noise), torch.tensor(self.kalman.of_noise), torch.tensor(self.kalman.process_noise), self.kalman.dim, self.kalman.order, ) if self.tracking_method is TrackingMethod.KOFTmm: return SingleUpdateKOFTracker( kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method) ) # <=> two updates, without updating vel for all tracks and using OptFlowExtraction at Detected pos # return TwoUpdateKOFTracker( # kalman_filter, # farneback, # MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method), # OptFlowExtraction.DETECTED, # False, # ) PartialTrack.MAX_NON_MEASURE = 5 if self.tracking_method is TrackingMethod.KOFTpp else 3 return TwoUpdateKOFTracker( kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method), OptFlowExtraction.POSTERIOR, self.kalman.always_update_velocities, ) def create_thresholds(self) -> List[float]: if self.tracking_method is TrackingMethod.EMHT: # XXX: EMHT struggle to converge in some scenarios with high frp and fnr. # On those where it converges 3.0 is the best, and it converges for 3.0 in all of them # So lets manually select [3.0] in high fpr/fnr. In other cases, let's keep the default grid search # return [3.0] return [3.0, 4.0, 5.0, 6.0] # MAHA if ( self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF) or self.kalman.dist is Dist.EUCLIDIAN ): return [3.0, 5.0, 7.0, 10.0, 15.0] if self.kalman.dist is Dist.MAHALANOBIS: return [0.5, 1.0, 2.0, 3.0, 4.0] # self.dist is Dist.LIKELIHOOD: return [1e-6, 1e-5, 1e-4, 1e-3, 1e-2] def main(name: str, cfg_data: dict) -> None: print("Running:", name) print(yaml.dump(cfg_data)) cfg = dacite.from_dict(ExperimentConfig, cfg_data, dacite.Config(cast=[pathlib.Path, tuple, enum.Enum])) enforce_all_seeds(cfg.seed) # Read video and ground truth video_path = cfg.simulation_path / "video.mp4" gt_path = cfg.simulation_path / "video_data.pt" video = byotrack.Video(video_path) video.set_transform(byotrack.VideoTransformConfig(aggregate=True, normalize=True, q_min=0.00, q_max=1.0)) ground_truth = torch.load(gt_path) # Detections detector = cfg.detection.create_detector(ground_truth["mu"]) detections_sequence = detector.run(video) # Evaluate detections step performances tp = 0.0 n_pred = 0.0 n_true = 0.0 for detections in detections_sequence: det_metrics = DetectionMetric(1.5).compute_at( detections, ground_truth["mu"][detections.frame_id], ground_truth["weight"][detections.frame_id] ) tp += det_metrics["tp"] n_pred += det_metrics["n_pred"] n_true += det_metrics["n_true"] print("=======Detection======") print("Recall", tp / n_true if n_true else 1.0) print("Precision", tp / n_pred if n_pred else 1.0) print("f1", 2 * tp / (n_true + n_pred) if n_pred + n_true else 1.0) refiner = ForwardBackwardInterpolater() metrics = {} best_thresh = 0.0 best_hota = 0.0 best_tracks: Collection[byotrack.Track] = [] for thresh in tqdm.tqdm(cfg.create_thresholds()): linker = 
cfg.create_linker(thresh) tracks = linker.run(video, detections_sequence) tracks = refiner.run(video, tracks) tqdm.tqdm.write(f"Built {len(tracks)} tracks") if len(tracks) == 0 or len(tracks) > ground_truth["mu"].shape[1] * 15: tqdm.tqdm.write(f"Threshold: {thresh} => Tracking failed (too few or too many tracks). Continuing...") continue
class DetectionMethod(enum.Enum): WAVELET = "wavelet" FAKE = "fake" @dataclasses.dataclass class WaveletConfig: k: float = 3.0 scale: int = 1 min_area: float = 10.0 @dataclasses.dataclass class FakeConfig: fpr: float = 0.1 # Bad detection rate fnr: float = 0.2 # Miss detection rate measurement_noise: float = 1.0 @dataclasses.dataclass class DetectionConfig: detector: DetectionMethod wavelet: WaveletConfig fake: FakeConfig # interactive = False # Could tweak the detector parameters interactively ? def create_detector(self, mu: torch.Tensor) -> byotrack.Detector: if self.detector == DetectionMethod.WAVELET: return WaveletDetector(self.wavelet.scale, self.wavelet.k, self.wavelet.min_area) return FakeDetector(mu, self.fake.measurement_noise, self.fake.fpr, self.fake.fnr) @dataclasses.dataclass class KalmanConfig: detection_noise: float of_noise: float process_noise: float # Miss evaluation of the process dist: Dist matching_method: Method always_update_velocities: bool = True dim: int = 2 order: int = 1 class TrackingMethod(enum.Enum): SKT = "skt" KOFT = "koft" KOFTmm = "koft--" KOFTpp = "koft++" TRACKMATE = "trackmate" TRACKMATE_KF = "trackmate-kf" EMHT = "emht" @dataclasses.dataclass class ExperimentConfig: seed: int simulation_path: pathlib.Path tracking_method: TrackingMethod detection: DetectionConfig kalman: KalmanConfig icy_path: pathlib.Path fiji_path: pathlib.Path def create_linker(self, thresh: float) -> byotrack.Linker: """Create a linker""" if self.tracking_method is TrackingMethod.EMHT: return IcyEMHTLinker( self.icy_path, EMHTParameters( gate_factor=thresh, motion=Motion.MULTI, tree_depth=2, ), ) if self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF): # As kalman tracking we let a gap of 2 consecutive miss detections # In that case, we allow 1.5 thresh return TrackMateLinker( self.fiji_path, TrackMateParameters( max_frame_gap=PartialTrack.MAX_NON_MEASURE, linking_max_distance=thresh, gap_closing_max_distance=thresh * 1.5, kalman_search_radius=thresh if self.tracking_method is TrackingMethod.TRACKMATE_KF else None, ), ) if self.tracking_method is TrackingMethod.SKT: kalman_filter = constant_kalman_filter( torch.tensor(self.kalman.detection_noise), torch.tensor(self.kalman.process_noise), self.kalman.dim, self.kalman.order, ) return SimpleKalmanTracker( kalman_filter, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method) ) # self.tracking_method is TrackingMethod.KOFT: kalman_filter = constant_koft_filter( torch.tensor(self.kalman.detection_noise), torch.tensor(self.kalman.of_noise), torch.tensor(self.kalman.process_noise), self.kalman.dim, self.kalman.order, ) if self.tracking_method is TrackingMethod.KOFTmm: return SingleUpdateKOFTracker( kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method) ) # <=> two updates, without updating vel for all tracks and using OptFlowExtraction at Detected pos # return TwoUpdateKOFTracker( # kalman_filter, # farneback, # MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method), # OptFlowExtraction.DETECTED, # False, # ) PartialTrack.MAX_NON_MEASURE = 5 if self.tracking_method is TrackingMethod.KOFTpp else 3 return TwoUpdateKOFTracker( kalman_filter, farneback, MatchingConfig(thresh, self.kalman.dist, self.kalman.matching_method), OptFlowExtraction.POSTERIOR, self.kalman.always_update_velocities, ) def create_thresholds(self) -> List[float]: if self.tracking_method is TrackingMethod.EMHT: # XXX: EMHT struggle to converge in some scenarios with high frp 
and fnr. # On those where it converges 3.0 is the best, and it converges for 3.0 in all of them # So lets manually select [3.0] in high fpr/fnr. In other cases, let's keep the default grid search # return [3.0] return [3.0, 4.0, 5.0, 6.0] # MAHA if ( self.tracking_method in (TrackingMethod.TRACKMATE, TrackingMethod.TRACKMATE_KF) or self.kalman.dist is Dist.EUCLIDIAN ): return [3.0, 5.0, 7.0, 10.0, 15.0] if self.kalman.dist is Dist.MAHALANOBIS: return [0.5, 1.0, 2.0, 3.0, 4.0] # self.dist is Dist.LIKELIHOOD: return [1e-6, 1e-5, 1e-4, 1e-3, 1e-2] def main(name: str, cfg_data: dict) -> None: print("Running:", name) print(yaml.dump(cfg_data)) cfg = dacite.from_dict(ExperimentConfig, cfg_data, dacite.Config(cast=[pathlib.Path, tuple, enum.Enum])) enforce_all_seeds(cfg.seed) # Read video and ground truth video_path = cfg.simulation_path / "video.mp4" gt_path = cfg.simulation_path / "video_data.pt" video = byotrack.Video(video_path) video.set_transform(byotrack.VideoTransformConfig(aggregate=True, normalize=True, q_min=0.00, q_max=1.0)) ground_truth = torch.load(gt_path) # Detections detector = cfg.detection.create_detector(ground_truth["mu"]) detections_sequence = detector.run(video) # Evaluate detections step performances tp = 0.0 n_pred = 0.0 n_true = 0.0 for detections in detections_sequence: det_metrics = DetectionMetric(1.5).compute_at( detections, ground_truth["mu"][detections.frame_id], ground_truth["weight"][detections.frame_id] ) tp += det_metrics["tp"] n_pred += det_metrics["n_pred"] n_true += det_metrics["n_true"] print("=======Detection======") print("Recall", tp / n_true if n_true else 1.0) print("Precision", tp / n_pred if n_pred else 1.0) print("f1", 2 * tp / (n_true + n_pred) if n_pred + n_true else 1.0) refiner = ForwardBackwardInterpolater() metrics = {} best_thresh = 0.0 best_hota = 0.0 best_tracks: Collection[byotrack.Track] = [] for thresh in tqdm.tqdm(cfg.create_thresholds()): linker = cfg.create_linker(thresh) tracks = linker.run(video, detections_sequence) tracks = refiner.run(video, tracks) tqdm.tqdm.write(f"Built {len(tracks)} tracks") if len(tracks) == 0 or len(tracks) > ground_truth["mu"].shape[1] * 15: tqdm.tqdm.write(f"Threshold: {thresh} => Tracking failed (too few or too many tracks). Continuing...") continue
metrics[thresh] = compute_tracking_metrics(tracks, ground_truth)
2
2023-11-10 10:18:39+00:00
16k
quantuminterface/qiclib
src/qiclib/code/qi_dataflow.py
[ { "identifier": "ForRange", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class ForRange(QiContextManager):\n \"\"\"Adds ForRange to program.\n If multiple cells are used inside body, a synchronisation between the cells is done before the ForRange as well as after the end of the body.\n If QiTimeVariable is used as var, loops starting at 0 are unrolled, to skip pulses/waits inside body using var as length.\n Raises exception if start, end and step are not set up properly.\"\"\"\n\n def __init__(\n self,\n var: _QiVariableBase,\n start: Union[_QiVariableBase, int, float],\n end: Union[_QiVariableBase, int, float],\n step: Union[int, float] = 1,\n ):\n from .qi_types import (\n _TypeConstraintReasonQiCommand,\n _IllegalTypeReason,\n _add_equal_constraints,\n )\n\n super().__init__()\n\n if not isinstance(var, _QiVariableBase):\n raise RuntimeError(\n \"Can only use QiVariables as control variable in ForRanges.\"\n )\n\n start_expr = QiExpression._from(start)\n end_expr = QiExpression._from(end)\n step_expr = QiExpression._from(step)\n\n var._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE)\n start_expr._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.FOR_RANGE\n )\n end_expr._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE)\n step_expr._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.FOR_RANGE\n )\n\n _add_equal_constraints(\n QiType.TIME,\n _TypeConstraintReasonQiCommand(ForRange),\n var,\n start_expr,\n end_expr,\n step_expr,\n )\n _add_equal_constraints(\n QiType.FREQUENCY,\n _TypeConstraintReasonQiCommand(ForRange),\n var,\n start_expr,\n end_expr,\n step_expr,\n )\n _add_equal_constraints(\n QiType.NORMAL,\n _TypeConstraintReasonQiCommand(ForRange),\n var,\n start_expr,\n end_expr,\n step_expr,\n )\n\n if not isinstance(start, _QiVariableBase) and not isinstance(\n end, _QiVariableBase\n ):\n if (start > end and step >= 0) or (start < end and step <= 0):\n raise ValueError(\"Definition of ForRange faulty\")\n\n self.var = var\n self.start = start_expr\n self.end = end_expr\n self.step = step_expr\n\n self.add_associated_variable(var)\n\n if isinstance(start, _QiVariableBase):\n self.add_associated_variable(start)\n\n if start.id == var.id:\n raise RuntimeError(\"Loop variable can not be used as start value\")\n\n if isinstance(end, _QiVariableBase):\n self.add_associated_variable(end)\n\n if end.id == var.id:\n raise RuntimeError(\"Loop variable can not be used as end value\")\n\n def __exit__(self, exception_type, exception_value, traceback):\n super().__exit__(exception_type, exception_value, traceback)\n check_variable = QiVarInForRange(self.var)\n self.accept(check_variable)\n\n def accept(self, visitor, *input):\n return visitor.visit_for_range(self, *input)\n\n @property\n def is_step_positive(self) -> bool:\n return self.step > 0\n\n def _stringify(self) -> str:\n return f\"ForRange({self.var}, {self.start}, {self.end}, {self.step})\"" }, { "identifier": "If", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class If(QiContextManager):\n \"\"\"\n Add conditional logic to the program.\n If multiple cells are used inside the body, a synchronization between the cells takes place before the If.\n\n :param condition: The condition to check\n\n Example\n -------\n\n .. code-block:: python\n\n with QiJob() as job:\n q = QiCells(1)\n x = QiIntVariable(1)\n with If(x > 1):\n ... # won't be executed\n\n The If statement is most commonly used to react to qubit states in real-time:\n\n .. 
code-block:: python\n\n from qiclib import jobs\n\n with QiJob() as job:\n q = QiCells(1)\n state = QiStateVariable()\n jobs.Readout(q[0], state_to=state)\n with If(state = 0):\n ... # Apply some conditional logic based on the qubit state\n \"\"\"\n\n def __init__(self, condition: Optional[QiCondition] = None):\n super().__init__()\n self._else_body: List[QiCommand] = []\n if condition is None:\n raise RuntimeError(\"No QiCondition given\")\n self.condition = condition\n\n for variable in condition.contained_variables:\n self.add_associated_variable(variable)\n\n def add_else_body(self, else_body):\n self._else_body = else_body.copy()\n\n def is_followed_by_else(self) -> bool:\n return len(self._else_body) != 0\n\n def accept(self, visitor, *input):\n return visitor.visit_if(self, *input)\n\n def _stringify(self) -> str:\n return f\"If({self.condition})\"" }, { "identifier": "Parallel", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class Parallel(QiContextManager):\n \"\"\"Pulses defined in body are united in one trigger command.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.entries: List[List[QiCommand]] = []\n\n def __exit__(self, exception_type, exception_value, traceback):\n temp = _QiJobReference._close_context()\n self.body += temp # So visitors also find commands in Parallel blocks.\n self.entries.append(temp)\n\n containing_cells = QiCMContainedCellVisitor()\n for command in temp:\n if not isinstance(\n command,\n (\n cQiPlay,\n cQiPlayReadout,\n cQiPlayFlux,\n cQiRotateFrame,\n cQiRecording,\n cQiWait,\n ),\n ):\n raise TypeError(\"Type not allowed inside Parallel()\", command)\n if (\n isinstance(command, (cQiRecording, cQiPlayReadout))\n and command.uses_state\n ):\n raise RuntimeError(\"Can not save to state variable inside Parallel\")\n\n try:\n if isinstance(command.length, _QiVariableBase):\n self._associated_variable_set.add(command.length)\n except KeyError:\n pass # length was QiCellProperty\n command.accept(containing_cells)\n\n self._relevant_cells.update(containing_cells.contained_cells)\n\n # If previous command is also parallel, combine by adding another parallel entry at previous command\n try:\n cmd = _QiJobReference.commands[-1]\n if isinstance(cmd, Parallel) and len(cmd.entries) < 2:\n cmd.entries.append(temp)\n cmd._associated_variable_set.update(self._associated_variable_set)\n else:\n _QiJobReference._add_command(self)\n except IndexError:\n _QiJobReference._add_command(self)\n\n class CmdTuple:\n def __init__(self, cmd: QiCommand, start: int, end: int, choke: bool = False):\n self.cmd = cmd\n self.start = start\n self.end = end\n self.choke_cmd = choke\n\n class TimeSlot:\n def __init__(self, cmd_tuples: List[Any], start, end):\n self.cmd_tuples: List[Parallel.CmdTuple] = cmd_tuples\n self.start: int = start\n self.end: int = end\n self.duration: float = 0.0\n\n def _clear_wait_commands(self, cmd_tuples: List[CmdTuple]):\n \"\"\"Clears cQiWait commands from cmd_tuples, if any trigger command is also in cmd_tuples\"\"\"\n contains_pulse = False\n\n for cmd_tuple in cmd_tuples:\n if isinstance(cmd_tuple.cmd, _cQiPlay_base):\n contains_pulse = True\n break\n\n return [\n cmd_tuple\n for cmd_tuple in cmd_tuples\n if isinstance(cmd_tuple.cmd, _cQiPlay_base) or contains_pulse is False\n ]\n\n def _clear_choke_commands(self, cmd_tuples: List[CmdTuple]):\n \"\"\"Clears choke commands, if at the same slot another Play or Readout command is present.\"\"\"\n\n contains_play = False\n contains_readout = False\n\n for cmd_tuple in cmd_tuples:\n if 
isinstance(cmd_tuple.cmd, cQiPlay) and cmd_tuple.choke_cmd is False:\n contains_play = True\n elif (\n isinstance(cmd_tuple.cmd, cQiPlayReadout)\n and cmd_tuple.choke_cmd is False\n ):\n contains_readout = True\n\n if contains_play is False and contains_readout is False:\n return cmd_tuples\n\n cleared_tuples = []\n\n for cmd_tuple in cmd_tuples:\n # if play command is present skip choke command for play\n if isinstance(cmd_tuple.cmd, cQiPlay):\n if cmd_tuple.choke_cmd is True and contains_play:\n continue\n\n # if PlayReadout command is present skip choke command for PlayReadout\n elif isinstance(cmd_tuple.cmd, cQiPlayReadout):\n if cmd_tuple.choke_cmd is True and contains_readout:\n continue\n\n cleared_tuples.append(cmd_tuple)\n\n return cleared_tuples\n\n def _create_time_slots(self, annotated_bodies: List[List[CmdTuple]], max_end: int):\n time_slot_list: List[Parallel.TimeSlot] = []\n for start in range(0, max_end):\n time_slot = self.TimeSlot([], start, start)\n\n # find tuples with start time == start\n for cmd_list in annotated_bodies:\n for cmd_tuple in cmd_list:\n if cmd_tuple.start == start:\n time_slot.cmd_tuples.append(cmd_tuple)\n time_slot.end = max(cmd_tuple.end, time_slot.end)\n cmd_list.remove(cmd_tuple)\n break # next cmd_list\n\n # next start value, if nothing was found\n if len(time_slot.cmd_tuples) == 0:\n continue\n\n time_slot.cmd_tuples = self._clear_wait_commands(time_slot.cmd_tuples)\n time_slot.cmd_tuples = self._clear_choke_commands(time_slot.cmd_tuples)\n\n # Add Wait command, if previous end value < start\n try:\n prev_time_slot = time_slot_list[-1]\n if prev_time_slot.end < start:\n length = util.conv_cycles_to_time(start - prev_time_slot.end)\n new_wait = self.CmdTuple(\n cQiWait(list(self._relevant_cells)[0], length),\n start=prev_time_slot.end,\n end=start,\n )\n time_slot_list.append(\n self.TimeSlot([new_wait], prev_time_slot.end, start)\n )\n except IndexError:\n pass\n\n # Adjust previous end time, if previous.end > start\n try:\n prev_time_slot = time_slot_list[-1]\n prev_time_slot.end = min(prev_time_slot.end, start)\n except IndexError:\n pass\n\n time_slot_list.append(time_slot)\n\n # Add final wait, if previous.end != max_end\n try:\n prev_time_slot = time_slot_list[-1]\n if prev_time_slot.end < max_end:\n length = util.conv_cycles_to_time(max_end - prev_time_slot.end)\n new_wait = self.CmdTuple(\n cQiWait(list(self._relevant_cells)[0], length),\n start=prev_time_slot.end,\n end=max_end,\n )\n time_slot_list.append(\n self.TimeSlot([new_wait], prev_time_slot.end, max_end)\n )\n except IndexError:\n pass\n\n # calculate duration of time slot\n for slot in time_slot_list:\n slot.duration = util.conv_cycles_to_time(slot.end - slot.start)\n\n return time_slot_list\n\n def _generate_command_body(self, cell, sequencer):\n \"\"\"Combines the parallel sequences to one command body.\"\"\"\n\n parallel_bodies: List[List[Parallel.CmdTuple]] = []\n\n max_end = 0\n\n # Generate annotated list of commands with start and end cycle\n for cmd_list in self.entries:\n commands: List[Parallel.CmdTuple] = []\n start: int = 0\n end: int = 0\n for cmd in cmd_list:\n var_pulse = False\n\n if cell not in cmd._relevant_cells:\n continue # skip commands for other cells\n\n if isinstance(cmd.length, _QiVariableBase):\n reg = sequencer.get_var_register(cmd.length)\n\n if reg.valid is False or reg.value is None:\n raise RuntimeError(\n \"Variable inside parallel not initialised or invalidated\"\n )\n\n length = reg.value\n\n if isinstance(cmd, (cQiPlay, cQiPlayReadout)):\n 
var_pulse = True\n else:\n length = util.conv_time_to_cycles(cmd.length, \"ceil\")\n\n if length == 0:\n continue # skip commands with length 0\n\n if isinstance(cmd, cQiRecording) or (\n isinstance(cmd, cQiPlayReadout)\n and isinstance(cmd.recording, cQiRecording)\n ):\n end += length + util.conv_time_to_cycles(\n sequencer.recording_delay, \"ceil\"\n )\n else:\n end += length\n\n cmd_duration = self.CmdTuple(cmd, start, end)\n commands.append(cmd_duration)\n\n if var_pulse:\n # Add parallel choke command after current command, if variable length is used\n parallel_choke = [self.CmdTuple(cmd, end, end + 1, choke=True)]\n parallel_bodies.append(parallel_choke)\n\n max_end = max(end + 1, max_end) # +1 to account for choke command\n else:\n max_end = max(end, max_end)\n\n start = end\n\n parallel_bodies.append(commands)\n\n return self._create_time_slots(parallel_bodies, max_end)\n\n def accept(self, visitor, *input):\n return visitor.visit_parallel(self, *input)\n\n def _stringify(self) -> str:\n return \"Parallel\"" }, { "identifier": "QiCell", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiCell:\n \"\"\"A QiCell is an abstract representation of the qubit/cell the program is run on.\n Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object.\n For a single :python:`QiCell`, use instead :python:`QiCells(1)`\n\n A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context.\n\n The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`.\n For this, index the :python:`QiCell` object using the name of the property:\n\n .. code-block:: python\n\n q: QiCell = ...\n t1_time = q[\"t1\"]\n\n The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a\n :class:`QiJob` and providing the actual sample.\n\n **Tasks of the QiCell**:\n\n - Saves the pulses needed for program execution.\n - Provides a dictionary functionality to define commonly used durations/properties.\n - Implements a Sequencer object, which contains the assembler program after compilation.\n\n :param cellID: A unique ID\n :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob`\n \"\"\"\n\n def __init__(self, cellID: int):\n if not isinstance(_QiJobReference, QiJob):\n raise RuntimeError(\"QiCell can't be used outside of QiJob.\")\n\n self.cellID = cellID\n self.manipulation_pulses: List[QiPulse] = []\n self.flux_pulses: List[QiPulse] = []\n self.readout_pulses: List[QiPulse] = []\n self._result_container: Dict[str, QiResult] = {}\n # The order in which recorded values are assigned to which result container\n self._result_recording_order: List[QiResult] = []\n self._unresolved_property: Set[QiCellProperty] = set()\n self._job_ref = _QiJobReference\n self._relevant_vars: Set[_QiVariableBase] = set()\n\n # These attributes are determined by dataflow analyses\n self._initial_manip_freq: float = None\n self._initial_readout_freq: float = None\n self._initial_rec_offset: float = None\n\n self._rec_length: Union[int, float, QiCellProperty] = None\n\n self._properties: Dict[QiCellProperty, Any] = {}\n\n def __getitem__(self, key):\n if _QiJobReference != self._job_ref:\n raise RuntimeError(\n \"Tried getting values for cells registered to other QiJob\"\n )\n\n prop = self._properties.get(key, QiCellProperty(self, key))\n\n if isinstance(prop, QiCellProperty):\n self._unresolved_property.add(key)\n return prop\n\n def __setitem__(self, key, value):\n if 
_QiJobReference != self._job_ref:\n raise RuntimeError(\n \"Tried setting values for cells registered to other QiJob\"\n )\n self._properties[key] = value\n\n def __call__(self, qic):\n return qic.cell[self.qic_cell]\n\n def get_properties(self):\n return self._properties.copy()\n\n def add_pulse(self, pulse: QiPulse):\n if pulse not in self.manipulation_pulses:\n self.manipulation_pulses.append(pulse)\n\n if len(self.manipulation_pulses) > 13:\n raise RuntimeError(\"Too many pulses in use\")\n\n return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved\n\n @property\n def initial_manipulation_frequency(self):\n if self._initial_manip_freq is None:\n if len(self.manipulation_pulses) > 0:\n warnings.warn(\n \"Manipulation pulses without frequency given, using 90 MHz.\"\n )\n return 90e6 # Default frequency\n freq = self._initial_manip_freq\n return freq() if isinstance(freq, QiCellProperty) else freq\n\n def add_recording_length(self, length):\n if self._rec_length is None:\n self._rec_length = length\n elif (\n not self._rec_length._equal_syntax(length)\n if isinstance(self._rec_length, QiExpression)\n else self._rec_length != length\n ):\n raise RuntimeError(\n f\"Cell {self.cellID}: Multiple definitions of recording length used.\"\n )\n\n def add_readout_pulse(self, pulse: QiPulse):\n if pulse not in self.readout_pulses:\n self.readout_pulses.append(pulse)\n\n if len(self.readout_pulses) > 13:\n raise RuntimeError(\"Too many pulses in use\")\n\n return self.readout_pulses.index(pulse) + 1 # index 0 and 15 are reserved\n\n @property\n def initial_readout_frequency(self):\n if self._initial_readout_freq is None:\n if len(self.readout_pulses) > 0:\n warnings.warn(\"Readout pulses without frequency given, using 30 MHz.\")\n return 30e6 # Default frequency\n freq = self._initial_readout_freq\n return freq() if isinstance(freq, QiCellProperty) else freq\n\n @property\n def recording_length(self):\n \"\"\"the length of the recording pulse\"\"\"\n if self._rec_length is not None:\n return (\n self._rec_length()\n if isinstance(self._rec_length, QiCellProperty)\n else self._rec_length\n )\n\n return 0\n\n @property\n def initial_recording_offset(self):\n \"\"\"the recording offset in seconds\"\"\"\n if self._initial_rec_offset is not None:\n return (\n self._initial_rec_offset()\n if isinstance(self._initial_rec_offset, QiCellProperty)\n else self._initial_rec_offset\n )\n\n return 0\n\n def get_result_container(self, result: str) -> QiResult:\n if result in self._result_container:\n return self._result_container[result] # was already added\n else:\n box = QiResult(result)\n box._cell = self\n self._result_container[result] = box\n return box\n\n def add_variable(self, var: _QiVariableBase):\n self._relevant_vars.add(var)\n\n def get_number_of_recordings(self):\n return len(self._result_recording_order)\n\n def set_default_readout(self, pulse):\n pass\n\n def reset(self):\n for container in self._result_container.values():\n container.data = []\n\n def data(\n self, name: Optional[str] = None\n ) -> Union[Dict[str, np.ndarray], np.ndarray]:\n \"\"\"\n Returns the data after running an experiment.\n\n When calling this function without a name, i.e., calling :python:`cell.data()`,\n returns a dictionary containing the results as numpy arrays.\n\n When calling this function with a name, i.e., calling :python:`cell.data(\"result_name\")`,\n returns the whole dictionary.\n\n :param name: The name of the data\n :return: A single result, or a dictionary of result names mapped to 
results.\n \"\"\"\n if name is None:\n result_dict = {}\n for key, container in self._result_container.items():\n result_dict.update({key: container.get()})\n return result_dict\n\n else:\n return self._result_container[name].get()\n\n def _resolve_properties(self, len_dict: Dict[QiCellProperty, Any]):\n keys = list(self._unresolved_property)\n\n missing_keys = self._unresolved_property.difference(len_dict.keys())\n if missing_keys:\n raise RuntimeError(\n f\"Cell {self.cellID}: Not all properties for job could be resolved. \"\n f\"Missing properties: {missing_keys}\"\n )\n\n for key in keys:\n self._properties[key] = len_dict[key]\n\n @property\n def has_unresolved_properties(self):\n return len(self._unresolved_property) > 0\n\n def _get_unresolved_properties(self):\n return [\n key\n for key in list(self._unresolved_property)\n if self._properties.get(key) is None\n ]\n\n def __str__(self) -> str:\n return f\"QiCell({self.cellID})\"" }, { "identifier": "QiCommand", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiCommand:\n \"\"\"Base class of every Job command.\n Provides _relevant_cells, containing every cell used for the execution of the command.\n Provides _associated_variable_set, containing every variable needed for the execution of the command.\n \"\"\"\n\n def __init__(self) -> None:\n self._associated_variable_set = QiVariableSet()\n self._relevant_cells: Set[QiCell] = set()\n\n @abstractmethod\n def accept(self, visitor, *input):\n raise RuntimeError(\n f\"{self.__class__} doesn't implement `accept`. This is a bug.\"\n )\n\n def is_variable_relevant(self, variable: _QiVariableBase) -> bool:\n return variable in self._associated_variable_set\n\n def add_associated_variable(self, x):\n if isinstance(x, _QiVariableBase):\n self._associated_variable_set.add(x)\n\n def __str__(self) -> str:\n return \"cQiCommand\"\n\n def _stringify(self) -> str:\n raise NotImplementedError(f\"_stringify not implemented for {repr(self)}\")" }, { "identifier": "QiContextManager", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiContextManager(QiCommand):\n \"\"\"Base Class for If, Else, ForRange and Parallel.\n Defines functions for storing commands.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.body: List[QiCommand] = []\n\n def __enter__(self):\n _QiJobReference._open_new_context()\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.body = _QiJobReference._close_context()\n _QiJobReference._add_command(self)\n\n def accept(self, visitor, *input):\n return visitor.visit_context_manager(self, *input)" }, { "identifier": "QiJob", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiJob:\n \"\"\"\n Container holding program, cells and qi_result containers for execution of program.\n Builds the job with its properties\n\n :param skip_nco_sync: if the NCO synchronization at the beginning should be skipped\n :param nco_sync_length: how long to wait after the nco synchronization\n \"\"\"\n\n def __init__(\n self,\n skip_nco_sync=False,\n nco_sync_length=0,\n ):\n self.qi_results: List[QiResult] = []\n self.cells = []\n self.skip_nco_sync = skip_nco_sync\n self.nco_sync_length = nco_sync_length\n\n self._description = _JobDescription()\n\n # Build\n self._performed_analyses = False\n self._build_done = False\n self._arranged_cells: List[Optional[QiCell]] = []\n self._var_reg_map: Dict[_QiVariableBase, Dict[QiCell, int]] = {}\n\n # Run\n self._custom_processing = None\n self._custom_data_handler = None\n\n def __enter__(self):\n 
# pylint: disable=global-statement\n global _QiJobReference\n _QiJobReference = self\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n for cmd in self.commands:\n cmd.accept(QiTypeFallbackVisitor())\n\n for cmd in self.commands:\n cmd.accept(QiPostTypecheckVisitor())\n\n _QiVariableBase.reset_str_id()\n\n # pylint: disable=global-statement\n global _QiJobReference\n _QiJobReference = None\n\n def _open_new_context(self):\n self._description.open_new_context()\n\n def _close_context(self):\n return self._description.close_context()\n\n def _add_command(self, command):\n self._description.add_command(command)\n\n @property\n def commands(self):\n \"\"\"returns the commands of the job\"\"\"\n return self._description._commands\n\n def _register_cells(self, cells: List[QiCell]):\n if len(self.cells) > 0:\n raise RuntimeError(\"Can only register one set of cells at a QiJob.\")\n\n self.cells = cells\n\n def _run_analyses(self):\n \"\"\"\n Executes needed (dataflow) analyses.\n These mutate the commands in QiJob by inserting additional instructions, therefore\n they should only run once, in order to avoid duplicate instructions.\n \"\"\"\n from .analysis.qi_insert_mem_parameters import (\n insert_recording_offset_store_commands,\n insert_manipulation_pulse_frequency_store_commands,\n insert_readout_pulse_frequency_store_commands,\n )\n\n if not self._performed_analyses:\n insert_recording_offset_store_commands(self)\n insert_manipulation_pulse_frequency_store_commands(self)\n insert_readout_pulse_frequency_store_commands(self)\n\n self._performed_analyses = True\n\n def _simulate_recordings(self) -> Dict[Any, List[cQiRecording]]:\n \"\"\"\n Simulates the order cQiRecording executions.\n The result of this simulation is used to disentangle the recordings buffer\n and reassociate the individual recording results with their corresponding Recording commands.\n It might return more elements than are recorded during the real execution.\n \"\"\"\n\n # We first check if there are Recording commands at positions which we can not simulate.\n # i.e. 
If-Else, ForRanges with start or end that are neither constant nor other loop variables.\n # If this is the case we cannot simulate the order.\n visitor = QiResultCollector()\n for cmd in self.commands:\n cmd.accept(visitor)\n\n if len(visitor.found_qi_results) == 0:\n return {cell: [] for cell in self.cells}\n elif visitor.recording_in_if:\n raise RuntimeError(\"Recording command within If-Else statement.\")\n\n # Next we simulate all loops and collect the respective Recording commands inside.\n from .qi_simulate import Simulator\n\n simulator = Simulator(self.cells)\n simulator._simulate(self.commands)\n\n return simulator.cell_recordings\n\n def _build_program(\n self, sample: Optional[QiSample] = None, cell_map: Optional[List[int]] = None\n ):\n if sample is not None and cell_map is not None:\n sample = sample._arrange_for_controller()\n sample = [sample[m] if m < len(sample) else None for m in cell_map]\n\n if cell_map is None:\n cell_map = list(range(len(self.cells)))\n\n # TODO Check that this works with None and right order now\n self._resolve_properties(sample)\n\n for cell in self.cells:\n if len(cell._get_unresolved_properties()) > 0:\n raise RuntimeError(\n f\"Unresolved properties {cell._get_unresolved_properties()} at cell {cell}\"\n )\n\n self._run_analyses()\n\n sim_result = self._simulate_recordings()\n for cell in self.cells:\n cell._result_recording_order = list(\n map(\n lambda x: x.result_box,\n filter(lambda x: x.result_box is not None, sim_result[cell]),\n )\n )\n\n prog_builder = QiProgramBuilder(\n self.cells,\n cell_map,\n self._description._commands.copy(),\n self.skip_nco_sync,\n self.nco_sync_length,\n )\n\n self.cell_seq_dict = prog_builder.build_program()\n self._var_reg_map = prog_builder.get_all_variables()\n self._build_done = True\n\n def _get_sequencer_codes(self):\n return [\n [\n instr.get_riscv_instruction()\n for instr in self.cell_seq_dict[cell].instruction_list\n ]\n for cell in self.cells\n ]\n\n def create_experiment(\n self,\n controller,\n sample: Optional[QiSample] = None,\n averages: int = 1,\n cell_map: Optional[List[int]] = None,\n data_collection=None,\n use_taskrunner=False,\n ):\n from ..experiment.qicode.base import QiCodeExperiment\n\n exp = QiCodeExperiment(\n *self._prepare_experiment_params(\n controller, sample, averages, cell_map, data_collection, use_taskrunner\n )\n )\n\n if data_collection is None:\n if self._custom_processing is not None:\n exp._taskrunner.update(self._custom_processing)\n if self._custom_data_handler is not None:\n exp._data_handler_factory = DataHandler.get_custom_wrapper_factory(\n self._custom_data_handler\n )\n\n # Provide a human-readable description of the execution\n if cell_map is None:\n cell_map = list(range(len(self.cells)))\n str_map = \", \".join([f\"q[{i}] -> sample[{m}]\" for i, m in enumerate(cell_map)])\n exp._job_representation = f\"{self}\\n\\nmapped as {str_map} to\\n\\n{sample}\"\n\n return exp\n\n def _prepare_experiment_params(\n self,\n controller,\n sample: Optional[QiSample] = None,\n averages: int = 1,\n cell_map: Optional[List[int]] = None,\n data_collection=None,\n use_taskrunner=False,\n ):\n if len(self.cells) > len(controller.cell):\n raise IndexError(\n f\"This job requires {len(self.cells)} cells but only \"\n f\"{len(controller.cell)} are available in the QiController.\"\n )\n\n if data_collection is None:\n if self._custom_processing is None:\n data_collection = \"average\"\n else:\n data_collection = \"custom\"\n\n # If float, convert averages to int\n averages = 
int(averages)\n\n if sample is None:\n sample = QiSample(len(controller.cell))\n elif len(sample) < len(self.cells):\n raise ValueError(\n \"Need to submit a QiSample with at least as many cells as the job \"\n f\"has ({len(self.cells)}), but only {len(sample)} provided.\"\n )\n\n if cell_map is None:\n # Use the first cells of the sample\n cell_map = list(range(len(self.cells)))\n else:\n if len(cell_map) != len(self.cells):\n raise ValueError(\n \"cell_map needs to have as many entries as the job has cells, but \"\n f\"{len(cell_map)} entries given and {len(self.cells)} required!\"\n )\n if len(set(cell_map)) != len(cell_map):\n raise ValueError(\"Duplicate values not allowed in cell_map!\")\n if any(m < 0 or m >= len(sample) for m in cell_map):\n raise IndexError(\n \"cell_map values can only point to valid indices within the passed\"\n f\" QiSample object, i.e. values between 0 and {len(sample) - 1}.\"\n )\n\n # Translate cell_map from sample cells (\"cells\") to QiController cells\n cell_map = [sample.cell_map[c] for c in cell_map]\n\n if any(c < 0 or c >= len(controller.cell) for c in cell_map):\n raise ValueError(\n \"The QiSample cell_map can only reference available QiController \"\n f\"cells, i.e. between 0 and {len(controller.cell) - 1}.\"\n )\n\n self._build_program(sample, cell_map)\n\n for_range_list = []\n\n for cell in self.cells:\n for_range_list.append(self.cell_seq_dict[cell]._for_range_list)\n\n return (\n controller,\n self.cells,\n self._get_sequencer_codes(),\n averages,\n for_range_list,\n cell_map,\n self._var_reg_map,\n data_collection,\n use_taskrunner,\n )\n\n def run(\n self,\n controller,\n sample: Optional[QiSample] = None,\n averages: int = 1,\n cell_map: Optional[List[int]] = None,\n data_collection=None,\n use_taskrunner=False,\n ):\n \"\"\"executes the job and returns the results\n\n :param controller: the QiController on which the job should be executed\n :param sample: the QiSample object used for execution of pulses and extracts parameters for the experiment\n :param averages: the number of executions that should be averaged, by default 1\n :param cell_map: A list containing the indices of the cells\n :param data_collection: the data_collection mode for the result, by default \"average\"\n :param use_taskrunner: if the execution should be handled by the Taskrunner\n Some advanced schemes and data_collection modes are currently only supported\n by the Taskrunner and not yet by a native control flow.\n \"\"\"\n exp = self.create_experiment(\n controller, sample, averages, cell_map, data_collection, use_taskrunner\n )\n exp.run()\n\n def run_with_data_callback(self, on_new_data: Callable[[dict], None]):\n pass\n\n def run_streamed(self):\n pass\n\n def set_custom_data_processing(\n self,\n file: str,\n params: Optional[List] = None,\n converter: Optional[Callable[[List], List]] = None,\n mode: Union[TaskRunner.DataMode, str] = TaskRunner.DataMode.INT32,\n data_handler: Optional[Callable[[List[QiCell], DataProvider], None]] = None,\n ):\n from qiclib.experiment.qicode.base import _TaskrunnerSettings\n\n if isinstance(mode, str):\n mode = TaskRunner.DataMode[mode.upper()]\n\n self._custom_processing = _TaskrunnerSettings(\n file, \"QiCode[Custom]\", params, mode, converter\n )\n self._custom_data_handler = data_handler\n\n def print_assembler(\n self,\n cells: Optional[QiCells] = None,\n cell_index=0,\n cell_map: Optional[List[int]] = None,\n ):\n \"\"\"\n Prints the commands as assembler code\n\n :param cells: the QiCells object for execution of pulses and 
saving result\n :param cell_index: the index of the cell in QiCells\n \"\"\"\n print(f\"Print program for cell index {cell_index}\")\n self._build_program(cells, cell_map)\n\n cell = self.cells[cell_index]\n\n self.cell_seq_dict[cell].print_assembler()\n\n def _resolve_properties(self, sample: QiSample):\n # Check if any job cell has unresolved properties -> if not, return\n if not any(cell.has_unresolved_properties for cell in self.cells):\n return\n\n if sample is None:\n raise ValueError(\"QiSample needs to be passed to resolve job properties!\")\n\n for i, cell in enumerate(self.cells):\n if cell.has_unresolved_properties:\n if i < len(sample) and sample[i] is not None:\n cell._resolve_properties(sample[i]._properties)\n else:\n raise ValueError(\n f\"Cell {i} of the job has unresolved properties but no QiSample \"\n \"cell is specified for it! Check your cell_map.\"\n )\n\n def __str__(self) -> str:\n from .qi_visitor import QiStringifyJob\n\n stringify_job = QiStringifyJob()\n return stringify_job.stringify(self)" } ]
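The context snippets above document the qiclib QiJob/QiCell API: cells are created inside a QiJob context, sample properties are looked up by indexing a cell, and results are read back with data(). Below is a minimal usage sketch assembled only from those docstrings; the QiCells import path and the controller/sample objects are assumptions and are not part of this record.

# Usage sketch based on the QiCell/QiJob docstrings quoted in the context list
# above.  The import path for QiCells and the `controller`/`my_sample` objects
# are assumptions, not shown in this record.
from qiclib.code.qi_jobs import QiJob, QiCells  # assumed import path

with QiJob() as job:
    q = QiCells(1)            # "For a single QiCell, use instead QiCells(1)"
    t1_time = q[0]["t1"]      # QiCellProperty, resolved against the QiSample at run time

# Executing the job and reading results (commented out: requires real hardware):
# job.run(controller, sample=my_sample, averages=1000)
# all_results = q[0].data()          # dict of result name -> numpy array
# one_result = q[0].data("readout")  # a single named result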
from abc import abstractmethod from enum import Enum from typing import Optional, List, Set, Tuple, Union, Dict from copy import copy from qiclib.code.qi_var_definitions import ( _QiVariableBase, QiExpression, ) from .qi_jobs import ( ForRange, If, Parallel, QiCell, QiCommand, QiContextManager, QiJob, )
11,261
succ_neighbor = copy(pred_neighbor) succ_neighbor.node = self self.predecessors.add(pred_neighbor) pred.successors.add(succ_neighbor) class _CFG: """Constructs a control flow graph (CFG) from the commands of a QiJob. The end node does not contain a command, if the last top level command is an If-else or ForRange """ def __init__(self, job: QiJob): self.nodes: Set[_CFGNode] = set() start, end = recursive_build_sub_cfg(job.commands, self.nodes) self.end = _CFGNode(_CFGNode.Type.END, None, None, *end) self.start = _CFGNode(_CFGNode.Type.START, None, None) self.start.connect_successors( _CFGNode.Neighbor(start, _CFGNode.SrcEdgeType.NORMAL) ) def node_iterator(self): visited = set() stack = [self.start] while len(stack) > 0: node = stack.pop() visited.add(node) yield node for successor in node.successors: successor = successor.node if successor not in visited: stack.append(successor) def add_value(self, key, initial): for node in self.node_iterator(): if key not in node.value_map: node.value_map[key] = initial def dump_dot_graph(self, path): """Dump the current cfg topology as a dot file for inspecting and debugging purposes.""" with open(path, "w", encoding="utf-8") as f: f.write("\ndigraph {\n") queue = [self.start] node_visited_or_in_queue = set() node_visited_or_in_queue.add(self.start) while len(queue) > 0: node = queue.pop(0) node_attributes = "\n".join( [f"{name} = {value}" for name, value in node.value_map.items()] ) if node.type == _CFGNode.Type.COMMAND: if isinstance(node.command, QiCommand): node_text = f"{node.command._stringify()}" else: node_text = f"{node.command}" label = f"{node_text}\n{node_attributes}" shape = "box" elif node.type == _CFGNode.Type.START: label = f"start\n{node_attributes}" shape = "oval" elif node.type == _CFGNode.Type.END: label = f"end\n{node_attributes}" shape = "oval" escaped_label = label.translate(str.maketrans({'"': '\\"'})) f.write(f'\t{node.id} [shape={shape}, label="{escaped_label}"];\n') for successor in node.successors: src_edge_type = successor.src_edge_type dest_edge_type = successor.dest_edge_type successor = successor.node assert isinstance(successor, _CFGNode) label = [] if src_edge_type is not _CFGNode.SrcEdgeType.NORMAL: label.append(f"{src_edge_type}") if dest_edge_type is not _CFGNode.DestEdgeType.NORMAL: label.append(f"{dest_edge_type}") label = ", ".join(label) node_label = f'[label="{label}"]' f.write(f"\t{node.id} -> {successor.id} {node_label};\n") if successor not in node_visited_or_in_queue: queue.append(successor) node_visited_or_in_queue.add(successor) f.write("}") def recursive_build_sub_cfg( commands: List[QiCommand], nodes ) -> Tuple[_CFGNode, List[_CFGNode.Neighbor]]: """ Constructs the nodes and edges for a CFG containing provided commands. `nodes` accumulates all nodes of the CFG. """ assert len(commands) > 0 prev: List[_CFGNode.Neighbor] = [] for idx, command in enumerate(commands, 0):
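The cropped_code field above stops inside recursive_build_sub_cfg, but the _CFG class it shows is complete enough to drive. A short sketch of how that class might be exercised follows; `job` is an assumed, already-built QiJob, while _CFG, add_value, node_iterator and dump_dot_graph are taken verbatim from the code above.

# Sketch only: `job` is an assumed, finished QiJob from the qiclib API.
cfg = _CFG(job)                                # start/end nodes plus one node per command
cfg.add_value("live_vars", initial=frozenset())  # seed every node's value_map for an analysis
for node in cfg.node_iterator():               # visits every node reachable from start
    print(node.id, node.type, node.value_map["live_vars"])
cfg.dump_dot_graph("qijob_cfg.dot")            # Graphviz dump for manual inspection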
# Copyright © 2017-2023 Quantum Interface ([email protected]) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This module provides basic infrastructure to perform dataflow analyses on qicode programs. Dataflow analyses are computed on the control flow graph (CFG) of a QiJob which should be created when necessary. The dataflow analysis itself is performed in using a standard worklist algorithm. The abstract domain is modeled using DataflowValue. Its merge function represents the supremum calculation. It is recommended to treat DataflowValues as immutable. """ class _CFGNode: class Type(Enum): START = 0 END = 1 COMMAND = 2 class SrcEdgeType(Enum): """CFG Edge information about the source node""" IF_TRUE = 0 IF_FALSE = 1 FOR_BODY = 2 FOR_END = 4 NORMAL = 5 def __str__(self): return { _CFGNode.SrcEdgeType.IF_TRUE: "if_true", _CFGNode.SrcEdgeType.IF_FALSE: "if_false", _CFGNode.SrcEdgeType.FOR_BODY: "for_true", _CFGNode.SrcEdgeType.FOR_END: "for_end", _CFGNode.SrcEdgeType.NORMAL: "normal", }[self] class DestEdgeType(Enum): """CFG Edge information about the destination node""" FOR_BODY_RETURN = 0 FOR_ENTRY = 1 NORMAL = 2 def __str__(self): return { _CFGNode.DestEdgeType.FOR_BODY_RETURN: "for_body_ret", _CFGNode.DestEdgeType.FOR_ENTRY: "for_entry", _CFGNode.DestEdgeType.NORMAL: "normal", }[self] class Neighbor: """Combination of node and both edge types. Each edge in the CFG is represented by an instance of this class""" def __init__( self, neighbor: "_CFGNode", src_edge_type: "_CFGNode.SrcEdgeType", dest_edge_type: Optional["_CFGNode.DestEdgeType"] = None, ): # Default argument didn't work for me in this case. if dest_edge_type is None: dest_edge_type = _CFGNode.DestEdgeType.NORMAL self.node = neighbor # Information about the edge for the src node # (for example, if this edge goes to the 'else' block of an 'if' statement.) self.src_edge_type = src_edge_type # Information about the edge for the destination node # (for example, if the edge loops back from the body of a for statement.) self.dest_edge_type = dest_edge_type _cfg_node_next_id = 1 def __init__( self, type: Union["_CFGNode.Type", QiCommand], instruction_list, index, *predecessors: "Tuple[_CFGNode, _CFGNode.SrcEdgeType]", ): if isinstance(type, QiCommand): self.type = _CFGNode.Type.COMMAND self.command = type else: assert isinstance(type, _CFGNode.Type) self.type = type # This field is used to associated arbitrary data with every node. # For example, a dataflow analysis might use this dictionary to # the nodes current abstract value. self.value_map: Dict[str, CellValues] = {} self.predecessors: Set[_CFGNode.Neighbor] = set() self.successors: Set[_CFGNode.Neighbor] = set() # Used to find commands in job command list, so we can insert new instruction before or after this # command. 
self.instruction_list = instruction_list self.instruction_index = index self.id = _CFGNode._cfg_node_next_id _CFGNode._cfg_node_next_id += 1 self.connect_predecessors(*predecessors) def connect_successors(self, *successors: "_CFGNode.Neighbor"): assert all(map(lambda x: isinstance(x, _CFGNode.Neighbor), successors)) for succ_neighbor in successors: succ = succ_neighbor.node pred_neighbor = copy(succ_neighbor) pred_neighbor.node = self self.successors.add(succ_neighbor) succ.predecessors.add(pred_neighbor) def connect_predecessors(self, *predecessors: "_CFGNode.Neighbor"): assert all(map(lambda x: isinstance(x, _CFGNode.Neighbor), predecessors)) for pred_neighbor in predecessors: pred = pred_neighbor.node succ_neighbor = copy(pred_neighbor) succ_neighbor.node = self self.predecessors.add(pred_neighbor) pred.successors.add(succ_neighbor) class _CFG: """Constructs a control flow graph (CFG) from the commands of a QiJob. The end node does not contain a command, if the last top level command is an If-else or ForRange """ def __init__(self, job: QiJob): self.nodes: Set[_CFGNode] = set() start, end = recursive_build_sub_cfg(job.commands, self.nodes) self.end = _CFGNode(_CFGNode.Type.END, None, None, *end) self.start = _CFGNode(_CFGNode.Type.START, None, None) self.start.connect_successors( _CFGNode.Neighbor(start, _CFGNode.SrcEdgeType.NORMAL) ) def node_iterator(self): visited = set() stack = [self.start] while len(stack) > 0: node = stack.pop() visited.add(node) yield node for successor in node.successors: successor = successor.node if successor not in visited: stack.append(successor) def add_value(self, key, initial): for node in self.node_iterator(): if key not in node.value_map: node.value_map[key] = initial def dump_dot_graph(self, path): """Dump the current cfg topology as a dot file for inspecting and debugging purposes.""" with open(path, "w", encoding="utf-8") as f: f.write("\ndigraph {\n") queue = [self.start] node_visited_or_in_queue = set() node_visited_or_in_queue.add(self.start) while len(queue) > 0: node = queue.pop(0) node_attributes = "\n".join( [f"{name} = {value}" for name, value in node.value_map.items()] ) if node.type == _CFGNode.Type.COMMAND: if isinstance(node.command, QiCommand): node_text = f"{node.command._stringify()}" else: node_text = f"{node.command}" label = f"{node_text}\n{node_attributes}" shape = "box" elif node.type == _CFGNode.Type.START: label = f"start\n{node_attributes}" shape = "oval" elif node.type == _CFGNode.Type.END: label = f"end\n{node_attributes}" shape = "oval" escaped_label = label.translate(str.maketrans({'"': '\\"'})) f.write(f'\t{node.id} [shape={shape}, label="{escaped_label}"];\n') for successor in node.successors: src_edge_type = successor.src_edge_type dest_edge_type = successor.dest_edge_type successor = successor.node assert isinstance(successor, _CFGNode) label = [] if src_edge_type is not _CFGNode.SrcEdgeType.NORMAL: label.append(f"{src_edge_type}") if dest_edge_type is not _CFGNode.DestEdgeType.NORMAL: label.append(f"{dest_edge_type}") label = ", ".join(label) node_label = f'[label="{label}"]' f.write(f"\t{node.id} -> {successor.id} {node_label};\n") if successor not in node_visited_or_in_queue: queue.append(successor) node_visited_or_in_queue.add(successor) f.write("}") def recursive_build_sub_cfg( commands: List[QiCommand], nodes ) -> Tuple[_CFGNode, List[_CFGNode.Neighbor]]: """ Constructs the nodes and edges for a CFG containing provided commands. `nodes` accumulates all nodes of the CFG. 
""" assert len(commands) > 0 prev: List[_CFGNode.Neighbor] = [] for idx, command in enumerate(commands, 0):
if isinstance(command, If):
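The next_line field above marks where cropped_code stops: the loop is about to dispatch on the command type, starting with If. Purely as an illustration of how the pieces defined earlier fit together, and not as the repository's actual continuation, one plausible shape of that branch is sketched below; it reuses the Neighbor/SrcEdgeType machinery from the code above, and `command.body` is an assumed attribute of the If command.

# Speculative sketch -- NOT the file's real continuation.
if isinstance(command, If):
    node = _CFGNode(command, commands, idx, *prev)
    nodes.add(node)
    body_start, body_end = recursive_build_sub_cfg(command.body, nodes)
    node.connect_successors(
        _CFGNode.Neighbor(body_start, _CFGNode.SrcEdgeType.IF_TRUE)
    )
    # Exits of the body plus a fall-through edge for the false branch.
    prev = body_end + [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.IF_FALSE)]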
1
2023-11-10 10:26:10+00:00
16k
jpcadena/fastapi-boilerplate
app/api/api_v1/router/auth.py
[ { "identifier": "get_redis_dep", "path": "app/api/deps.py", "snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependency injection on Redis\n :type redis_dependency: RedisDependency\n :return: The Redis connection instance as a generator\n :rtype: AsyncGenerator[Redis, None]\n \"\"\"\n async with redis_dependency as redis:\n yield redis" }, { "identifier": "get_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_current_user(\n token: Annotated[str, Depends(oauth2_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n access token\n :param token: The Access token from OAuth2PasswordBearer\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n token_service: TokenService = TokenService(redis, auth_settings)\n is_blacklisted: bool = await token_service.is_token_blacklisted(token)\n if is_blacklisted:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Token is blacklisted\",\n )\n return await authenticate_user(token, auth_settings, user_service, redis)" }, { "identifier": "get_refresh_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_refresh_current_user(\n refresh_token: Annotated[str, Depends(refresh_token_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n refresh token\n :param refresh_token: The Refresh token from OAuth2PasswordBearer\n :type refresh_token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n return await authenticate_user(\n refresh_token, auth_settings, user_service, redis\n )" }, { "identifier": "get_auth_settings", "path": "app/config/config.py", "snippet": "def get_init_settings() -> InitSettings:\ndef get_settings() -> Settings:\ndef get_sql_settings() -> SQLDatabaseSettings:\ndef get_auth_settings() -> AuthSettings:" }, { "identifier": "AuthSettings", "path": "app/config/db/auth_settings.py", "snippet": "class AuthSettings(BaseSettings):\n \"\"\"\n Settings class for authentication using JWT and Redis\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n MAX_REQUESTS: PositiveInt = 30\n RATE_LIMIT_DURATION: PositiveInt = 60\n BLACKLIST_EXPIRATION_SECONDS: PositiveInt = 3600\n API_V1_STR: str = 
\"/api/v1\"\n ALGORITHM: str = \"HS256\"\n AUTH_URL: str = \"api/v1/auth/\"\n TOKEN_URL: str = \"api/v1/auth/login\"\n OAUTH2_SCHEME: str = \"JWT\"\n OAUTH2_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate most of\" \" the API endpoints.\"\n )\n OAUTH2_REFRESH_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate\" \" most ofhe API endpoints.\"\n )\n TOKEN_USER_INFO_REGEX: str = (\n r\"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-\"\n r\"[0-9a-f]{4}-[0-9a-f]{12}:\\d{1,3}\\.\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n )\n SUB_REGEX: str = (\n r\"^username:[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-\"\n r\"[89ab][0-9a-f]{3}-[0-9a-f]{12}$\"\n )\n HEADERS: dict[str, str] = {\"WWW-Authenticate\": \"Bearer\"}\n DETAIL: str = \"Could not validate credentials\"\n NO_CLIENT_FOUND: str = \"No client found on the request\"\n SECRET_KEY: str\n SERVER_URL: AnyHttpUrl\n SERVER_DESCRIPTION: str\n CACHE_SECONDS: PositiveInt = 3600\n ACCESS_TOKEN_EXPIRE_MINUTES: float\n REFRESH_TOKEN_EXPIRE_MINUTES: PositiveInt\n EMAIL_RESET_TOKEN_EXPIRE_HOURS: PositiveInt\n AUDIENCE: Optional[AnyHttpUrl] = None\n STRICT_TRANSPORT_SECURITY_MAX_AGE: PositiveInt\n\n @field_validator(\"AUDIENCE\", mode=\"before\")\n def assemble_audience(\n cls, v: Optional[str], info: ValidationInfo\n ) -> AnyHttpUrl:\n \"\"\"\n Combine server host and API_V1_STR to create the audience\n string.\n :param v: The value of audience attribute\n :type v: Optional[str]\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The AUDIENCE attribute\n :rtype: AnyHttpUrl\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return AnyHttpUrl(\n f'{str(info.data.get(\"SERVER_URL\"))[:-1]}:8000/'\n f'{info.data.get(\"TOKEN_URL\")}'\n )\n\n REDIS_SCHEME: str\n REDIS_HOST: str\n REDIS_USERNAME: str\n REDIS_PASSWORD: str\n REDIS_PORT: PositiveInt\n REDIS_DATABASE_URI: Optional[RedisDsn] = None\n\n @field_validator(\"REDIS_DATABASE_URI\", mode=\"before\")\n def assemble_redis_connection(\n cls, v: Optional[str], info: ValidationInfo\n ) -> RedisDsn:\n \"\"\"\n Assemble the cache database connection as URI string\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: Redis URI\n :rtype: RedisDsn\n \"\"\"\n # pylint: disable=no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return RedisDsn(\n str(\n Url.build(\n scheme=info.data.get(\"REDIS_SCHEME\", \"\"),\n username=info.data.get(\"REDIS_USERNAME\"),\n password=info.data.get(\"REDIS_PASSWORD\"),\n host=info.data.get(\"REDIS_HOST\", \"\"),\n port=info.data.get(\"REDIS_PORT\"),\n )\n )\n )" }, { "identifier": "InitSettings", "path": "app/config/init_settings.py", "snippet": "class InitSettings(BaseSettings):\n \"\"\"\n Init Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n case_sensitive=True,\n extra=\"allow\",\n )\n\n ITERATIONS: PositiveInt = 100000\n KEY_BYTES_LENGTH: PositiveInt = 32\n SALT_BYTES: PositiveInt = 16\n IV_BYTES: PositiveInt = 12\n PUBLIC_EXPONENT: PositiveInt = 65537\n RSA_KEY_BITS: PositiveInt = 2048\n SALUTE: str = \"Salute!\"\n ROOT_MSG: str = \"Hello, World!\"\n SERVER_NAME: str = \"FastAPI Boilerplate\"\n PROJECT_NAME: str = \"fastapi-boilerplate\"\n VERSION: str = \"1.0\"\n ENCODING: str = \"UTF-8\"\n DEFAULT_REGION: str = \"Guayas\"\n DEFAULT_COUNTRY: str = \"Ecuador\"\n 
OPENAPI_FILE_PATH: str = \"/openapi.json\"\n DATE_FORMAT: str = \"%Y-%m-%d\"\n DATETIME_FORMAT: str = \"%Y-%m-%d %H:%M:%S\"\n FILE_DATE_FORMAT: str = \"%d-%b-%Y-%H-%M-%S\"\n IMAGES_APP: str = \"images\"\n IMAGES_PATH: str = \"/assets/images\"\n IMAGES_DIRECTORY: str = \"assets/images\"\n EMAIL_TEMPLATES_DIR: str = \"templates\"\n PASSWORD_RECOVERY_SUBJECT: str = \"Password recovery for user\"\n NEW_ACCOUNT_SUBJECT: str = \"New account for user\"\n WELCOME_SUBJECT: str = \"Welcome to \"\n PASSWORD_CHANGED_CONFIRMATION_SUBJECT: str = (\n \"Successfully password \" \"changed for \"\n )\n DELETE_ACCOUNT_SUBJECT: str = \"Account deleted for \"\n LOG_FORMAT: str = (\n \"[%(name)s][%(asctime)s][%(levelname)s][%(module)s]\"\n \"[%(funcName)s][%(lineno)d]: %(message)s\"\n )\n PASSWORD_REGEX: str = (\n \"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?\" \"[#?!@$%^&*-]).{8,14}$\"\n )\n\n SUMMARY: str = \"\"\"This backend project is FastAPI template.\n This project serves as the backend, which aims to provide a robust and\n reliable system to its users.\n This backend application plays a crucial role in providing the\n functionality for user authentication, real-time monitoring,\n data processing, and advanced alerting system. It is designed to ensure\n the scalability and maintainability of the mobile app,\n making it a vital part of the overall solution.\n \"\"\"\n DESCRIPTION: str = f\"\"\"**FastAPI**, **SQLAlchemy** and **Redis** helps you\n do awesome stuff. 🚀\n \\n\\n<img src=\"data:image/png;base64,{img_b64}\"/>\"\"\"\n LICENSE_INFO: dict[str, str] = {\n \"name\": \"MIT\",\n \"identifier\": \"MIT\",\n }\n TAGS_METADATA: list[dict[str, str]] = [\n {\n \"name\": \"user\",\n \"description\": f\"\"\"Operations with users, such as register, get,\n update and delete.\\n\\n<img src=\"data:image/png;base64,\n {users_b64}\" width=\"150\" height=\"100\"/>\"\"\",\n },\n {\n \"name\": \"auth\",\n \"description\": f\"\"\"The authentication logic is here as well as\n password recovery and reset.\n \\n\\n<img src=\"data:image/png;base64,{auth_b64}\" width=\"75\"\n height=\"75\"/>\"\"\",\n },\n ]\n USER_CREATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user create object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone number `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is 
rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"5939876a4321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n \"region\": \"Andes\",\n \"country\": \"New York\",\n \"postal_code\": \"999999\",\n },\n },\n },\n }\n USER_UPDATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user update object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone numbers `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"59398x54321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n },\n },\n },\n }\n EMAIL_BODY_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** email object that works correctly.\",\n \"value\": \"[email protected]\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": 123,\n },\n }\n TOKEN_PAYLOAD_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** token payload object that works \"\n \"correctly.\",\n \"value\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"token\": \"123\",\n \"password\": \"abc123\",\n },\n },\n }\n AUTHORIZATION_HEADER_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** authorization token object that works \"\n \"correctly.\",\n \"value\": jwt.encode(\n claims=jsonable_encoder(\n {\n \"sub\": f\"username:{str(uuid4())}\",\n \"nationalities\": [\"ECU\"],\n \"email\": \"[email protected]\",\n \"nickname\": \"example\",\n \"preferred_username\": \"example\",\n \"given_name\": \"Some\",\n \"family_name\": \"Example\",\n \"middle_name\": \"One\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 
12, 31),\n \"updated_at\": datetime.now(),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n \"exp\": int(time.time()) + 1800,\n \"nbf\": int(time.time()) - 1,\n \"iat\": int(time.time()),\n }\n ),\n key=\"f52e826e62cdd364c86f129cb18db2fe2be93859c5104cac9585f\"\n \"305378dce65\",\n algorithm=\"HS256\",\n ),\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": \"123\",\n },\n }\n LIMIT_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** limit query parameter that works \"\n \"correctly.\",\n \"value\": 1,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert limit `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"5\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }\n SKIP_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** skip query parameter that works \"\n \"correctly.\",\n \"value\": 0,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert skip `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"20\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }" }, { "identifier": "Settings", "path": "app/config/settings.py", "snippet": "class Settings(BaseSettings):\n \"\"\"\n Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n SMTP_PORT: PositiveInt\n SMTP_HOST: str\n SMTP_USER: str\n SMTP_PASSWORD: str\n MAIL_SUBJECT: str\n MAIL_TIMEOUT: float\n EMAILS_FROM_EMAIL: Optional[EmailStr] = None\n EMAILS_FROM_NAME: Optional[str] = None\n SUPERUSER_EMAIL: EmailStr\n SUPERUSER_FIRST_NAME: str\n SUPERUSER_PASSWORD: str\n SUPERUSER_LAST_NAME: str\n SUPERUSER_STREET_ADDRESS: str\n SUPERUSER_LOCALITY: str\n SUPERUSER_POSTAL_CODE: str\n BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []\n\n PUBLIC_KEY_PATH: FilePath\n PRIVATE_KEY_PATH: FilePath\n\n @field_validator(\n \"PUBLIC_KEY_PATH\", \"PRIVATE_KEY_PATH\", mode=\"before\", check_fields=True\n )\n def validate_key_paths(cls, key_path: FilePath) -> FilePath:\n \"\"\"\n Validate the provided key path.\n :param key_path: Provided key path\n :type key_path: FilePath\n :return: The validated key path\n :rtype: FilePath\n \"\"\"\n if not str(key_path).endswith(\".pem\"):\n raise ValueError(f\"{key_path} must have a .pem extension\")\n base_name: str = os.path.basename(key_path)\n if not base_name.endswith(\"key.pem\"):\n raise ValueError(\n f\"{key_path} must have a file name ending with 'key'\"\n )\n return key_path\n\n @field_validator(\"BACKEND_CORS_ORIGINS\", mode=\"before\")\n def assemble_cors_origins(\n cls, v: Union[str, list[str]]\n ) -> Union[list[str], str]:\n \"\"\"\n Assemble a list of allowed CORS origins.\n :param v: Provided CORS origins, either a string or a list of\n strings\n :type v: Union[str, list[str]]\n :return: List of Backend CORS origins to be accepted\n :rtype: Union[list[str], str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if 
isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n if isinstance(v, list):\n return v\n raise ValueError(v)\n\n CONTACT_NAME: Optional[str] = None\n CONTACT_URL: Optional[AnyHttpUrl] = None\n CONTACT_EMAIL: Optional[EmailStr] = None\n CONTACT: Optional[dict[str, Any]] = None\n\n @field_validator(\"CONTACT\", mode=\"before\")\n def assemble_contact(\n cls, v: Optional[str], info: ValidationInfo\n ) -> dict[str, str]:\n \"\"\"\n Assemble contact information\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The contact attribute\n :rtype: dict[str, str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n contact: dict[str, Any] = {}\n if info.data.get(\"CONTACT_NAME\"):\n contact[\"name\"] = info.data.get(\"CONTACT_NAME\")\n if info.data.get(\"CONTACT_URL\"):\n contact[\"url\"] = info.data.get(\"CONTACT_URL\")\n if info.data.get(\"CONTACT_EMAIL\"):\n contact[\"email\"] = info.data.get(\"CONTACT_EMAIL\")\n return contact" }, { "identifier": "verify_password", "path": "app/core/security/password.py", "snippet": "def verify_password(hashed_password: str, plain_password: str) -> bool:\n \"\"\"\n Verifies if a plain text password matches a hashed password\n :param plain_password: The plain text password to verify\n :type plain_password: str\n :param hashed_password: The hashed password to compare against\n :type hashed_password: str\n :return: True if the passwords match, False otherwise\n :rtype: bool\n \"\"\"\n if not plain_password:\n raise_custom_error(\"Plain password cannot be empty or None\")\n if not hashed_password:\n raise_custom_error(\"Hashed password cannot be empty or None\")\n return crypt_context.verify(plain_password, hashed_password)" }, { "identifier": "NotFoundException", "path": "app/exceptions/exceptions.py", "snippet": "class NotFoundException(Exception):\n \"\"\"\n Not Found Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "ServiceException", "path": "app/exceptions/exceptions.py", "snippet": "class ServiceException(Exception):\n \"\"\"\n Service Layer Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "User", "path": "app/models/sql/user.py", "snippet": "class User(Base): # type: ignore\n \"\"\"\n User model class representing the \"users\" table\n \"\"\"\n\n __tablename__ = \"users\"\n\n id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n index=True,\n nullable=False,\n primary_key=True,\n unique=True,\n server_default=text(\"(gen_random_uuid())\"),\n comment=\"ID of the User\",\n )\n username: Mapped[str] = mapped_column(\n String(15),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Username to identify the user\",\n )\n email: Mapped[EmailStr] = mapped_column(\n String(320),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Preferred e-mail address of the User\",\n )\n first_name: Mapped[str] = mapped_column(\n String(50), nullable=False, comment=\"First name(s) of the User\"\n )\n middle_name: Mapped[str] = mapped_column(\n String(50), nullable=True, comment=\"Middle name(s) of the User\"\n )\n last_name: Mapped[str] = mapped_column(\n String(100), nullable=False, comment=\"Last 
name(s) of the User\"\n )\n password: Mapped[str] = mapped_column(\n String(60), nullable=False, comment=\"Hashed password of the User\"\n )\n gender: Mapped[Gender] = mapped_column(\n Enum(Gender), nullable=True, comment=\"Gender of the User\"\n )\n birthdate: Mapped[PastDate] = mapped_column(\n Date, nullable=True, comment=\"Birthday of the User\"\n )\n phone_number: Mapped[PhoneNumber] = mapped_column(\n String(20),\n nullable=True,\n comment=\"Preferred telephone number of the User\",\n )\n is_active: Mapped[bool] = mapped_column(\n Boolean(),\n default=True,\n nullable=False,\n server_default=text(\"true\"),\n comment=\"True if the user is active; otherwise false\",\n )\n is_superuser: Mapped[bool] = mapped_column(\n Boolean(),\n default=False,\n nullable=False,\n server_default=text(\"false\"),\n comment=\"True if the user is super user; otherwise false\",\n )\n created_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n default=datetime.now(),\n nullable=False,\n server_default=text(\"now()\"),\n comment=\"Time the User was created\",\n )\n updated_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n nullable=True,\n onupdate=text(\"now()\"),\n comment=\"Time the User was updated\",\n )\n address_id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n ForeignKey(\n \"users_address.id\",\n name=\"users_address_id_fkey\",\n ),\n nullable=False,\n comment=\"ID of the User's address\",\n )\n address: Mapped[\"Address\"] = relationship( # type: ignore\n \"Address\", back_populates=\"users\", lazy=\"joined\"\n )\n\n __table_args__ = (\n CheckConstraint(\n \"char_length(username) >= 4\", name=\"users_username_length\"\n ),\n CheckConstraint(\"char_length(email) >= 3\", name=\"users_email_length\"),\n CheckConstraint(\n sql_database_setting.DB_EMAIL_CONSTRAINT, name=\"users_email_format\"\n ),\n CheckConstraint(\n \"char_length(first_name) >= 1\", name=\"users_first_name_length\"\n ),\n CheckConstraint(\n \"char_length(last_name) >= 1\", name=\"users_last_name_length\"\n ),\n CheckConstraint(\"LENGTH(password) = 60\", name=\"users_password_length\"),\n CheckConstraint(\n sql_database_setting.DB_PHONE_NUMBER_CONSTRAINT,\n name=\"users_phone_number_format\",\n ),\n )" }, { "identifier": "Msg", "path": "app/schemas/external/msg.py", "snippet": "class Msg(BaseModel):\n \"\"\"\n Schema for representing a message.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\"example\": {\"msg\": \"Hello, World!!!\"}}\n )\n\n msg: str = Field(..., title=\"Message\", description=\"Message to display\")" }, { "identifier": "TokenResetPassword", "path": "app/schemas/external/token.py", "snippet": "class TokenResetPassword(BaseModel):\n \"\"\"\n Token Reset Password for Request based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n }\n }\n )\n\n token: str = Field(\n ..., title=\"Token\", description=\"Access token\", min_length=30\n )\n password: str = Field(\n ...,\n title=\"New password\",\n description=\"New password to reset\",\n validate_default=True,\n min_length=8,\n max_length=14,\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password to be validated\n :type v: Optional[str]\n 
:return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "TokenResponse", "path": "app/schemas/external/token.py", "snippet": "class TokenResponse(Token):\n \"\"\"\n Token for Response based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra=token_response_example,\n )\n\n token_type: str = Field(\n default=\"bearer\", title=\"Token type\", description=\"Type of the token\"\n )" }, { "identifier": "UserResponse", "path": "app/schemas/external/user.py", "snippet": "class UserResponse(UserID, UserBase, UserOptional, UserInDB):\n \"\"\"\n Schema for the response when retrieving a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_response_example,\n )" }, { "identifier": "UserUpdate", "path": "app/schemas/external/user.py", "snippet": "class UserUpdate(BaseModel):\n \"\"\"\n Schema for updating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_example,\n )\n\n username: Optional[str] = Field(\n default=None,\n title=\"Username\",\n description=\"Username to identify the user\",\n min_length=4,\n max_length=15,\n )\n email: Optional[EmailStr] = Field(\n default=None,\n title=\"Email\",\n description=\"Preferred e-mail address of the User\",\n )\n first_name: Optional[str] = Field(\n default=None,\n title=\"First name\",\n description=\"First name(s) of the User\",\n min_length=1,\n max_length=50,\n )\n middle_name: Optional[str] = Field(\n default=None,\n title=\"Middle Name\",\n description=\"Middle name(s) of the User\",\n max_length=50,\n )\n last_name: Optional[str] = Field(\n default=None,\n title=\"Last name\",\n description=\"Last name(s) of the User\",\n min_length=1,\n max_length=100,\n )\n password: Optional[str] = Field(\n default=None,\n title=\"New Password\",\n description=\"New Password of the User\",\n min_length=8,\n max_length=14,\n )\n gender: Optional[Gender] = Field(\n default=None, title=\"Gender\", description=\"Gender of the User\"\n )\n birthdate: Optional[date] = Field(\n default=None, title=\"Birthdate\", description=\"Birthday of the User\"\n )\n phone_number: Optional[PhoneNumber] = Field(\n default=None,\n title=\"Phone number\",\n description=\"Preferred telephone number of the User\",\n )\n address: Optional[Address] = Field(\n default=None, title=\"Address\", description=\"Address of the User\"\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password value to validate\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "UserUpdateResponse", "path": "app/schemas/external/user.py", "snippet": "class UserUpdateResponse(\n UserAuth, UserName, UserPassword, UserOptional, UserInDB\n):\n \"\"\"\n Schema for the response when updating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_response_example,\n )" }, { "identifier": "UserAuth", "path": "app/schemas/infrastructure/user.py", "snippet": "class UserAuth(UserID, UserBaseAuth):\n \"\"\"\n User Auth that inherits from UserID.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_auth_example,\n )" }, { "identifier": "common_auth_procedure", "path": 
"app/services/infrastructure/auth.py", "snippet": "async def common_auth_procedure(\n user: User,\n client_ip: str,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n) -> TokenResponse:\n \"\"\"\n Common authentication procedure for login and refresh token based on\n token generation\n :param user: The user to authenticate\n :type user: User\n :param client_ip: The IP address of the client\n :type client_ip: str\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The token response object\n :rtype: TokenResponse\n \"\"\"\n auth_token = AuthService.auth_token(user, auth_settings)\n user_info = f\"{str(user.id)}:{client_ip}\"\n token = TokenDB(key=auth_token.refresh_token, user_info=user_info)\n token_service = TokenService(redis, auth_settings)\n token_set = await token_service.create_token(token)\n if not token_set:\n detail = \"Could not insert data in Authentication database\"\n logger.warning(detail)\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=detail\n )\n return TokenResponse(**auth_token.model_dump())" }, { "identifier": "TokenService", "path": "app/services/infrastructure/token.py", "snippet": "class TokenService:\n \"\"\"\n Service class for token operations in the authentication database\n \"\"\"\n\n def __init__(\n self,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n ):\n self._redis: Redis = redis # type: ignore\n self._refresh_token_expire_minutes: (\n PositiveInt\n ) = auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES\n self._blacklist_expiration_seconds: PositiveInt = (\n PositiveInt(\n PositiveInt(auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES) + 1\n )\n * 60\n ) # converting minutes to seconds\n\n @handle_redis_exceptions\n @benchmark\n async def create_token(self, token: Token) -> bool:\n \"\"\"\n Create a token in authentication database\n :param token: Token object with key and value\n :type token: Token\n :return: True if the token was inserted; otherwise false\n :rtype: bool\n \"\"\"\n try:\n inserted: bool = await self._redis.setex(\n token.key,\n self._refresh_token_expire_minutes,\n token.user_info,\n )\n except RedisError as r_exc:\n logger.error(\"Error at creating token. %s\", r_exc)\n raise r_exc\n return inserted\n\n @handle_redis_exceptions\n @benchmark\n async def get_token(self, key: str) -> Optional[str]:\n \"\"\"\n Read token from the authentication database\n :param key: The key to search for\n :type key: str\n :return: The refresh token\n :rtype: str\n \"\"\"\n try:\n value: str = str(await self._redis.get(key))\n except RedisError as r_exc:\n logger.error(\"Error at getting token. %s\", r_exc)\n raise r_exc\n return value\n\n @handle_redis_exceptions\n @benchmark\n async def blacklist_token(self, token_key: str) -> bool:\n \"\"\"\n Blacklist a given token.\n :param token_key: The token key to blacklist.\n :type token_key: str\n :return: True if the token was successfully blacklisted,\n otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: bool = await self._redis.setex(\n f\"blacklist:{token_key}\",\n self._blacklist_expiration_seconds,\n \"true\",\n )\n except RedisError as r_exc:\n logger.error(\"Error at blacklisting token. 
%s\", r_exc)\n raise r_exc\n return blacklisted\n\n @handle_redis_exceptions\n @benchmark\n async def is_token_blacklisted(self, token_key: str) -> bool:\n \"\"\"\n Check if a given token is blacklisted.\n :param token_key: The token key to verify.\n :type token_key: str\n :return: True if the token is blacklisted, otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: Optional[str] = await self._redis.get(\n f\"blacklist\" f\":{token_key}\"\n )\n except RedisError as r_exc:\n logger.error(\"Error at checking if token is blacklisted. %s\", r_exc)\n raise r_exc\n return bool(blacklisted)" }, { "identifier": "UserService", "path": "app/services/infrastructure/user.py", "snippet": "class UserService:\n \"\"\"\n Service class for user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n user_repo: UserRepository,\n redis: Redis, # type: ignore\n ):\n self._user_repo: UserRepository = user_repo\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_user_by_id(self, user_id: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique identifier\n :param user_id: The unique identifier of the user\n :type user_id: UUID4\n :return: User information\n :rtype: Optional[UserResponse]\n \"\"\"\n user: Optional[User]\n try:\n user = await self._user_repo.read_by_id(IdSpecification(user_id))\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n detail: str = f\"User with id {user_id} not found in the system.\"\n logger.error(detail)\n raise NotFoundException(detail)\n user_response: Optional[\n UserResponse\n ] = await model_to_response( # type: ignore\n user, UserResponse\n )\n return user_response\n\n async def get_login_user(self, username: str) -> User:\n \"\"\"\n Retrieve user information for login purposes by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: User\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_username(\n UsernameSpecification(username)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with username: {username}\")\n return user\n\n async def get_user_by_username(\n self, username: str\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: UserResponse\n \"\"\"\n try:\n user: User = await self.get_login_user(username)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_by_email(\n self, email: EmailStr\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique email.\n :param email: The email to retrieve User from\n :type email: EmailStr\n :return: User found in database\n :rtype: Optional[UserResponse]\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with email: {email}\")\n return await model_to_response(user, UserResponse) 
# type: ignore\n\n async def get_user_id_by_email(self, email: EmailStr) -> UUID4:\n \"\"\"\n Read the user ID from the database with unique email.\n :param email: Email to retrieve User from\n :type email: EmailStr\n :return: User ID found in database\n :rtype: UUID4\n \"\"\"\n try:\n user_id: Optional[UUID4] = await self._user_repo.read_id_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user_id:\n raise ServiceException(f\"User ID not found with email: {email}\")\n return user_id\n\n async def register_user(\n self, user: Union[UserCreate, UserSuperCreate]\n ) -> Optional[UserCreateResponse]:\n \"\"\"\n Register a new user in the database\n :param user: Request object representing the user\n :type user: Union[UserCreate, UserSuperCreate]\n :return: Response object representing the created user in the\n database\n :rtype: UserCreateResponse\n \"\"\"\n try:\n created_user = await self._user_repo.create_user(user)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(\n created_user, UserCreateResponse # type: ignore\n )\n\n async def get_users(\n self, offset: Optional[NonNegativeInt], limit: Optional[PositiveInt]\n ) -> list[UserResponse]:\n \"\"\"\n Retrieve users' information from the table\n :param offset: Offset from where to start returning users\n :type offset: NonNegativeInt\n :param limit: Limit the number of results from query\n :type limit: PositiveInt\n :return: User information\n :rtype: list[UserResponse]\n \"\"\"\n try:\n users: list[User] = await self._user_repo.read_users(offset, limit)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n found_users: list[UserResponse] = [\n await model_to_response(user, UserResponse) # type: ignore\n for user in users\n ]\n return found_users\n\n async def update_user(\n self, user_id: UUID4, user: UserUpdate\n ) -> Optional[UserUpdateResponse]:\n \"\"\"\n Update user information from table\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :param user: Requested user information to update\n :type user: UserUpdate\n :return: User information\n :rtype: Optional[UserUpdateResponse]\n \"\"\"\n try:\n updated_user: Optional[User] = await self._user_repo.update_user(\n IdSpecification(user_id), user\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not updated_user:\n raise ServiceException(\n f\"User with user_id: {user_id} could not be updated\"\n )\n return await model_to_response(\n updated_user, UserUpdateResponse # type: ignore\n )\n\n async def delete_user(self, user_id: UUID4) -> dict[str, Any]:\n \"\"\"\n Deletes a user by its id\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :return: Data to confirmation info about the delete process\n :rtype: dict[str, Any]\n \"\"\"\n deleted: bool\n deleted_at: Optional[datetime]\n try:\n deleted = await self._user_repo.delete_user(\n IdSpecification(user_id)\n )\n deleted_at = datetime.now()\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n deleted = False\n deleted_at = None\n finally:\n return {\"ok\": deleted, \"deleted_at\": deleted_at}" }, { "identifier": "get_user_service", "path": "app/services/infrastructure/user.py", "snippet": "async def get_user_service(\n user_repo: 
Annotated[UserRepository, Depends(get_user_repository)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserService:\n \"\"\"\n Get an instance of the user service with the given repository.\n :param user_repo: User repository object for database connection\n :type user_repo: UserRepository\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: UserService instance with repository associated\n :rtype: UserService\n \"\"\"\n return UserService(user_repo, redis)" }, { "identifier": "send_password_changed_confirmation_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_password_changed_confirmation_email(\n email_to: EmailStr,\n username: str,\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n settings: Annotated[Settings, Depends(get_settings)],\n) -> bool:\n \"\"\"\n Send a password changed confirmation email\n :param email_to: The email address of the recipient with password\n changed\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :return: True if the email was sent; otherwise false\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PASSWORD_CHANGED_CONFIRMATION_SUBJECT}\" f\" {username}\"\n )\n template_str: str = await build_email_template(\n \"password_changed_confirmation.html\", init_settings\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"mailto:{settings.CONTACT_EMAIL}?subject=\"\n f\"{init_settings.PROJECT_NAME} password changed\",\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "send_reset_password_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_reset_password_email(\n email_to: EmailStr,\n username: str,\n token: str,\n settings: Annotated[Settings, Depends(get_settings)],\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> bool:\n \"\"\"\n Sends a password reset email to a user with the given email address\n :param email_to: The email address of the user\n :type email_to: EmailStr\n :param username: The username of the user\n :type username: str\n :param token: The reset password token generated for the user\n :type token: str\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: True if the email was sent successfully; False otherwise\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PROJECT_NAME} -\"\n f\" {init_settings.PASSWORD_RECOVERY_SUBJECT} {username}\"\n )\n template_str: str = await build_email_template(\n \"reset_password.html\", init_settings\n )\n link: str = (\n f\"{auth_settings.SERVER_URL}\"\n f\"{auth_settings.AUTH_URL}reset-password?token={token}\"\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n 
html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"valid_hours\": auth_settings.EMAIL_RESET_TOKEN_EXPIRE_HOURS,\n \"link\": link,\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "generate_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def generate_password_reset_token(\n email: EmailStr,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> str:\n \"\"\"\n Generate a password reset token for the given email address.\n :param email: The email to generate the reset token for\n :type email: EmailStr\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The password reset token\n :rtype: str\n \"\"\"\n payload: dict[str, Any] = generate_password_reset_payload(\n email, auth_settings\n )\n return encode_jwt(payload, auth_settings)" }, { "identifier": "verify_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def verify_password_reset_token(\n token: str,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> Optional[EmailStr]:\n \"\"\"\n Verify a password reset token and return the email address if valid.\n :param token: The JSON Web Token\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The email address\n :rtype: EmailStr\n \"\"\"\n decoded_token: Optional[dict[str, Any]] = decode_jwt(token, auth_settings)\n return decoded_token.get(\"sub\") if decoded_token else None" } ]
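The last two context entries above, generate_password_reset_token and verify_password_reset_token, wrap a JWT round-trip: the user's email goes into the token payload when the reset link is generated and is read back out of the "sub" claim when the link is used. The snippet below is a minimal, self-contained sketch of that pattern with PyJWT; the secret key, algorithm, and expiry are illustrative assumptions, not values taken from this repository's settings.

# Sketch of a password-reset token round-trip (secret, algorithm and expiry are assumed values).
from datetime import datetime, timedelta, timezone
from typing import Optional

import jwt  # PyJWT

SECRET_KEY = "change-me"          # assumption: stand-in for the project's auth settings
ALGORITHM = "HS256"               # assumption
RESET_TOKEN_EXPIRE_HOURS = 2      # assumption


def generate_reset_token(email: str) -> str:
    now = datetime.now(timezone.utc)
    payload = {
        "sub": email,                                             # who the token is for
        "iat": now,                                               # issued at
        "exp": now + timedelta(hours=RESET_TOKEN_EXPIRE_HOURS),   # expiry, checked on decode
    }
    return jwt.encode(payload, SECRET_KEY, algorithm=ALGORITHM)


def verify_reset_token(token: str) -> Optional[str]:
    try:
        decoded = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
    except jwt.PyJWTError:
        return None               # expired or tampered token
    return decoded.get("sub")     # the email, if the token is valid

In the snippets shown here the token is only emailed out; on the way back the address is re-derived from the "sub" claim rather than looked up from storage.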
import logging from typing import Annotated, Any, Optional from fastapi import ( APIRouter, Body, Depends, Header, HTTPException, Path, Request, status, ) from fastapi.security import OAuth2PasswordRequestForm from pydantic import EmailStr from redis.asyncio import Redis from starlette.datastructures import Address from app.api.deps import get_redis_dep from app.api.oauth2_validation import get_current_user, get_refresh_current_user from app.config.config import ( get_auth_settings, get_init_settings, get_settings, init_setting, ) from app.config.db.auth_settings import AuthSettings from app.config.init_settings import InitSettings from app.config.settings import Settings from app.core.security.password import verify_password from app.exceptions.exceptions import NotFoundException, ServiceException from app.models.sql.user import User as UserDB from app.schemas.external.msg import Msg from app.schemas.external.token import TokenResetPassword, TokenResponse from app.schemas.external.user import ( UserResponse, UserUpdate, UserUpdateResponse, ) from app.schemas.infrastructure.user import UserAuth from app.services.infrastructure.auth import common_auth_procedure from app.services.infrastructure.token import TokenService from app.services.infrastructure.user import UserService, get_user_service from app.tasks.email_tasks.email_tasks import ( send_password_changed_confirmation_email, send_reset_password_email, ) from app.utils.security.password import ( generate_password_reset_token, verify_password_reset_token, )
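The import block above pulls in Redis from redis.asyncio together with the get_redis_dep dependency, and the token-service snippet at the top of this record shows what they are used for: is_token_blacklisted treats the mere existence of a blacklist:{token_key} key as proof that a token has been revoked. Below is a self-contained sketch of that check; the host, key prefix, and TTL are assumptions made for the example, not this project's configuration.

# Sketch of a Redis-backed token denylist check (host, prefix and TTL are assumptions).
# Needs a Redis server reachable on localhost:6379 to actually run.
import asyncio

from redis.asyncio import Redis

BLACKLIST_PREFIX = "blacklist"    # mirrors the f"blacklist:{token_key}" key seen in the snippet above
TOKEN_TTL_SECONDS = 3600          # assumption: keep the entry roughly as long as the token stays valid


async def blacklist_token(redis: Redis, token_key: str) -> None:
    # The stored value is irrelevant; only the key's existence matters to the later check.
    await redis.setex(f"{BLACKLIST_PREFIX}:{token_key}", TOKEN_TTL_SECONDS, "1")


async def is_token_blacklisted(redis: Redis, token_key: str) -> bool:
    value = await redis.get(f"{BLACKLIST_PREFIX}:{token_key}")
    return value is not None


async def main() -> None:
    redis = Redis(host="localhost", port=6379, decode_responses=True)
    await blacklist_token(redis, "example-jti")
    print(await is_token_blacklisted(redis, "example-jti"))    # True
    print(await is_token_blacklisted(redis, "another-jti"))    # False
    await redis.close()


asyncio.run(main())

Using setex means a revocation entry expires on its own once the token it refers to could no longer be accepted anyway.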
14,314
UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "[email protected]"}, openapi_examples=init_setting.EMAIL_BODY_EXAMPLES, ), ], user_service: Annotated[UserService, Depends(get_user_service)], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password recovery. 
## Parameter: - `email:` **Path parameter that references the email used to recover the password** - `type:` **EmailStr** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param user_service: Dependency method for User service object :type user_service: UserService :param settings: Dependency method for cached setting object :type settings: config.Settings :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ try: user: Optional[UserResponse] = await user_service.get_user_by_email( email ) except ServiceException as exc: logger.error(exc) user = None if user: password_reset_token: str = generate_password_reset_token( email, auth_settings ) await send_reset_password_email( user.email, user.username, password_reset_token, settings, init_settings, auth_settings, ) return Msg(msg="If the email is registered, a reset link will be sent.") @router.post("/reset-password", response_model=Msg) async def reset_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user_service: Annotated[UserService, Depends(get_user_service)], token_reset_password: Annotated[
""" Authentication API Router. This module provides login and password recovery functionality. """ logger: logging.Logger = logging.getLogger(__name__) router: APIRouter = APIRouter(prefix="/auth", tags=["auth"]) @router.post("/login", response_model=TokenResponse) async def login( request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." 
logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "[email protected]"}, openapi_examples=init_setting.EMAIL_BODY_EXAMPLES, ), ], user_service: Annotated[UserService, Depends(get_user_service)], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password recovery. ## Parameter: - `email:` **Path parameter that references the email used to recover the password** - `type:` **EmailStr** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param user_service: Dependency method for User service object :type user_service: UserService :param settings: Dependency method for cached setting object :type settings: config.Settings :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ try: user: Optional[UserResponse] = await user_service.get_user_by_email( email ) except ServiceException as exc: logger.error(exc) user = None if user: password_reset_token: str = generate_password_reset_token( email, auth_settings ) await send_reset_password_email( user.email, user.username, password_reset_token, settings, init_settings, auth_settings, ) return Msg(msg="If the email is registered, a reset link will be sent.") @router.post("/reset-password", response_model=Msg) async def reset_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user_service: Annotated[UserService, Depends(get_user_service)], token_reset_password: Annotated[
TokenResetPassword,
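One behaviour of the recover_password handler in the code above is easy to miss: a failed lookup is logged and swallowed, and the endpoint answers with the same "If the email is registered, a reset link will be sent." message whether or not the address exists, so the route cannot be used to probe which accounts are registered. The sketch below isolates that pattern; find_user_by_email and send_reset_email are hypothetical stand-ins, not this project's services.

# Sketch of the uniform-response pattern used by the recover-password route
# (the lookup and email helpers below are hypothetical stand-ins).
from fastapi import APIRouter
from pydantic import BaseModel

router = APIRouter(prefix="/auth", tags=["auth"])


class Msg(BaseModel):
    msg: str


async def find_user_by_email(email: str):
    """Stand-in lookup: return a user object, or None if the address is unknown."""
    return None


async def send_reset_email(email: str, token: str) -> None:
    """Stand-in for the real email task."""


@router.post("/recover-password/{email}", response_model=Msg)
async def recover_password(email: str) -> Msg:
    user = await find_user_by_email(email)
    if user:
        # Only the expensive work (token generation, sending mail) depends on the account existing...
        await send_reset_email(email, token="opaque-reset-token")
    # ...the response never does, so callers cannot enumerate accounts.
    return Msg(msg="If the email is registered, a reset link will be sent.")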
12
2023-11-17 00:32:32+00:00
16k
vitant-lang/CBAM-ASPP
train.py
[ { "identifier": "DeepLab", "path": "nets/deeplabv3_plus.py", "snippet": "class DeepLab(nn.Module):\n\tdef __init__(self, num_classes, backbone=\"mobilenet\", pretrained=True, downsample_factor=16):\n\t\tsuper(DeepLab, self).__init__()\n\t\tif backbone==\"xception\":\n\t\t\t#----------------------------------#\n\t\t\t# 获得两个特征层\n\t\t\t# 浅层特征 [128,128,256]\n\t\t\t# 主干部分 [30,30,2048]\n\t\t\t#----------------------------------#\n\t\t\tself.backbone = xception(downsample_factor=downsample_factor, pretrained=pretrained)\n\t\t\tin_channels = 2048\n\t\t\tlow_level_channels = 256\n\t\telif backbone==\"mobilenet\":\n\t\t\t#----------------------------------#\n\t\t\t# 获得两个特征层\n\t\t\t# 浅层特征 [128,128,24]\n\t\t\t# 主干部分 [30,30,320]\n\t\t\t#----------------------------------#\n\t\t\tself.backbone = MobileNetV2(downsample_factor=downsample_factor, pretrained=pretrained)\n\t\t\tin_channels = 320\n\t\t\tlow_level_channels = 24\n\t\telse:\n\t\t\traise ValueError('Unsupported backbone - `{}`, Use mobilenet, xception.'.format(backbone))\n\n\t\t#-----------------------------------------#\n\t\t# ASPP特征提取模块\n\t\t# 利用不同膨胀率的膨胀卷积进行特征提取\n\t\t#-----------------------------------------#\n\t\tself.aspp = ASPP(dim_in=in_channels, dim_out=256, rate=16//downsample_factor)\n\n\t\t#----------------------------------#\n\t\t# 浅层特征边\n\t\t#----------------------------------#\n\t\tself.shortcut_conv = nn.Sequential(\n\t\t\tnn.Conv2d(low_level_channels, 48, 1),\n\t\t\tnn.BatchNorm2d(48),\n\t\t\tnn.ReLU(inplace=True)\n\t\t)\n\n\t\tself.cat_conv = nn.Sequential(\n\t\t\tnn.Conv2d(48+256, 256, 3, stride=1, padding=1),\n\t\t\tnn.BatchNorm2d(256),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Dropout(0.5),\n\n\t\t\tnn.Conv2d(256, 256, 3, stride=1, padding=1),\n\t\t\tnn.BatchNorm2d(256),\n\t\t\tnn.ReLU(inplace=True),\n\n\t\t\tnn.Dropout(0.1),\n\t\t)\n\t\tself.cls_conv = nn.Conv2d(256, num_classes, 1, stride=1)\n\n\tdef forward(self, x):\n\t\tH, W = x.size(2), x.size(3)\n\t\t#-----------------------------------------#\n\t\t# 获得两个特征层\n\t\t# low_level_features: 浅层特征-进行卷积处理\n\t\t# x : 主干部分-利用ASPP结构进行加强特征提取\n\t\t#-----------------------------------------#\n\t\tlow_level_features, x = self.backbone(x)\n\n\n\t\tx = self.aspp(x)\n\t\tlow_level_features = self.shortcut_conv(low_level_features)\n\n\t\t#-----------------------------------------#\n\t\t# 将加强特征边上采样\n\t\t# 与浅层特征堆叠后利用卷积进行特征提取\n\t\t#-----------------------------------------#\n\t\tx = F.interpolate(x, size=(low_level_features.size(2), low_level_features.size(3)), mode='bilinear', align_corners=True)\n\t\tx = self.cat_conv(torch.cat((x, low_level_features), dim=1))\n\t\tx = self.cls_conv(x)\n\t\tx = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)\n\t\treturn x" }, { "identifier": "get_lr_scheduler", "path": "nets/deeplabv3_training.py", "snippet": "def get_lr_scheduler(lr_decay_type, lr, min_lr, total_iters, warmup_iters_ratio = 0.1, warmup_lr_ratio = 0.1, no_aug_iter_ratio = 0.3, step_num = 10):\n def yolox_warm_cos_lr(lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter, iters):\n if iters <= warmup_total_iters:\n # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start\n lr = (lr - warmup_lr_start) * pow(iters / float(warmup_total_iters), 2) + warmup_lr_start\n elif iters >= total_iters - no_aug_iter:\n lr = min_lr\n else:\n lr = min_lr + 0.5 * (lr - min_lr) * (\n 1.0 + math.cos(math.pi* (iters - warmup_total_iters) / (total_iters - warmup_total_iters - no_aug_iter))\n )\n return lr\n\n def step_lr(lr, decay_rate, step_size, 
iters):\n if step_size < 1:\n raise ValueError(\"step_size must above 1.\")\n n = iters // step_size\n out_lr = lr * decay_rate ** n\n return out_lr\n\n if lr_decay_type == \"cos\":\n warmup_total_iters = min(max(warmup_iters_ratio * total_iters, 1), 3)\n warmup_lr_start = max(warmup_lr_ratio * lr, 1e-6)\n no_aug_iter = min(max(no_aug_iter_ratio * total_iters, 1), 15)\n func = partial(yolox_warm_cos_lr ,lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter)\n else:\n decay_rate = (min_lr / lr) ** (1 / (step_num - 1))\n step_size = total_iters / step_num\n func = partial(step_lr, lr, decay_rate, step_size)\n\n return func" }, { "identifier": "set_optimizer_lr", "path": "nets/deeplabv3_training.py", "snippet": "def set_optimizer_lr(optimizer, lr_scheduler_func, epoch):\n lr = lr_scheduler_func(epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr" }, { "identifier": "weights_init", "path": "nets/deeplabv3_training.py", "snippet": "def weights_init(net, init_type='normal', init_gain=0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and classname.find('Conv') != -1:\n if init_type == 'normal':\n torch.nn.init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n elif classname.find('BatchNorm2d') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n print('initialize network with %s type' % init_type)\n net.apply(init_func)" }, { "identifier": "LossHistory", "path": "utils/callbacks.py", "snippet": "class LossHistory():\n def __init__(self, log_dir, model, input_shape):\n self.log_dir = log_dir\n self.losses = []\n self.val_loss = []\n \n os.makedirs(self.log_dir)\n self.writer = SummaryWriter(self.log_dir)\n try:\n dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])\n self.writer.add_graph(model, dummy_input)\n except:\n pass\n\n def append_loss(self, epoch, loss, val_loss):\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n self.losses.append(loss)\n self.val_loss.append(val_loss)\n\n with open(os.path.join(self.log_dir, \"epoch_loss.txt\"), 'a') as f:\n f.write(str(loss))\n f.write(\"\\n\")\n with open(os.path.join(self.log_dir, \"epoch_val_loss.txt\"), 'a') as f:\n f.write(str(val_loss))\n f.write(\"\\n\")\n\n self.writer.add_scalar('loss', loss, epoch)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n self.loss_plot()\n\n def loss_plot(self):\n iters = range(len(self.losses))\n\n plt.figure()\n plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')\n plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')\n try:\n if len(self.losses) < 25:\n num = 5\n else:\n num = 15\n \n plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')\n plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')\n except:\n pass\n\n plt.grid(True)\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend(loc=\"upper right\")\n\n plt.savefig(os.path.join(self.log_dir, \"epoch_loss.png\"))\n\n plt.cla()\n 
plt.close(\"all\")" }, { "identifier": "EvalCallback", "path": "utils/callbacks.py", "snippet": "class EvalCallback():\n def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \\\n miou_out_path=\".temp_miou_out\", eval_flag=True, period=1):\n super(EvalCallback, self).__init__()\n \n self.net = net\n self.input_shape = input_shape\n self.num_classes = num_classes\n self.image_ids = image_ids\n self.dataset_path = dataset_path\n self.log_dir = log_dir\n self.cuda = cuda\n self.miou_out_path = miou_out_path\n self.eval_flag = eval_flag\n self.period = period\n \n self.image_ids = [image_id.split()[0] for image_id in image_ids]\n self.mious = [0]\n self.epoches = [0]\n if self.eval_flag:\n with open(os.path.join(self.log_dir, \"epoch_miou.txt\"), 'a') as f:\n f.write(str(0))\n f.write(\"\\n\")\n\n def get_miou_png(self, image):\n #---------------------------------------------------------#\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\n #---------------------------------------------------------#\n image = cvtColor(image)\n orininal_h = np.array(image).shape[0]\n orininal_w = np.array(image).shape[1]\n #---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n # 也可以直接resize进行识别\n #---------------------------------------------------------#\n image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))\n #---------------------------------------------------------#\n # 添加上batch_size维度\n #---------------------------------------------------------#\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)\n\n with torch.no_grad():\n images = torch.from_numpy(image_data)\n if self.cuda:\n images = images.cuda()\n \n #---------------------------------------------------#\n # 图片传入网络进行预测\n #---------------------------------------------------#\n pr = self.net(images)[0]\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()\n #--------------------------------------#\n # 将灰条部分截取掉\n #--------------------------------------#\n pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \\\n int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]\n #---------------------------------------------------#\n # 进行图片的resize\n #---------------------------------------------------#\n pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = pr.argmax(axis=-1)\n \n image = Image.fromarray(np.uint8(pr))\n return image\n \n def on_epoch_end(self, epoch, model_eval):\n if epoch % self.period == 0 and self.eval_flag:\n self.net = model_eval\n gt_dir = os.path.join(self.dataset_path, \"VOC2007/SegmentationClass/\")\n pred_dir = os.path.join(self.miou_out_path, 'detection-results')\n if not os.path.exists(self.miou_out_path):\n os.makedirs(self.miou_out_path)\n if not os.path.exists(pred_dir):\n os.makedirs(pred_dir)\n print(\"Get miou.\")\n for image_id in tqdm(self.image_ids):\n #-------------------------------#\n # 从文件中读取图像\n #-------------------------------#\n image_path = os.path.join(self.dataset_path, \"VOC2007/JPEGImages/\"+image_id+\".jpg\")\n image = Image.open(image_path)\n #------------------------------#\n # 获得预测txt\n 
#------------------------------#\n image = self.get_miou_png(image)\n image.save(os.path.join(pred_dir, image_id + \".png\"))\n \n print(\"Calculate miou.\")\n _, IoUs, _, _ = compute_mIoU(gt_dir, pred_dir, self.image_ids, self.num_classes, None) # 执行计算mIoU的函数\n temp_miou = np.nanmean(IoUs) * 100\n\n self.mious.append(temp_miou)\n self.epoches.append(epoch)\n\n with open(os.path.join(self.log_dir, \"epoch_miou.txt\"), 'a') as f:\n f.write(str(temp_miou))\n f.write(\"\\n\")\n \n plt.figure()\n plt.plot(self.epoches, self.mious, 'red', linewidth = 2, label='train miou')\n\n plt.grid(True)\n plt.xlabel('Epoch')\n plt.ylabel('Miou')\n plt.title('A Miou Curve')\n plt.legend(loc=\"upper right\")\n\n plt.savefig(os.path.join(self.log_dir, \"epoch_miou.png\"))\n plt.cla()\n plt.close(\"all\")\n\n print(\"Get miou done.\")\n shutil.rmtree(self.miou_out_path)" }, { "identifier": "DeeplabDataset", "path": "utils/dataloader.py", "snippet": "class DeeplabDataset(Dataset):\n def __init__(self, annotation_lines, input_shape, num_classes, train, dataset_path):\n super(DeeplabDataset, self).__init__()\n self.annotation_lines = annotation_lines\n self.length = len(annotation_lines)\n self.input_shape = input_shape\n self.num_classes = num_classes\n self.train = train\n self.dataset_path = dataset_path\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n annotation_line = self.annotation_lines[index]\n name = annotation_line.split()[0]\n\n #-------------------------------#\n # 从文件中读取图像\n #-------------------------------#\n jpg = Image.open(os.path.join(os.path.join(self.dataset_path, \"VOC2007/JPEGImages\"), name + \".jpg\"))\n png = Image.open(os.path.join(os.path.join(self.dataset_path, \"VOC2007/SegmentationClass\"), name + \".png\"))\n #-------------------------------#\n # 数据增强\n #-------------------------------#\n jpg, png = self.get_random_data(jpg, png, self.input_shape, random = self.train)\n\n jpg = np.transpose(preprocess_input(np.array(jpg, np.float64)), [2,0,1])\n png = np.array(png)\n png[png >= self.num_classes] = self.num_classes\n #-------------------------------------------------------#\n # 转化成one_hot的形式\n # 在这里需要+1是因为voc数据集有些标签具有白边部分\n # 我们需要将白边部分进行忽略,+1的目的是方便忽略。\n #-------------------------------------------------------#\n seg_labels = np.eye(self.num_classes + 1)[png.reshape([-1])]\n seg_labels = seg_labels.reshape((int(self.input_shape[0]), int(self.input_shape[1]), self.num_classes + 1))\n\n return jpg, png, seg_labels\n\n def rand(self, a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n def get_random_data(self, image, label, input_shape, jitter=.3, hue=.1, sat=0.7, val=0.3, random=True):\n image = cvtColor(image)\n label = Image.fromarray(np.array(label))\n #------------------------------#\n # 获得图像的高宽与目标高宽\n #------------------------------#\n iw, ih = image.size\n h, w = input_shape\n\n if not random:\n iw, ih = image.size\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', [w, h], (128,128,128))\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\n\n label = label.resize((nw,nh), Image.NEAREST)\n new_label = Image.new('L', [w, h], (0))\n new_label.paste(label, ((w-nw)//2, (h-nh)//2))\n return new_image, new_label\n\n #------------------------------------------#\n # 对图像进行缩放并且进行长和宽的扭曲\n #------------------------------------------#\n new_ar = iw/ih * self.rand(1-jitter,1+jitter) / self.rand(1-jitter,1+jitter)\n scale = self.rand(0.25, 2)\n if new_ar < 1:\n nh = 
int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = image.resize((nw,nh), Image.BICUBIC)\n label = label.resize((nw,nh), Image.NEAREST)\n \n #------------------------------------------#\n # 翻转图像\n #------------------------------------------#\n flip = self.rand()<.5\n if flip: \n image = image.transpose(Image.FLIP_LEFT_RIGHT)\n label = label.transpose(Image.FLIP_LEFT_RIGHT)\n \n #------------------------------------------#\n # 将图像多余的部分加上灰条\n #------------------------------------------#\n dx = int(self.rand(0, w-nw))\n dy = int(self.rand(0, h-nh))\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_label = Image.new('L', (w,h), (0))\n new_image.paste(image, (dx, dy))\n new_label.paste(label, (dx, dy))\n image = new_image\n label = new_label\n\n image_data = np.array(image, np.uint8)\n\n #------------------------------------------#\n # 高斯模糊\n #------------------------------------------#\n blur = self.rand() < 0.25\n if blur: \n image_data = cv2.GaussianBlur(image_data, (5, 5), 0)\n\n #------------------------------------------#\n # 旋转\n #------------------------------------------#\n rotate = self.rand() < 0.25\n if rotate: \n center = (w // 2, h // 2)\n rotation = np.random.randint(-10, 11)\n M = cv2.getRotationMatrix2D(center, -rotation, scale=1)\n image_data = cv2.warpAffine(image_data, M, (w, h), flags=cv2.INTER_CUBIC, borderValue=(128,128,128))\n label = cv2.warpAffine(np.array(label, np.uint8), M, (w, h), flags=cv2.INTER_NEAREST, borderValue=(0))\n\n #---------------------------------#\n # 对图像进行色域变换\n # 计算色域变换的参数\n #---------------------------------#\n r = np.random.uniform(-1, 1, 3) * [hue, sat, val] + 1\n #---------------------------------#\n # 将图像转到HSV上\n #---------------------------------#\n hue, sat, val = cv2.split(cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV))\n dtype = image_data.dtype\n #---------------------------------#\n # 应用变换\n #---------------------------------#\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n image_data = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n image_data = cv2.cvtColor(image_data, cv2.COLOR_HSV2RGB)\n \n return image_data, label" }, { "identifier": "deeplab_dataset_collate", "path": "utils/dataloader.py", "snippet": "def deeplab_dataset_collate(batch):\n images = []\n pngs = []\n seg_labels = []\n for img, png, labels in batch:\n images.append(img)\n pngs.append(png)\n seg_labels.append(labels)\n images = torch.from_numpy(np.array(images)).type(torch.FloatTensor)\n pngs = torch.from_numpy(np.array(pngs)).long()\n seg_labels = torch.from_numpy(np.array(seg_labels)).type(torch.FloatTensor)\n return images, pngs, seg_labels" }, { "identifier": "download_weights", "path": "utils/utils.py", "snippet": "def download_weights(backbone, model_dir=\"./model_data\"):\n import os\n from torch.hub import load_state_dict_from_url\n \n download_urls = {\n 'mobilenet' : 'https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/mobilenet_v2.pth.tar',\n 'xception' : 'https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/xception_pytorch_imagenet.pth',\n }\n url = download_urls[backbone]\n \n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n load_state_dict_from_url(url, model_dir)" }, { "identifier": "show_config", "path": "utils/utils.py", "snippet": "def show_config(**kwargs):\n 
print('Configurations:')\n print('-' * 70)\n print('|%25s | %40s|' % ('keys', 'values'))\n print('-' * 70)\n for key, value in kwargs.items():\n print('|%25s | %40s|' % (str(key), str(value)))\n print('-' * 70)" }, { "identifier": "fit_one_epoch", "path": "utils/utils_fit.py", "snippet": "def fit_one_epoch(model_train, model, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, dice_loss, focal_loss, cls_weights, num_classes, \\\n fp16, scaler, save_period, save_dir, local_rank=0):\n total_loss = 0\n total_f_score = 0\n\n val_loss = 0\n val_f_score = 0\n\n if local_rank == 0:\n print('Start Train')\n pbar = tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\n model_train.train()\n for iteration, batch in enumerate(gen):\n if iteration >= epoch_step: \n break\n imgs, pngs, labels = batch\n\n with torch.no_grad():\n weights = torch.from_numpy(cls_weights)\n if cuda:\n imgs = imgs.cuda(local_rank)\n pngs = pngs.cuda(local_rank)\n labels = labels.cuda(local_rank)\n weights = weights.cuda(local_rank)\n #----------------------#\n # 清零梯度\n #----------------------#\n optimizer.zero_grad()\n if not fp16:\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n #----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n\n with torch.no_grad():\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n\n #----------------------#\n # 反向传播\n #----------------------#\n loss.backward()\n optimizer.step()\n else:\n from torch.cuda.amp import autocast\n with autocast():\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n #----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n\n with torch.no_grad():\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n \n #----------------------#\n # 反向传播\n #----------------------#\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n total_loss += loss.item()\n total_f_score += _f_score.item()\n \n if local_rank == 0:\n pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1), \n 'f_score' : total_f_score / (iteration + 1),\n 'lr' : get_lr(optimizer)})\n pbar.update(1)\n\n if local_rank == 0:\n pbar.close()\n print('Finish Train')\n print('Start Validation')\n pbar = tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\n\n model_train.eval()\n for iteration, batch in enumerate(gen_val):\n if iteration >= epoch_step_val:\n break\n imgs, pngs, labels = batch\n with torch.no_grad():\n weights = torch.from_numpy(cls_weights)\n if cuda:\n imgs = imgs.cuda(local_rank)\n pngs = pngs.cuda(local_rank)\n labels = labels.cuda(local_rank)\n weights = weights.cuda(local_rank)\n\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n 
#----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n\n val_loss += loss.item()\n val_f_score += _f_score.item()\n \n if local_rank == 0:\n pbar.set_postfix(**{'val_loss' : val_loss / (iteration + 1),\n 'f_score' : val_f_score / (iteration + 1),\n 'lr' : get_lr(optimizer)})\n pbar.update(1)\n \n if local_rank == 0:\n pbar.close()\n print('Finish Validation')\n loss_history.append_loss(epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)\n eval_callback.on_epoch_end(epoch + 1, model_train)\n print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch))\n print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))\n \n #-----------------------------------------------#\n # 保存权值\n #-----------------------------------------------#\n if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:\n torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)))\n\n if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):\n print('Save best model to best_epoch_weights.pth')\n torch.save(model.state_dict(), os.path.join(save_dir, \"best_epoch_weights.pth\"))\n \n torch.save(model.state_dict(), os.path.join(save_dir, \"last_epoch_weights.pth\"))" } ]
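Two of the helpers in this record's context, get_lr_scheduler and set_optimizer_lr, implement a warmup-then-cosine learning-rate rule that is re-applied to the optimizer at the start of every epoch: a short quadratic ramp up to the base rate, a cosine decay, and a flat tail at the minimum rate. The stripped-down sketch below reproduces that shape so it is easier to see in isolation; the epoch counts and learning rates are illustrative, not the script's defaults.

# Sketch of the warmup + cosine LR rule behind get_lr_scheduler/set_optimizer_lr
# (epoch counts and rates below are illustrative).
import math
from functools import partial

import torch


def warm_cos_lr(lr, min_lr, total_epochs, warmup_epochs, warmup_lr_start, tail_epochs, epoch):
    if epoch <= warmup_epochs:
        # quadratic ramp from warmup_lr_start up to the base rate
        return (lr - warmup_lr_start) * (epoch / float(warmup_epochs)) ** 2 + warmup_lr_start
    if epoch >= total_epochs - tail_epochs:
        return min_lr                                    # flat tail at the minimum rate
    progress = (epoch - warmup_epochs) / (total_epochs - warmup_epochs - tail_epochs)
    return min_lr + 0.5 * (lr - min_lr) * (1.0 + math.cos(math.pi * progress))


def set_optimizer_lr(optimizer, lr_scheduler_func, epoch):
    lr = lr_scheduler_func(epoch)
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr


model = torch.nn.Linear(4, 2)                            # stand-in model
optimizer = torch.optim.SGD(model.parameters(), lr=7e-3, momentum=0.9)
schedule = partial(warm_cos_lr, 7e-3, 7e-5, 100, 3, 7e-4, 15)   # 100 epochs, 3 warmup, 15 tail

for epoch in (0, 3, 50, 99):
    set_optimizer_lr(optimizer, schedule, epoch)
    print(epoch, optimizer.param_groups[0]["lr"])

The real helper also offers a step-decay branch; only the cosine branch is sketched here.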
import os import datetime import numpy as np import torch import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim as optim from torch.utils.data import DataLoader from nets.deeplabv3_plus import DeepLab from nets.deeplabv3_training import (get_lr_scheduler, set_optimizer_lr, weights_init) from utils.callbacks import LossHistory, EvalCallback from utils.dataloader import DeeplabDataset, deeplab_dataset_collate from utils.utils import download_weights, show_config from utils.utils_fit import fit_one_epoch from torch.cuda.amp import GradScaler as GradScaler
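The import block above brings in DeeplabDataset together with deeplab_dataset_collate, the custom collate_fn that turns a batch of numpy triples (preprocessed image, per-pixel class map, one-hot labels) into float and long tensors. The self-contained sketch below pushes synthetic arrays through the same collate logic to show the resulting shapes and dtypes; the spatial size and class count are made up for the example, and FakeSegDataset is a stand-in for the real VOC-format dataset.

# Sketch of the deeplab_dataset_collate behaviour with synthetic data
# (image size, class count and FakeSegDataset are illustrative stand-ins).
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset

NUM_CLASSES, H, W = 3, 64, 64


class FakeSegDataset(Dataset):
    """Stand-in for DeeplabDataset: yields (image, label map, one-hot labels) as numpy arrays."""

    def __len__(self):
        return 8

    def __getitem__(self, index):
        img = np.random.rand(3, H, W).astype(np.float64)             # CHW image, already preprocessed
        png = np.random.randint(0, NUM_CLASSES, (H, W))               # per-pixel class ids
        seg = np.eye(NUM_CLASSES + 1)[png.reshape(-1)].reshape(H, W, NUM_CLASSES + 1)
        return img, png, seg


def seg_collate(batch):
    images, pngs, seg_labels = [], [], []
    for img, png, labels in batch:
        images.append(img)
        pngs.append(png)
        seg_labels.append(labels)
    images = torch.from_numpy(np.array(images)).type(torch.FloatTensor)
    pngs = torch.from_numpy(np.array(pngs)).long()
    seg_labels = torch.from_numpy(np.array(seg_labels)).type(torch.FloatTensor)
    return images, pngs, seg_labels


loader = DataLoader(FakeSegDataset(), batch_size=4, shuffle=True, collate_fn=seg_collate)
imgs, pngs, segs = next(iter(loader))
print(imgs.shape, imgs.dtype)     # torch.Size([4, 3, 64, 64]) torch.float32
print(pngs.shape, pngs.dtype)     # torch.Size([4, 64, 64]) torch.int64
print(segs.shape, segs.dtype)     # torch.Size([4, 64, 64, 4]) torch.float32

The one-hot labels carry NUM_CLASSES + 1 channels, mirroring the extra channel the real dataset reserves for pixels it wants to ignore (the white label borders mentioned in its comments).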
11,757
#------------------------------------------------------------------# # torch 1.2不支持amp,建议使用torch 1.7.1及以上正确使用fp16 # 因此torch1.2这里显示"could not be resolve" #------------------------------------------------------------------# if fp16: scaler = GradScaler() else: scaler = None model_train = model.train() #----------------------------# # 多卡同步Bn #----------------------------# if sync_bn and ngpus_per_node > 1 and distributed: model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_train) elif sync_bn: print("Sync_bn is not support in one gpu or not distributed.") if Cuda: if distributed: #----------------------------# # 多卡平行运行 #----------------------------# model_train = model_train.cuda(local_rank) model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True) else: model_train = torch.nn.DataParallel(model) cudnn.benchmark = True model_train = model_train.cuda() #---------------------------# # 读取数据集对应的txt #---------------------------# with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"),"r") as f: train_lines = f.readlines() with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),"r") as f: val_lines = f.readlines() num_train = len(train_lines) num_val = len(val_lines) if local_rank == 0: show_config( num_classes = num_classes, backbone = backbone, model_path = model_path, input_shape = input_shape, \ Init_Epoch = Init_Epoch, Freeze_Epoch = Freeze_Epoch, UnFreeze_Epoch = UnFreeze_Epoch, Freeze_batch_size = Freeze_batch_size, Unfreeze_batch_size = Unfreeze_batch_size, Freeze_Train = Freeze_Train, \ Init_lr = Init_lr, Min_lr = Min_lr, optimizer_type = optimizer_type, momentum = momentum, lr_decay_type = lr_decay_type, \ save_period = save_period, save_dir = save_dir, num_workers = num_workers, num_train = num_train, num_val = num_val ) #---------------------------------------------------------# # 总训练世代指的是遍历全部数据的总次数 # 总训练步长指的是梯度下降的总次数 # 每个训练世代包含若干训练步长,每个训练步长进行一次梯度下降。 # 此处仅建议最低训练世代,上不封顶,计算时只考虑了解冻部分 #----------------------------------------------------------# wanted_step = 1.5e4 if optimizer_type == "sgd" else 0.5e4 total_step = num_train // Unfreeze_batch_size * UnFreeze_Epoch if total_step <= wanted_step: if num_train // Unfreeze_batch_size == 0: raise ValueError('数据集过小,无法进行训练,请扩充数据集。') wanted_epoch = wanted_step // (num_train // Unfreeze_batch_size) + 1 print("\n\033[1;33;44m[Warning] 使用%s优化器时,建议将训练总步长设置到%d以上。\033[0m"%(optimizer_type, wanted_step)) print("\033[1;33;44m[Warning] 本次运行的总训练数据量为%d,Unfreeze_batch_size为%d,共训练%d个Epoch,计算出总训练步长为%d。\033[0m"%(num_train, Unfreeze_batch_size, UnFreeze_Epoch, total_step)) print("\033[1;33;44m[Warning] 由于总训练步长为%d,小于建议总步长%d,建议设置总世代为%d。\033[0m"%(total_step, wanted_step, wanted_epoch)) #------------------------------------------------------# # 主干特征提取网络特征通用,冻结训练可以加快训练速度 # 也可以在训练初期防止权值被破坏。 # Init_Epoch为起始世代 # Interval_Epoch为冻结训练的世代 # Epoch总训练世代 # 提示OOM或者显存不足请调小Batch_size #------------------------------------------------------# if True: UnFreeze_flag = False #------------------------------------# # 冻结一定部分训练 #------------------------------------# if Freeze_Train: for param in model.backbone.parameters(): param.requires_grad = False #-------------------------------------------------------------------# # 如果不冻结训练的话,直接设置batch_size为Unfreeze_batch_size #-------------------------------------------------------------------# batch_size = Freeze_batch_size if Freeze_Train else Unfreeze_batch_size 
#-------------------------------------------------------------------# # 判断当前batch_size,自适应调整学习率 #-------------------------------------------------------------------# nbs = 16 lr_limit_max = 5e-4 if optimizer_type == 'adam' else 1e-1 lr_limit_min = 3e-4 if optimizer_type == 'adam' else 5e-4 if backbone == "xception": lr_limit_max = 1e-4 if optimizer_type == 'adam' else 1e-1 lr_limit_min = 1e-4 if optimizer_type == 'adam' else 5e-4 Init_lr_fit = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max) Min_lr_fit = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2) #---------------------------------------# # 根据optimizer_type选择优化器 #---------------------------------------# optimizer = { 'adam' : optim.Adam(model.parameters(), Init_lr_fit, betas = (momentum, 0.999), weight_decay = weight_decay), 'sgd' : optim.SGD(model.parameters(), Init_lr_fit, momentum = momentum, nesterov=True, weight_decay = weight_decay) }[optimizer_type] #---------------------------------------# # 获得学习率下降的公式 #---------------------------------------# lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch) #---------------------------------------# # 判断每一个世代的长度 #---------------------------------------# epoch_step = num_train // batch_size epoch_step_val = num_val // batch_size if epoch_step == 0 or epoch_step_val == 0: raise ValueError("数据集过小,无法继续进行训练,请扩充数据集。")
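The learning rates in the cropped code above are not used as typed: they are first scaled linearly with the batch size against a nominal batch size of 16 and then clamped into optimizer-specific limits. Plugging in the values set further down in this record's full source (SGD, the mobilenet backbone, Init_lr = 7e-4, a freeze-phase batch size of 8) gives Init_lr_fit = 5e-4, because 8/16 * 7e-4 = 3.5e-4 falls below the SGD lower limit and is clamped up. The small sketch below restates that rule and reproduces the numbers.

# Restatement of the batch-size LR rescaling above, evaluated with this record's own settings
# (SGD, mobilenet, Init_lr = 7e-4, Freeze_batch_size = 8).
def fit_lr(lr, batch_size, nbs=16, lr_limit_min=5e-4, lr_limit_max=1e-1):
    """Scale lr linearly with batch size, then clamp into [lr_limit_min, lr_limit_max]."""
    return min(max(batch_size / nbs * lr, lr_limit_min), lr_limit_max)


Init_lr, batch_size = 7e-4, 8
Min_lr = Init_lr * 0.01

Init_lr_fit = fit_lr(Init_lr, batch_size)                                                     # 3.5e-4 -> clamped up to 5e-4
Min_lr_fit = fit_lr(Min_lr, batch_size, lr_limit_min=5e-4 * 1e-2, lr_limit_max=1e-1 * 1e-2)   # -> 5e-6
print(Init_lr_fit, Min_lr_fit)    # 0.0005 5e-06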
''' 训练自己的语义分割模型一定需要注意以下几点: 1、训练前仔细检查自己的格式是否满足要求,该库要求数据集格式为VOC格式,需要准备好的内容有输入图片和标签 输入图片为.jpg图片,无需固定大小,传入训练前会自动进行resize。 灰度图会自动转成RGB图片进行训练,无需自己修改。 输入图片如果后缀非jpg,需要自己批量转成jpg后再开始训练。 标签为png图片,无需固定大小,传入训练前会自动进行resize。 由于许多同学的数据集是网络上下载的,标签格式并不符合,需要再度处理。一定要注意!标签的每个像素点的值就是这个像素点所属的种类。 网上常见的数据集总共对输入图片分两类,背景的像素点值为0,目标的像素点值为255。这样的数据集可以正常运行但是预测是没有效果的! 需要改成,背景的像素点值为0,目标的像素点值为1。 如果格式有误,参考:https://github.com/bubbliiiing/segmentation-format-fix 2、损失值的大小用于判断是否收敛,比较重要的是有收敛的趋势,即验证集损失不断下降,如果验证集损失基本上不改变的话,模型基本上就收敛了。 损失值的具体大小并没有什么意义,大和小只在于损失的计算方式,并不是接近于0才好。如果想要让损失好看点,可以直接到对应的损失函数里面除上10000。 训练过程中的损失值会保存在logs文件夹下的loss_%Y_%m_%d_%H_%M_%S文件夹中 3、训练好的权值文件保存在logs文件夹中,每个训练世代(Epoch)包含若干训练步长(Step),每个训练步长(Step)进行一次梯度下降。 如果只是训练了几个Step是不会保存的,Epoch和Step的概念要捋清楚一下。 ''' if __name__ == "__main__": #---------------------------------# # Cuda 是否使用Cuda # 没有GPU可以设置成False #---------------------------------# Cuda = True #---------------------------------------------------------------------# # distributed 用于指定是否使用单机多卡分布式运行 # 终端指令仅支持Ubuntu。CUDA_VISIBLE_DEVICES用于在Ubuntu下指定显卡。 # Windows系统下默认使用DP模式调用所有显卡,不支持DDP。 # DP模式: # 设置 distributed = False # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python train.py # DDP模式: # 设置 distributed = True # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py #---------------------------------------------------------------------# distributed = False #---------------------------------------------------------------------# # sync_bn 是否使用sync_bn,DDP模式多卡可用 #---------------------------------------------------------------------# sync_bn = False #---------------------------------------------------------------------# # fp16 是否使用混合精度训练 # 可减少约一半的显存、需要pytorch1.7.1以上 #---------------------------------------------------------------------# fp16 = False #-----------------------------------------------------# # num_classes 训练自己的数据集必须要修改的 # 自己需要的分类个数+1,如2+1 #-----------------------------------------------------# num_classes = 3 #---------------------------------# # 所使用的的主干网络: # mobilenet # xception #---------------------------------# backbone = "mobilenet" #----------------------------------------------------------------------------------------------------------------------------# # pretrained 是否使用主干网络的预训练权重,此处使用的是主干的权重,因此是在模型构建的时候进行加载的。 # 如果设置了model_path,则主干的权值无需加载,pretrained的值无意义。 # 如果不设置model_path,pretrained = True,此时仅加载主干开始训练。 # 如果不设置model_path,pretrained = False,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 #----------------------------------------------------------------------------------------------------------------------------# pretrained = False #----------------------------------------------------------------------------------------------------------------------------# # 权值文件的下载请看README,可以通过网盘下载。模型的 预训练权重 对不同数据集是通用的,因为特征是通用的。 # 模型的 预训练权重 比较重要的部分是 主干特征提取网络的权值部分,用于进行特征提取。 # 预训练权重对于99%的情况都必须要用,不用的话主干部分的权值太过随机,特征提取效果不明显,网络训练的结果也不会好 # 训练自己的数据集时提示维度不匹配正常,预测的东西都不一样了自然维度不匹配 # # 如果训练过程中存在中断训练的操作,可以将model_path设置成logs文件夹下的权值文件,将已经训练了一部分的权值再次载入。 # 同时修改下方的 冻结阶段 或者 解冻阶段 的参数,来保证模型epoch的连续性。 # # 当model_path = ''的时候不加载整个模型的权值。 # # 此处使用的是整个模型的权重,因此是在train.py进行加载的,pretrain不影响此处的权值加载。 # 如果想要让模型从主干的预训练权值开始训练,则设置model_path = '',pretrain = True,此时仅加载主干。 # 如果想要让模型从0开始训练,则设置model_path = '',pretrain = Fasle,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 # # 一般来讲,网络从0开始的训练效果会很差,因为权值太过随机,特征提取效果不明显,因此非常、非常、非常不建议大家从0开始训练! 
# 如果一定要从0开始,可以了解imagenet数据集,首先训练分类模型,获得网络的主干部分权值,分类模型的 主干部分 和该模型通用,基于此进行训练。 #----------------------------------------------------------------------------------------------------------------------------# model_path = "model_data/deeplab_mobilenetv2.pth" #---------------------------------------------------------# # downsample_factor 下采样的倍数8、16 # 8下采样的倍数较小、理论上效果更好。 # 但也要求更大的显存 #---------------------------------------------------------# downsample_factor = 8 #------------------------------# # 输入图片的大小 #------------------------------# input_shape = [512, 512] #----------------------------------------------------------------------------------------------------------------------------# # 训练分为两个阶段,分别是冻结阶段和解冻阶段。设置冻结阶段是为了满足机器性能不足的同学的训练需求。 # 冻结训练需要的显存较小,显卡非常差的情况下,可设置Freeze_Epoch等于UnFreeze_Epoch,此时仅仅进行冻结训练。 # # 在此提供若干参数设置建议,各位训练者根据自己的需求进行灵活调整: # (一)从整个模型的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 其中:UnFreeze_Epoch可以在100-300之间调整。 # (二)从主干网络的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 120,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 120,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 其中:由于从主干网络的预训练权重开始训练,主干的权值不一定适合语义分割,需要更多的训练跳出局部最优解。 # UnFreeze_Epoch可以在120-300之间调整。 # Adam相较于SGD收敛的快一些。因此UnFreeze_Epoch理论上可以小一点,但依然推荐更多的Epoch。 # (三)batch_size的设置: # 在显卡能够接受的范围内,以大为好。显存不足与数据集大小无关,提示显存不足(OOM或者CUDA out of memory)请调小batch_size。 # 受到BatchNorm层影响,batch_size最小为2,不能为1。 # 正常情况下Freeze_batch_size建议为Unfreeze_batch_size的1-2倍。不建议设置的差距过大,因为关系到学习率的自动调整。 #----------------------------------------------------------------------------------------------------------------------------# #------------------------------------------------------------------# # 冻结阶段训练参数 # 此时模型的主干被冻结了,特征提取网络不发生改变 # 占用的显存较小,仅对网络进行微调 # Init_Epoch 模型当前开始的训练世代,其值可以大于Freeze_Epoch,如设置: # Init_Epoch = 60、Freeze_Epoch = 50、UnFreeze_Epoch = 100 # 会跳过冻结阶段,直接从60代开始,并调整对应的学习率。 # (断点续练时使用) # Freeze_Epoch 模型冻结训练的Freeze_Epoch # (当Freeze_Train=False时失效) # Freeze_batch_size 模型冻结训练的batch_size # (当Freeze_Train=False时失效) #------------------------------------------------------------------# Init_Epoch = 0 Freeze_Epoch = 10 Freeze_batch_size = 8 #------------------------------------------------------------------# # 解冻阶段训练参数 # 此时模型的主干不被冻结了,特征提取网络会发生改变 # 占用的显存较大,网络所有的参数都会发生改变 # UnFreeze_Epoch 模型总共训练的epoch # Unfreeze_batch_size 模型在解冻后的batch_size #------------------------------------------------------------------# UnFreeze_Epoch = 20 Unfreeze_batch_size = 4 #------------------------------------------------------------------# # Freeze_Train 是否进行冻结训练 # 默认先冻结主干训练后解冻训练。 #------------------------------------------------------------------# Freeze_Train = True 
#------------------------------------------------------------------#
#   Other training parameters: learning rate, optimizer, LR decay
#------------------------------------------------------------------#
#------------------------------------------------------------------#
#   Init_lr         maximum learning rate of the model
#                   with the Adam optimizer, Init_lr=5e-4 is recommended
#                   with the SGD optimizer, Init_lr=7e-3 is recommended
#   Min_lr          minimum learning rate of the model, defaults to 0.01 of the maximum learning rate
#------------------------------------------------------------------#
Init_lr             = 7e-4
Min_lr              = Init_lr * 0.01
#------------------------------------------------------------------#
#   optimizer_type  optimizer to use, either adam or sgd
#                   with the Adam optimizer, Init_lr=5e-4 is recommended
#                   with the SGD optimizer, Init_lr=7e-3 is recommended
#   momentum        momentum parameter used inside the optimizer
#   weight_decay    weight decay, helps prevent overfitting
#                   adam causes weight_decay errors, so set it to 0 when using adam.
#------------------------------------------------------------------#
optimizer_type      = "sgd"
momentum            = 0.9
weight_decay        = 1e-4    # 1e-4 for sgd
#------------------------------------------------------------------#
#   lr_decay_type   learning rate decay schedule, either 'step' or 'cos'
#------------------------------------------------------------------#
lr_decay_type       = 'cos'
#------------------------------------------------------------------#
#   save_period     save the weights every save_period epochs
#------------------------------------------------------------------#
save_period         = 800
#------------------------------------------------------------------#
#   save_dir        folder where weights and log files are saved
#------------------------------------------------------------------#
save_dir            = 'logs'
#------------------------------------------------------------------#
#   eval_flag       whether to evaluate on the validation set during training
#   eval_period     evaluate every eval_period epochs; frequent evaluation is not
#                   recommended, since it is time consuming and slows training down a lot
#   The mAP obtained here will differ from the one from get_map.py, for two reasons:
#   (1) the mAP here is the validation-set mAP.
#   (2) the evaluation settings here are conservative in order to speed evaluation up.
#------------------------------------------------------------------#
eval_flag           = True
eval_period         = 400    # started running on 7.13, 10:40
#------------------------------------------------------------------#
#   VOCdevkit_path  dataset path
#------------------------------------------------------------------#
VOCdevkit_path      = 'VOCdevkit'
#------------------------------------------------------------------#
#   Recommended settings:
#   few classes (a handful): set to True
#   many classes (a dozen or more) with a large batch_size (above 10): set to True
#   many classes (a dozen or more) with a small batch_size (below 10): set to False
#------------------------------------------------------------------#
dice_loss       = False
#------------------------------------------------------------------#
#   Whether to use focal loss to counter positive/negative sample imbalance
#------------------------------------------------------------------#
focal_loss      = False
#------------------------------------------------------------------#
#   Whether to assign different loss weights to different classes; balanced by default.
#   If set, use a numpy array whose length equals num_classes.
#   For example:
#   num_classes = 3
#   cls_weights = np.array([1, 2, 3], np.float32)
#------------------------------------------------------------------#
cls_weights     = np.ones([num_classes], np.float32)
#------------------------------------------------------------------#
#   num_workers     whether to use multi-threaded data loading; 1 disables it
#                   enabling it speeds up data loading but uses more memory
#                   in keras, enabling multi-threading sometimes actually makes things slower
#                   only enable it when IO is the bottleneck, i.e. the GPU is much faster than image loading
#------------------------------------------------------------------#
num_workers     = 4

#------------------------------------------------------#
#   Set the GPUs to use
#------------------------------------------------------#
ngpus_per_node  = torch.cuda.device_count()
if distributed:
    dist.init_process_group(backend="nccl")
    local_rank  = int(os.environ["LOCAL_RANK"])
    rank        = int(os.environ["RANK"])
    device      = torch.device("cuda", local_rank)
    if local_rank == 0:
        print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...")
        print("Gpu Device Count : ", ngpus_per_node)
else:
    device          = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    local_rank      = 0

#----------------------------------------------------#
#   Download pretrained weights
#----------------------------------------------------#
if pretrained:
    if distributed:
        if local_rank == 0:
            download_weights(backbone)
        dist.barrier()
    else:
        download_weights(backbone)

model   = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained)
if not pretrained:
    weights_init(model)
if model_path != '':
    #------------------------------------------------------#
    #   See the README for the weight files (Baidu netdisk download)
    #------------------------------------------------------#
    if local_rank == 0:
        print('Load weights {}.'.format(model_path))
    #------------------------------------------------------#
    #   Load by matching the keys of the pretrained weights against the keys of the model
    #------------------------------------------------------#
    model_dict      = model.state_dict()
    pretrained_dict = torch.load(model_path, map_location = device)
    load_key, no_load_key, temp_dict = [], [], {}
    for k, v in pretrained_dict.items():
        if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
            temp_dict[k] = v
            load_key.append(k)
        else:
            no_load_key.append(k)
    model_dict.update(temp_dict)
    model.load_state_dict(model_dict)
    #------------------------------------------------------#
    #   Show the keys that could not be matched
    #------------------------------------------------------#
    if local_rank == 0:
        print("\nSuccessful Load Key:", str(load_key)[:500], "……\nSuccessful Load Key Num:", len(load_key))
        print("\nFail To Load Key:", str(no_load_key)[:500], "……\nFail To Load Key num:", len(no_load_key))
        print("\n\033[1;33;44mNote: it is normal for the head not to be loaded, but an error if the backbone is not loaded.\033[0m")

#----------------------#
#   Record the loss
#----------------------#
if local_rank == 0:
    time_str        = datetime.datetime.strftime(datetime.datetime.now(), '%Y_%m_%d_%H_%M_%S')
    log_dir         = os.path.join(save_dir, "loss_" + str(time_str))
    loss_history    = LossHistory(log_dir, model, input_shape=input_shape)
else:
    loss_history    = None

#------------------------------------------------------------------#
#   torch 1.2 does not support amp; use torch 1.7.1 or later for fp16,
#   which is why torch 1.2 reports "could not be resolved" here
#------------------------------------------------------------------#
if fp16:
    scaler = GradScaler()
else:
    scaler = None

model_train     = model.train()
#----------------------------#
#   Multi-GPU SyncBatchNorm
#----------------------------#
if sync_bn and ngpus_per_node > 1 and distributed:
    model_train = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_train)
elif sync_bn:
    print("Sync_bn is not support in one gpu or not distributed.")

if Cuda:
    if distributed:
        #----------------------------#
        #   Multi-GPU parallel run
        #----------------------------#
        model_train = model_train.cuda(local_rank)
        model_train = torch.nn.parallel.DistributedDataParallel(model_train, device_ids=[local_rank], find_unused_parameters=True)
    else:
        model_train = torch.nn.DataParallel(model)
        cudnn.benchmark = True
        model_train = model_train.cuda()

#---------------------------#
#   Read the dataset split txt files
#---------------------------#
with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/train.txt"), "r") as f:
    train_lines = f.readlines()
with open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"), "r") as f:
    val_lines = f.readlines()
num_train   = len(train_lines)
num_val     = len(val_lines)

if local_rank == 0:
    show_config(
        num_classes = num_classes, backbone = backbone, model_path = model_path, input_shape = input_shape, \
        Init_Epoch = Init_Epoch, Freeze_Epoch = Freeze_Epoch, UnFreeze_Epoch = UnFreeze_Epoch, Freeze_batch_size = Freeze_batch_size, Unfreeze_batch_size = Unfreeze_batch_size, Freeze_Train = Freeze_Train, \
        Init_lr = Init_lr, Min_lr = Min_lr, optimizer_type = optimizer_type, momentum = momentum, lr_decay_type = lr_decay_type, \
        save_period = save_period, save_dir = save_dir, num_workers = num_workers, num_train = num_train, num_val = num_val
    )
#---------------------------------------------------------#
#   The total number of epochs is the number of passes over the whole dataset.
#   The total number of steps is the number of gradient descent updates.
#   Each epoch contains several steps, and each step performs one gradient descent update.
#   Only a minimum number of epochs is suggested here, with no upper bound;
#   the calculation only considers the unfrozen phase.
#----------------------------------------------------------#
wanted_step = 1.5e4 if optimizer_type == "sgd" else 0.5e4
total_step  = num_train // Unfreeze_batch_size * UnFreeze_Epoch
if total_step <= wanted_step:
    if num_train // Unfreeze_batch_size == 0:
        raise ValueError('The dataset is too small to train on, please enlarge it.')
    wanted_epoch = wanted_step // (num_train // Unfreeze_batch_size) + 1
    print("\n\033[1;33;44m[Warning] When using the %s optimizer, the total number of training steps should be at least %d.\033[0m" % (optimizer_type, wanted_step))
    print("\033[1;33;44m[Warning] This run has %d training samples, an Unfreeze_batch_size of %d and %d epochs in total, giving %d total training steps.\033[0m" % (num_train, Unfreeze_batch_size, UnFreeze_Epoch, total_step))
    print("\033[1;33;44m[Warning] Since the total number of steps %d is below the recommended %d, setting the total number of epochs to %d is advised.\033[0m" % (total_step, wanted_step, wanted_epoch))

#------------------------------------------------------#
#   The backbone features are generic; freezing the backbone speeds up training
#   and prevents the weights from being damaged early in training.
#   Init_Epoch is the starting epoch
#   Interval_Epoch is the number of frozen-training epochs
#   Epoch is the total number of training epochs
#   If you hit OOM / insufficient GPU memory, reduce Batch_size
#------------------------------------------------------#
if True:
    UnFreeze_flag = False
    #------------------------------------#
    #   Freeze part of the model for training
    #------------------------------------#
    if Freeze_Train:
        for param in model.backbone.parameters():
            param.requires_grad = False
    #-------------------------------------------------------------------#
    #   If not freeze-training, set batch_size to Unfreeze_batch_size directly
    #-------------------------------------------------------------------#
    batch_size = Freeze_batch_size if Freeze_Train else Unfreeze_batch_size
    #-------------------------------------------------------------------#
    #   Adapt the learning rate to the current batch_size
    #-------------------------------------------------------------------#
    nbs             = 16
    lr_limit_max    = 5e-4 if optimizer_type == 'adam' else 1e-1
    lr_limit_min    = 3e-4 if optimizer_type == 'adam' else 5e-4
    if backbone == "xception":
        lr_limit_max    = 1e-4 if optimizer_type == 'adam' else 1e-1
        lr_limit_min    = 1e-4 if optimizer_type == 'adam' else 5e-4
    Init_lr_fit     = min(max(batch_size / nbs * Init_lr, lr_limit_min), lr_limit_max)
    Min_lr_fit      = min(max(batch_size / nbs * Min_lr, lr_limit_min * 1e-2), lr_limit_max * 1e-2)

    #---------------------------------------#
    #   Select the optimizer according to optimizer_type
    #---------------------------------------#
    optimizer = {
        'adam'  : optim.Adam(model.parameters(), Init_lr_fit, betas = (momentum, 0.999), weight_decay = weight_decay),
        'sgd'   : optim.SGD(model.parameters(), Init_lr_fit, momentum = momentum, nesterov=True, weight_decay = weight_decay)
    }[optimizer_type]

    #---------------------------------------#
    #   Get the learning rate decay function
    #---------------------------------------#
    lr_scheduler_func = get_lr_scheduler(lr_decay_type, Init_lr_fit, Min_lr_fit, UnFreeze_Epoch)

    #---------------------------------------#
    #   Determine the length of each epoch
    #---------------------------------------#
    epoch_step      = num_train // batch_size
    epoch_step_val  = num_val // batch_size

    if epoch_step == 0 or epoch_step_val == 0:
        raise ValueError("The dataset is too small to continue training, please enlarge it.")
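    #-------------------------------------------------------------------#
    #   Illustrative note (annotation added here, not part of the original
    #   script): the adaptive learning-rate rule above scales the base LR
    #   linearly with batch_size and then clamps it to the optimizer's
    #   limits. With the sgd settings above and a hypothetical batch_size
    #   of 8, for example:
    #       Init_lr_fit = min(max(8 / 16 * 7e-4, 5e-4), 1e-1) = 5e-4
    #       Min_lr_fit  = min(max(8 / 16 * 7e-6, 5e-6), 1e-3) = 5e-6
    #   so small batches are protected by lr_limit_min and large batches
    #   are capped by lr_limit_max.
    #-------------------------------------------------------------------#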
train_dataset = DeeplabDataset(train_lines, input_shape, num_classes, True, VOCdevkit_path)
6
2023-11-17 13:25:28+00:00
16k
CmosWolf1/Code_implementation_for_paper_SKZC
diffusiondet/detector.py
[ { "identifier": "SetCriterionDynamicK", "path": "diffusiondet/loss.py", "snippet": "class SetCriterionDynamicK(nn.Module):\n \"\"\" This class computes the loss for DiffusionDet.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n def __init__(self, cfg, num_classes, matcher, weight_dict, eos_coef, losses, use_focal):\n \"\"\" Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.cfg = cfg\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n self.use_focal = use_focal\n self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS\n if self.use_fed_loss:\n self.fed_loss_num_classes = 50\n from detectron2.data.detection_utils import get_fed_loss_cls_weights\n cls_weight_fun = lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER) # noqa\n fed_loss_cls_weights = cls_weight_fun()\n assert (\n len(fed_loss_cls_weights) == self.num_classes\n ), \"Please check the provided fed_loss_cls_weights. Their size should match num_classes\"\n self.register_buffer(\"fed_loss_cls_weights\", fed_loss_cls_weights)\n\n if self.use_focal:\n self.focal_loss_alpha = cfg.MODEL.DiffusionDet.ALPHA\n self.focal_loss_gamma = cfg.MODEL.DiffusionDet.GAMMA\n else:\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer('empty_weight', empty_weight)\n\n # copy-paste from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/roi_heads/fast_rcnn.py#L356\n def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):\n \"\"\"\n Args:\n gt_classes: a long tensor of shape R that contains the gt class label of each proposal.\n num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.\n Will sample negative classes if number of unique gt_classes is smaller than this value.\n num_classes: number of foreground classes\n weight: probabilities used to sample negative classes\n Returns:\n Tensor:\n classes to keep when calculating the federated loss, including both unique gt\n classes and sampled negative classes.\n \"\"\"\n unique_gt_classes = torch.unique(gt_classes)\n prob = unique_gt_classes.new_ones(num_classes + 1).float()\n prob[-1] = 0\n if len(unique_gt_classes) < num_fed_loss_classes:\n prob[:num_classes] = weight.float().clone()\n prob[unique_gt_classes] = 0\n sampled_negative_classes = torch.multinomial(\n prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False\n )\n fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes])\n else:\n fed_loss_classes = unique_gt_classes\n return fed_loss_classes\n\n def loss_labels(self, outputs, targets, indices, num_boxes, log=False):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim 
[nb_target_boxes]\n \"\"\"\n assert 'pred_logits' in outputs\n src_logits = outputs['pred_logits']\n batch_size = len(targets)\n\n # idx = self._get_src_permutation_idx(indices)\n # target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n dtype=torch.int64, device=src_logits.device)\n src_logits_list = []\n target_classes_o_list = []\n # target_classes[idx] = target_classes_o\n for batch_idx in range(batch_size):\n valid_query = indices[batch_idx][0]\n gt_multi_idx = indices[batch_idx][1]\n if len(gt_multi_idx) == 0:\n continue\n bz_src_logits = src_logits[batch_idx]\n target_classes_o = targets[batch_idx][\"labels\"]\n target_classes[batch_idx, valid_query] = target_classes_o[gt_multi_idx]\n\n src_logits_list.append(bz_src_logits[valid_query])\n target_classes_o_list.append(target_classes_o[gt_multi_idx])\n\n if self.use_focal or self.use_fed_loss:\n num_boxes = torch.cat(target_classes_o_list).shape[0] if len(target_classes_o_list) != 0 else 1\n\n target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], self.num_classes + 1],\n dtype=src_logits.dtype, layout=src_logits.layout,\n device=src_logits.device)\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n gt_classes = torch.argmax(target_classes_onehot, dim=-1)\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n\n src_logits = src_logits.flatten(0, 1)\n target_classes_onehot = target_classes_onehot.flatten(0, 1)\n if self.use_focal:\n cls_loss = sigmoid_focal_loss_jit(src_logits, target_classes_onehot, alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction=\"none\")\n else:\n cls_loss = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot, reduction=\"none\")\n if self.use_fed_loss:\n K = self.num_classes\n N = src_logits.shape[0]\n fed_loss_classes = self.get_fed_loss_classes(\n gt_classes,\n num_fed_loss_classes=self.fed_loss_num_classes,\n num_classes=K,\n weight=self.fed_loss_cls_weights,\n )\n fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)\n fed_loss_classes_mask[fed_loss_classes] = 1\n fed_loss_classes_mask = fed_loss_classes_mask[:K]\n weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()\n\n loss_ce = torch.sum(cls_loss * weight) / num_boxes\n else:\n loss_ce = torch.sum(cls_loss) / num_boxes\n\n losses = {'loss_ce': loss_ce}\n else:\n raise NotImplementedError\n\n return losses\n\n def loss_boxes(self, outputs, targets, indices, num_boxes):\n \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n \"\"\"\n assert 'pred_boxes' in outputs\n # idx = self._get_src_permutation_idx(indices)\n src_boxes = outputs['pred_boxes']\n\n batch_size = len(targets)\n pred_box_list = []\n pred_norm_box_list = []\n tgt_box_list = []\n tgt_box_xyxy_list = []\n for batch_idx in range(batch_size):\n valid_query = indices[batch_idx][0]\n gt_multi_idx = indices[batch_idx][1]\n if len(gt_multi_idx) == 0:\n continue\n bz_image_whwh = targets[batch_idx]['image_size_xyxy']\n bz_src_boxes = src_boxes[batch_idx]\n bz_target_boxes = targets[batch_idx][\"boxes\"] # normalized (cx, cy, w, h)\n bz_target_boxes_xyxy = targets[batch_idx][\"boxes_xyxy\"] # absolute (x1, y1, x2, y2)\n 
pred_box_list.append(bz_src_boxes[valid_query])\n pred_norm_box_list.append(bz_src_boxes[valid_query] / bz_image_whwh) # normalize (x1, y1, x2, y2)\n tgt_box_list.append(bz_target_boxes[gt_multi_idx])\n tgt_box_xyxy_list.append(bz_target_boxes_xyxy[gt_multi_idx])\n\n if len(pred_box_list) != 0:\n src_boxes = torch.cat(pred_box_list)\n src_boxes_norm = torch.cat(pred_norm_box_list) # normalized (x1, y1, x2, y2)\n target_boxes = torch.cat(tgt_box_list)\n target_boxes_abs_xyxy = torch.cat(tgt_box_xyxy_list)\n num_boxes = src_boxes.shape[0]\n\n losses = {}\n # require normalized (x1, y1, x2, y2)\n loss_bbox = F.l1_loss(src_boxes_norm, box_cxcywh_to_xyxy(target_boxes), reduction='none')\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n # loss_giou = giou_loss(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))\n loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(src_boxes, target_boxes_abs_xyxy))\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n else:\n losses = {'loss_bbox': outputs['pred_boxes'].sum() * 0,\n 'loss_giou': outputs['pred_boxes'].sum() * 0}\n\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n loss_map = {\n 'labels': self.loss_labels,\n 'boxes': self.loss_boxes,\n }\n assert loss in loss_map, f'do you really want to compute {loss} loss?'\n return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n def forward(self, outputs, targets):\n \"\"\" This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices, _ = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if 'aux_outputs' in outputs:\n for i, aux_outputs in enumerate(outputs['aux_outputs']):\n indices, _ = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n if loss == 'masks':\n # Intermediate masks losses are too costly to compute, we ignore them.\n continue\n kwargs = {}\n if loss == 'labels':\n # Logging is enabled only for the last layer\n kwargs = {'log': 
False}\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f'_{i}': v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses" }, { "identifier": "HungarianMatcherDynamicK", "path": "diffusiondet/loss.py", "snippet": "class HungarianMatcherDynamicK(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-k (dynamic) matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n def __init__(self, cfg, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, cost_mask: float = 1, use_focal: bool = False):\n \"\"\"Creates the matcher\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.use_focal = use_focal\n self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS\n self.ota_k = cfg.MODEL.DiffusionDet.OTA_K\n if self.use_focal:\n self.focal_loss_alpha = cfg.MODEL.DiffusionDet.ALPHA\n self.focal_loss_gamma = cfg.MODEL.DiffusionDet.GAMMA\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n def forward(self, outputs, targets):\n \"\"\" simOTA for detr\"\"\"\n with torch.no_grad():\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n # We flatten to compute the cost matrices in a batch\n if self.use_focal or self.use_fed_loss:\n out_prob = outputs[\"pred_logits\"].sigmoid() # [batch_size, num_queries, num_classes]\n out_bbox = outputs[\"pred_boxes\"] # [batch_size, num_queries, 4]\n else:\n out_prob = outputs[\"pred_logits\"].softmax(-1) # [batch_size, num_queries, num_classes]\n out_bbox = outputs[\"pred_boxes\"] # [batch_size, num_queries, 4]\n\n indices = []\n matched_ids = []\n assert bs == len(targets)\n for batch_idx in range(bs):\n bz_boxes = out_bbox[batch_idx] # [num_proposals, 4]\n bz_out_prob = out_prob[batch_idx]\n bz_tgt_ids = targets[batch_idx][\"labels\"]\n num_insts = len(bz_tgt_ids)\n if num_insts == 0: # empty object in key frame\n non_valid = torch.zeros(bz_out_prob.shape[0]).to(bz_out_prob) > 0\n indices_batchi = (non_valid, torch.arange(0, 0).to(bz_out_prob))\n matched_qidx = torch.arange(0, 0).to(bz_out_prob)\n indices.append(indices_batchi)\n matched_ids.append(matched_qidx)\n continue\n\n bz_gtboxs = targets[batch_idx]['boxes'] # [num_gt, 4] normalized (cx, xy, w, h)\n bz_gtboxs_abs_xyxy = targets[batch_idx]['boxes_xyxy']\n fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(\n box_xyxy_to_cxcywh(bz_boxes), # absolute (cx, cy, w, h)\n box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy), # absolute (cx, cy, w, h)\n expanded_strides=32\n )\n\n pair_wise_ious = ops.box_iou(bz_boxes, bz_gtboxs_abs_xyxy)\n\n # Compute the classification cost.\n if self.use_focal:\n alpha = self.focal_loss_alpha\n gamma = self.focal_loss_gamma\n neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, 
bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]\n elif self.use_fed_loss:\n # focal loss degenerates to naive one\n neg_cost_class = (-(1 - bz_out_prob + 1e-8).log())\n pos_cost_class = (-(bz_out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]\n else:\n cost_class = -bz_out_prob[:, bz_tgt_ids]\n\n # Compute the L1 cost between boxes\n # image_size_out = torch.cat([v[\"image_size_xyxy\"].unsqueeze(0) for v in targets])\n # image_size_out = image_size_out.unsqueeze(1).repeat(1, num_queries, 1).flatten(0, 1)\n # image_size_tgt = torch.cat([v[\"image_size_xyxy_tgt\"] for v in targets])\n\n bz_image_size_out = targets[batch_idx]['image_size_xyxy']\n bz_image_size_tgt = targets[batch_idx]['image_size_xyxy_tgt']\n\n bz_out_bbox_ = bz_boxes / bz_image_size_out # normalize (x1, y1, x2, y2)\n bz_tgt_bbox_ = bz_gtboxs_abs_xyxy / bz_image_size_tgt # normalize (x1, y1, x2, y2)\n cost_bbox = torch.cdist(bz_out_bbox_, bz_tgt_bbox_, p=1)\n\n cost_giou = -generalized_box_iou(bz_boxes, bz_gtboxs_abs_xyxy)\n\n # Final cost matrix\n cost = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou + 100.0 * (~is_in_boxes_and_center)\n # cost = (cost_class + 3.0 * cost_giou + 100.0 * (~is_in_boxes_and_center)) # [num_query,num_gt]\n cost[~fg_mask] = cost[~fg_mask] + 10000.0\n\n # if bz_gtboxs.shape[0]>0:\n indices_batchi, matched_qidx = self.dynamic_k_matching(cost, pair_wise_ious, bz_gtboxs.shape[0])\n\n indices.append(indices_batchi)\n matched_ids.append(matched_qidx)\n\n return indices, matched_ids\n\n def get_in_boxes_info(self, boxes, target_gts, expanded_strides):\n xy_target_gts = box_cxcywh_to_xyxy(target_gts) # (x1, y1, x2, y2)\n\n anchor_center_x = boxes[:, 0].unsqueeze(1)\n anchor_center_y = boxes[:, 1].unsqueeze(1)\n\n # whether the center of each anchor is inside a gt box\n b_l = anchor_center_x > xy_target_gts[:, 0].unsqueeze(0)\n b_r = anchor_center_x < xy_target_gts[:, 2].unsqueeze(0)\n b_t = anchor_center_y > xy_target_gts[:, 1].unsqueeze(0)\n b_b = anchor_center_y < xy_target_gts[:, 3].unsqueeze(0)\n # (b_l.long()+b_r.long()+b_t.long()+b_b.long())==4 [300,num_gt] ,\n is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)\n is_in_boxes_all = is_in_boxes.sum(1) > 0 # [num_query]\n # in fixed center\n center_radius = 2.5\n # Modified to self-adapted sampling --- the center size depends on the size of the gt boxes\n # https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212\n b_l = anchor_center_x > (target_gts[:, 0] - (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n b_r = anchor_center_x < (target_gts[:, 0] + (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)\n b_t = anchor_center_y > (target_gts[:, 1] - (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n b_b = anchor_center_y < (target_gts[:, 1] + (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)\n\n is_in_centers = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)\n is_in_centers_all = is_in_centers.sum(1) > 0\n\n is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all\n is_in_boxes_and_center = (is_in_boxes & is_in_centers)\n\n return is_in_boxes_anchor, is_in_boxes_and_center\n\n def dynamic_k_matching(self, cost, pair_wise_ious, num_gt):\n matching_matrix = torch.zeros_like(cost) # [300,num_gt]\n ious_in_boxes_matrix = pair_wise_ious\n n_candidate_k = self.ota_k\n\n # Take 
the sum of the predicted value and the top 10 iou of gt with the largest iou as dynamic_k\n topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=0)\n dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)\n\n for gt_idx in range(num_gt):\n _, pos_idx = torch.topk(cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)\n matching_matrix[:, gt_idx][pos_idx] = 1.0\n\n del topk_ious, dynamic_ks, pos_idx\n\n anchor_matching_gt = matching_matrix.sum(1)\n\n if (anchor_matching_gt > 1).sum() > 0:\n _, cost_argmin = torch.min(cost[anchor_matching_gt > 1], dim=1)\n matching_matrix[anchor_matching_gt > 1] *= 0\n matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1\n\n while (matching_matrix.sum(0) == 0).any():\n num_zero_gt = (matching_matrix.sum(0) == 0).sum()\n matched_query_id = matching_matrix.sum(1) > 0\n cost[matched_query_id] += 100000.0\n unmatch_id = torch.nonzero(matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1)\n for gt_idx in unmatch_id:\n pos_idx = torch.argmin(cost[:, gt_idx])\n matching_matrix[:, gt_idx][pos_idx] = 1.0\n if (matching_matrix.sum(1) > 1).sum() > 0: # If a query matches more than one gt\n _, cost_argmin = torch.min(cost[anchor_matching_gt > 1],\n dim=1) # find gt for these queries with minimal cost\n matching_matrix[anchor_matching_gt > 1] *= 0 # reset mapping relationship\n matching_matrix[anchor_matching_gt > 1, cost_argmin,] = 1 # keep gt with minimal cost\n\n assert not (matching_matrix.sum(0) == 0).any()\n selected_query = matching_matrix.sum(1) > 0\n gt_indices = matching_matrix[selected_query].max(1)[1]\n assert selected_query.sum() == len(gt_indices)\n\n cost[matching_matrix == 0] = cost[matching_matrix == 0] + float('inf')\n matched_query_id = torch.min(cost, dim=0)[1]\n\n return (selected_query, gt_indices), matched_query_id" }, { "identifier": "DynamicHead", "path": "diffusiondet/head.py", "snippet": "class DynamicHead(nn.Module):\n\n def __init__(self, cfg, roi_input_shape):\n super().__init__()\n\n # Build RoI.\n box_pooler = self._init_box_pooler(cfg, roi_input_shape)\n self.box_pooler = box_pooler\n \n # Build heads.\n num_classes = cfg.MODEL.DiffusionDet.NUM_CLASSES\n d_model = cfg.MODEL.DiffusionDet.HIDDEN_DIM\n dim_feedforward = cfg.MODEL.DiffusionDet.DIM_FEEDFORWARD\n nhead = cfg.MODEL.DiffusionDet.NHEADS\n dropout = cfg.MODEL.DiffusionDet.DROPOUT\n activation = cfg.MODEL.DiffusionDet.ACTIVATION\n num_heads = cfg.MODEL.DiffusionDet.NUM_HEADS\n rcnn_head = RCNNHead(cfg, d_model, num_classes, dim_feedforward, nhead, dropout, activation)\n self.head_series = _get_clones(rcnn_head, num_heads)\n self.num_heads = num_heads\n self.return_intermediate = cfg.MODEL.DiffusionDet.DEEP_SUPERVISION\n\n # Gaussian random feature embedding layer for time\n self.d_model = d_model\n time_dim = d_model * 4\n self.time_mlp = nn.Sequential(\n SinusoidalPositionEmbeddings(d_model),\n nn.Linear(d_model, time_dim),\n nn.GELU(),\n nn.Linear(time_dim, time_dim),\n )\n\n # Init parameters.\n self.use_focal = cfg.MODEL.DiffusionDet.USE_FOCAL\n self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS\n self.num_classes = num_classes\n if self.use_focal or self.use_fed_loss:\n prior_prob = cfg.MODEL.DiffusionDet.PRIOR_PROB\n self.bias_value = -math.log((1 - prior_prob) / prior_prob)\n self._reset_parameters()\n\n def _reset_parameters(self):\n # init all parameters.\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n\n # initialize the bias for focal loss and fed loss.\n if self.use_focal or self.use_fed_loss:\n if p.shape[-1] 
== self.num_classes or p.shape[-1] == self.num_classes + 1:\n nn.init.constant_(p, self.bias_value)\n\n @staticmethod\n def _init_box_pooler(cfg, input_shape):\n\n in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n\n # If StandardROIHeads is applied on multiple feature maps (as in FPN),\n # then we share the same predictors and therefore the channel counts must be the same\n in_channels = [input_shape[f].channels for f in in_features]\n # Check all channel counts are equal\n assert len(set(in_channels)) == 1, in_channels\n\n box_pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n return box_pooler\n\n def forward(self, features, init_bboxes, t, init_features):\n # assert t shape (batch_size)\n time = self.time_mlp(t)\n\n inter_class_logits = []\n inter_pred_bboxes = []\n\n bs = len(features[0])\n bboxes = init_bboxes\n num_boxes = bboxes.shape[1]\n\n if init_features is not None:\n init_features = init_features[None].repeat(1, bs, 1)\n proposal_features = init_features.clone()\n else:\n proposal_features = None\n \n for head_idx, rcnn_head in enumerate(self.head_series):\n class_logits, pred_bboxes, proposal_features = rcnn_head(features, bboxes, proposal_features, self.box_pooler, time)\n if self.return_intermediate:\n inter_class_logits.append(class_logits)\n inter_pred_bboxes.append(pred_bboxes)\n bboxes = pred_bboxes.detach()\n\n if self.return_intermediate:\n return torch.stack(inter_class_logits), torch.stack(inter_pred_bboxes)\n\n return class_logits[None], pred_bboxes[None]" }, { "identifier": "box_cxcywh_to_xyxy", "path": "diffusiondet/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\n x_c, y_c, w, h = x.unbind(-1)\n b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n (x_c + 0.5 * w), (y_c + 0.5 * h)]\n return torch.stack(b, dim=-1)" }, { "identifier": "box_xyxy_to_cxcywh", "path": "diffusiondet/util/box_ops.py", "snippet": "def box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2,\n (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)" }, { "identifier": "nested_tensor_from_tensor_list", "path": "diffusiondet/util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" } ]
import math
import random
import torch
import torch.nn.functional as F
from typing import List
from collections import namedtuple
from torch import nn
from detectron2.layers import batched_nms
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess
from detectron2.structures import Boxes, ImageList, Instances
from .loss import SetCriterionDynamicK, HungarianMatcherDynamicK
from .head import DynamicHead
from .util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from .util.misc import nested_tensor_from_tensor_list
10,838
return ModelPrediction(pred_noise, x_start), outputs_class, outputs_coord @torch.no_grad() def ddim_sample(self, batched_inputs, backbone_feats, images_whwh, images, clip_denoised=True, do_postprocess=True): batch = images_whwh.shape[0] shape = (batch, self.num_proposals, 4) total_timesteps, sampling_timesteps, eta, objective = self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps times = torch.linspace(-1, total_timesteps - 1, steps=sampling_timesteps + 1) times = list(reversed(times.int().tolist())) time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)] img = torch.randn(shape, device=self.device) ensemble_score, ensemble_label, ensemble_coord = [], [], [] x_start = None for time, time_next in time_pairs: time_cond = torch.full((batch,), time, device=self.device, dtype=torch.long) self_cond = x_start if self.self_condition else None preds, outputs_class, outputs_coord = self.model_predictions(backbone_feats, images_whwh, img, time_cond, self_cond, clip_x_start=clip_denoised) pred_noise, x_start = preds.pred_noise, preds.pred_x_start if self.box_renewal: # filter score_per_image, box_per_image = outputs_class[-1][0], outputs_coord[-1][0] threshold = 0.5 score_per_image = torch.sigmoid(score_per_image) value, _ = torch.max(score_per_image, -1, keepdim=False) keep_idx = value > threshold num_remain = torch.sum(keep_idx) pred_noise = pred_noise[:, keep_idx, :] x_start = x_start[:, keep_idx, :] img = img[:, keep_idx, :] if time_next < 0: img = x_start continue alpha = self.alphas_cumprod[time] alpha_next = self.alphas_cumprod[time_next] sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt() c = (1 - alpha_next - sigma ** 2).sqrt() noise = torch.randn_like(img) img = x_start * alpha_next.sqrt() + \ c * pred_noise + \ sigma * noise if self.box_renewal: # filter # replenish with randn boxes img = torch.cat((img, torch.randn(1, self.num_proposals - num_remain, 4, device=img.device)), dim=1) if self.use_ensemble and self.sampling_timesteps > 1: box_pred_per_image, scores_per_image, labels_per_image = self.inference(outputs_class[-1], outputs_coord[-1], images.image_sizes) ensemble_score.append(scores_per_image) ensemble_label.append(labels_per_image) ensemble_coord.append(box_pred_per_image) if self.use_ensemble and self.sampling_timesteps > 1: box_pred_per_image = torch.cat(ensemble_coord, dim=0) scores_per_image = torch.cat(ensemble_score, dim=0) labels_per_image = torch.cat(ensemble_label, dim=0) if self.use_nms: keep = batched_nms(box_pred_per_image, scores_per_image, labels_per_image, 0.5) box_pred_per_image = box_pred_per_image[keep] scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] result = Instances(images.image_sizes[0]) result.pred_boxes = Boxes(box_pred_per_image) result.scores = scores_per_image result.pred_classes = labels_per_image results = [result] else: output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} box_cls = output["pred_logits"] box_pred = output["pred_boxes"] results = self.inference(box_cls, box_pred, images.image_sizes) if do_postprocess: processed_results = [] for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) r = detector_postprocess(results_per_image, height, width) 
processed_results.append({"instances": r}) return processed_results # forward diffusion def q_sample(self, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise def forward(self, batched_inputs, do_postprocess=True): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper` . Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * image: Tensor, image in (C, H, W) format. * instances: Instances Other information that's included in the original dicts, such as: * "height", "width" (int): the output resolution of the model, used in inference. See :meth:`postprocess` for details. """ images, images_whwh = self.preprocess_image(batched_inputs) if isinstance(images, (list, torch.Tensor)):
# ======================================== # Modified by Shoufa Chen # ======================================== # Modified by Peize Sun, Rufeng Zhang # Contact: {sunpeize, cxrfzhang}@foxmail.com # # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved __all__ = ["DiffusionDet"] ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start']) def exists(x): return x is not None def default(val, d): if exists(val): return val return d() if callable(d) else d def extract(a, t, x_shape): """extract the appropriate t index for a batch of indices""" batch_size = t.shape[0] out = a.gather(-1, t) return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))) def cosine_beta_schedule(timesteps, s=0.008): """ cosine schedule as proposed in https://openreview.net/forum?id=-NEXDKk8gZ """ steps = timesteps + 1 x = torch.linspace(0, timesteps, steps, dtype=torch.float64) alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2 alphas_cumprod = alphas_cumprod / alphas_cumprod[0] betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1]) return torch.clip(betas, 0, 0.999) @META_ARCH_REGISTRY.register() class DiffusionDet(nn.Module): """ Implement DiffusionDet """ def __init__(self, cfg): super().__init__() self.device = torch.device(cfg.MODEL.DEVICE) self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES self.num_classes = cfg.MODEL.DiffusionDet.NUM_CLASSES self.num_proposals = cfg.MODEL.DiffusionDet.NUM_PROPOSALS self.hidden_dim = cfg.MODEL.DiffusionDet.HIDDEN_DIM self.num_heads = cfg.MODEL.DiffusionDet.NUM_HEADS # Build Backbone. self.backbone = build_backbone(cfg) self.size_divisibility = self.backbone.size_divisibility # build diffusion timesteps = 1000 sampling_timesteps = cfg.MODEL.DiffusionDet.SAMPLE_STEP self.objective = 'pred_x0' betas = cosine_beta_schedule(timesteps) alphas = 1. - betas alphas_cumprod = torch.cumprod(alphas, dim=0) alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.sampling_timesteps = default(sampling_timesteps, timesteps) assert self.sampling_timesteps <= timesteps self.is_ddim_sampling = self.sampling_timesteps < timesteps self.ddim_sampling_eta = 1. self.self_condition = False self.scale = cfg.MODEL.DiffusionDet.SNR_SCALE self.box_renewal = True self.use_ensemble = True self.register_buffer('betas', betas) self.register_buffer('alphas_cumprod', alphas_cumprod) self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod)) self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod)) self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod)) self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod)) self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1)) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod) # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', posterior_variance) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20))) self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. 
- alphas_cumprod)) self.register_buffer('posterior_mean_coef2', (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod)) # Build Dynamic Head. self.head = DynamicHead(cfg=cfg, roi_input_shape=self.backbone.output_shape()) # Loss parameters: class_weight = cfg.MODEL.DiffusionDet.CLASS_WEIGHT giou_weight = cfg.MODEL.DiffusionDet.GIOU_WEIGHT l1_weight = cfg.MODEL.DiffusionDet.L1_WEIGHT no_object_weight = cfg.MODEL.DiffusionDet.NO_OBJECT_WEIGHT self.deep_supervision = cfg.MODEL.DiffusionDet.DEEP_SUPERVISION self.use_focal = cfg.MODEL.DiffusionDet.USE_FOCAL self.use_fed_loss = cfg.MODEL.DiffusionDet.USE_FED_LOSS self.use_nms = cfg.MODEL.DiffusionDet.USE_NMS # Build Criterion. matcher = HungarianMatcherDynamicK( cfg=cfg, cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal ) weight_dict = {"loss_ce": class_weight, "loss_bbox": l1_weight, "loss_giou": giou_weight} if self.deep_supervision: aux_weight_dict = {} for i in range(self.num_heads - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) losses = ["labels", "boxes"] self.criterion = SetCriterionDynamicK( cfg=cfg, num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, use_focal=self.use_focal,) pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1) pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1) self.normalizer = lambda x: (x - pixel_mean) / pixel_std self.to(self.device) def predict_noise_from_start(self, x_t, t, x0): return ( (extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) / extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) ) def model_predictions(self, backbone_feats, images_whwh, x, t, x_self_cond=None, clip_x_start=False): x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale) x_boxes = ((x_boxes / self.scale) + 1) / 2 x_boxes = box_cxcywh_to_xyxy(x_boxes) x_boxes = x_boxes * images_whwh[:, None, :] outputs_class, outputs_coord = self.head(backbone_feats, x_boxes, t, None) x_start = outputs_coord[-1] # (batch, num_proposals, 4) predict boxes: absolute coordinates (x1, y1, x2, y2) x_start = x_start / images_whwh[:, None, :] x_start = box_xyxy_to_cxcywh(x_start) x_start = (x_start * 2 - 1.) 
* self.scale x_start = torch.clamp(x_start, min=-1 * self.scale, max=self.scale) pred_noise = self.predict_noise_from_start(x, t, x_start) return ModelPrediction(pred_noise, x_start), outputs_class, outputs_coord @torch.no_grad() def ddim_sample(self, batched_inputs, backbone_feats, images_whwh, images, clip_denoised=True, do_postprocess=True): batch = images_whwh.shape[0] shape = (batch, self.num_proposals, 4) total_timesteps, sampling_timesteps, eta, objective = self.num_timesteps, self.sampling_timesteps, self.ddim_sampling_eta, self.objective # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps times = torch.linspace(-1, total_timesteps - 1, steps=sampling_timesteps + 1) times = list(reversed(times.int().tolist())) time_pairs = list(zip(times[:-1], times[1:])) # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)] img = torch.randn(shape, device=self.device) ensemble_score, ensemble_label, ensemble_coord = [], [], [] x_start = None for time, time_next in time_pairs: time_cond = torch.full((batch,), time, device=self.device, dtype=torch.long) self_cond = x_start if self.self_condition else None preds, outputs_class, outputs_coord = self.model_predictions(backbone_feats, images_whwh, img, time_cond, self_cond, clip_x_start=clip_denoised) pred_noise, x_start = preds.pred_noise, preds.pred_x_start if self.box_renewal: # filter score_per_image, box_per_image = outputs_class[-1][0], outputs_coord[-1][0] threshold = 0.5 score_per_image = torch.sigmoid(score_per_image) value, _ = torch.max(score_per_image, -1, keepdim=False) keep_idx = value > threshold num_remain = torch.sum(keep_idx) pred_noise = pred_noise[:, keep_idx, :] x_start = x_start[:, keep_idx, :] img = img[:, keep_idx, :] if time_next < 0: img = x_start continue alpha = self.alphas_cumprod[time] alpha_next = self.alphas_cumprod[time_next] sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt() c = (1 - alpha_next - sigma ** 2).sqrt() noise = torch.randn_like(img) img = x_start * alpha_next.sqrt() + \ c * pred_noise + \ sigma * noise if self.box_renewal: # filter # replenish with randn boxes img = torch.cat((img, torch.randn(1, self.num_proposals - num_remain, 4, device=img.device)), dim=1) if self.use_ensemble and self.sampling_timesteps > 1: box_pred_per_image, scores_per_image, labels_per_image = self.inference(outputs_class[-1], outputs_coord[-1], images.image_sizes) ensemble_score.append(scores_per_image) ensemble_label.append(labels_per_image) ensemble_coord.append(box_pred_per_image) if self.use_ensemble and self.sampling_timesteps > 1: box_pred_per_image = torch.cat(ensemble_coord, dim=0) scores_per_image = torch.cat(ensemble_score, dim=0) labels_per_image = torch.cat(ensemble_label, dim=0) if self.use_nms: keep = batched_nms(box_pred_per_image, scores_per_image, labels_per_image, 0.5) box_pred_per_image = box_pred_per_image[keep] scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] result = Instances(images.image_sizes[0]) result.pred_boxes = Boxes(box_pred_per_image) result.scores = scores_per_image result.pred_classes = labels_per_image results = [result] else: output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} box_cls = output["pred_logits"] box_pred = output["pred_boxes"] results = self.inference(box_cls, box_pred, images.image_sizes) if do_postprocess: processed_results = [] for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): height = input_per_image.get("height", 
image_size[0]) width = input_per_image.get("width", image_size[1]) r = detector_postprocess(results_per_image, height, width) processed_results.append({"instances": r}) return processed_results # forward diffusion def q_sample(self, x_start, t, noise=None): if noise is None: noise = torch.randn_like(x_start) sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, t, x_start.shape) sqrt_one_minus_alphas_cumprod_t = extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise def forward(self, batched_inputs, do_postprocess=True): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper` . Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * image: Tensor, image in (C, H, W) format. * instances: Instances Other information that's included in the original dicts, such as: * "height", "width" (int): the output resolution of the model, used in inference. See :meth:`postprocess` for details. """ images, images_whwh = self.preprocess_image(batched_inputs) if isinstance(images, (list, torch.Tensor)):
images = nested_tensor_from_tensor_list(images)
5
2023-11-17 02:37:37+00:00
16k
fg320/DEASC
examples/11C_3x1_farm_dyn_tuning_wso.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). 
Default to None.\n wd: (float, optional) input wind directions (deg). Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info" }, { "identifier": "WSOpt", "path": "deasc/wake_steering.py", "snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. 
Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random intial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1 and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict , optional) optimization method options dictionary.\n Default set to None.\n opt_function: (string , optional) objective function. 'Farm Power' available\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized contraint bound.\n Default set to None.\n upp_bnd_constr: (float) upper non-normalized contraint bound.\n Default set to None.\n by_row : (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows:: (int) wind farm rows. Default set to None.\n cols:: (int) wind farm columns. Default set to None.\n tuning_dynamic : (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.cost_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evalauation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)) is False:\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if same_groups is False:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and self.tuning_dyn_initialization is False:\n err_msg = \"Tuning dynamic not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)" }, { "identifier": "GPWrap", "path": "deasc/gp.py", "snippet": "class GPWrap:\n \"\"\"\n Wrapper class to create, modify and visualise Gaussian Processes for dynamic parameter\n tuning. Currently limited to a single output GP.\n \"\"\"\n\n def __init__(self, parameter_class, parameter_name, dimensions):\n self.parameter_class = parameter_class\n self.parameter_name = parameter_name\n self.dimensions = dimensions\n \"\"\"\n Args\n ----\n parameter_class: string\n Parameter class of the optimal parameter to fit.\n parameter_name: string\n Name of the optimal parameter to fit.\n dimensions: integer\n Dimensions/inputs/variables of the GP.\n \"\"\"\n\n def GP_so(self, yaw_data, param_data, num_restarts=50, noise=0.05):\n \"\"\"\n Construct and returns a single-output (SO) GP for the given input dataset\n (optimal parameter for a given yaw configuration).\n\n Args\n ----\n yaw_data: list of lists\n list of input yaw configurations for which parameter has been tuned\n param_data: list of lists\n for each yaw configuration in yaw_data, list containing the optimal parameter\n num_restarts: int\n number of random starts of the GP hyperparameter tuning optimization\n noise: float\n noise in output prediction. Default is 0.05\n\n Returns\n -------\n m: GPy single-output Gaussian Process model\n \"\"\"\n # Sample check on argument dimension\n if len(yaw_data[0]) != self.dimensions:\n err_msg = (\"Yaw input and GP dimensions do not match\")\n raise Exception(err_msg)\n if len(param_data[0]) != 1:\n err_msg = (\"Single-output GPs only\")\n raise Exception(err_msg)\n\n # Data structure arguments\n yaw_data_GP = np.array(yaw_data)\n param_data_GP = np.array(param_data)\n\n # GP model\n kernel = GPy.kern.RBF(input_dim=self.dimensions, variance=1., lengthscale=1.)\n self.m = GPy.models.GPRegression(yaw_data_GP,\n param_data_GP,\n kernel,\n noise_var=noise)\n\n # Hyperparameter tuning\n self.m.optimize(optimizer=None, # Default lbfgsb\n start=None,\n messages=False,\n max_iters=1000)\n self.m.optimize_restarts(num_restarts=num_restarts)\n return self.m\n\n def GP_so_plot(self, parameter_range_plot, yaw_range_plot):\n \"\"\"\n Plot a single-output (SO) GP model. 
1D and 2D plots are generated for each\n variable combination.\n\n Args\n ----\n parameter_range: tuple\n range of the optimal parameter to plot\n parameter_range: tuple\n range of the yaw variables to plot\n \"\"\"\n # Plotting library choice and defaults values\n GPy.plotting.change_plotting_library('matplotlib')\n GPy.plotting.matplot_dep.defaults.data_2d = {'s': 0,\n 'edgecolors': 'none',\n 'linewidth': 0.0,\n 'cmap': cm.get_cmap('hot'),\n 'alpha': 0.5}\n\n # 1D Plots\n if self.dimensions == 1:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(5, 2.5))\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n fig = self.m.plot(figure=figure,\n col=1,\n row=1,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=True)\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n figsize = (5*n_cuts, 2.5*self.dimensions)\n figure = GPy.plotting.plotting_library().figure(self.dimensions,\n n_cuts,\n figsize=figsize)\n\n for dim_idx in range(self.dimensions):\n for i, slice_single in zip(range(n_cuts), slices):\n title = \"GP %s - $\\gamma_{others}$\" \\\n \"%.1f $^{\\circ}$\" % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (dim_idx+1)\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n inputs = []\n for j in range(self.dimensions):\n if j == dim_idx:\n pass\n else:\n inputs.append((j, slice_single))\n fig = self.m.plot(figure=figure,\n col=(i+1),\n row=(dim_idx+1),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=False)\n\n # 2D Plots\n # Countours are fine ##\n # Data points (training) plotted are off ##\n # double checked with GP and training database ##\n if self.dimensions == 1:\n pass\n elif self.dimensions == 2:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(3, 2.5))\n\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$\\gamma_{2}$ [deg]'\n\n fig = self.m.plot(figure=figure,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n plot_rows = self.dimensions-1\n plot_cols = self.dimensions-1\n combinations = list(itertools.combinations(\n list(range(0, self.dimensions)), 2))\n\n figsize = (3*plot_cols*len(slices), 2.5*plot_rows)\n figure = GPy.plotting.plotting_library().figure(plot_rows,\n plot_cols*len(slices),\n figsize=figsize)\n for i, slice_single in zip(range(n_cuts), slices):\n for comb_idx, comb in enumerate(combinations):\n title = 'GP %s - $\\gamma_{others}$' \\\n '%.1f $^{\\circ}$' % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (comb[0]+1)\n ylabel = '$\\gamma_{%i}$ [deg]' % (comb[1]+1)\n inputs = []\n for j in range(self.dimensions):\n if j in comb:\n pass\n else:\n inputs.append((j, slice_single))\n\n fig = self.m.plot(figure=figure,\n col=(comb[0]+1+plot_cols*i),\n row=(comb[1]),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))" }, { "identifier": "TuningDyn_Turbines", 
"path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Turbines(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"Class for dynamic parameter tuning of single turbines within a wind farm.\"\"\"\n\n def __init__(self, param_class, param_name, tuning_turbines, GP_model):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_turbines: (list) list of turbines included in the tuning.\n GP_model: (GPy object) GP model with len(tuning_turbines) input dimensions.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_turbines\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return self.tuning_variables\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf-model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_turbines(self.tuning_turbines, yaw_angles)\n mu, var, = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned" }, { "identifier": "floris_extract_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_object_dict(wf_model):\n \"\"\"Extract and return the current FLORIS object dictionary.\"\"\"\n return wf_model.interface.floris.as_dict()" }, { "identifier": "floris_param_change_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object_dict(wf_model_dict, param_class, param_name, param_value):\n \"\"\"\n Change FLORIS object with a new model parameter, return new FLORIS object dictionary.\n FLORIS object is not reinitialised (see function floris_parameter_change_object).\n \"\"\"\n wf_model_dict_new = copy.deepcopy(wf_model_dict)\n models_dict = floris_extract_models_dict(wf_model_dict_new)\n (wf_model_dict_new['wake'][param_class]\n [models_dict[param_class]][param_name]) = param_value\n return wf_model_dict_new" }, { "identifier": "floris_param_change_object", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object(wf_model, wf_model_dict_new):\n \"\"\"Change FLORIS object with new object dictionary. Also reinitialise farm layout.\"\"\"\n x_reinit, y_reinit = wf_model.interface.get_turbine_layout()\n wf_model.interface = FI(wf_model_dict_new)\n wf_model.interface.reinitialize(layout_x=x_reinit, layout_y=y_reinit)\n return wf_model" } ]
import numpy as np
from deasc import WfModel
from deasc import WSOpt
from deasc import GPWrap
from deasc import TuningDyn_Turbines
from deasc.utils_floris import (
    floris_extract_object_dict,
    floris_param_change_object_dict,
    floris_param_change_object
)
11610
""" This example shows wake steering optimisation on a 3x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of the two most upstream turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(3, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(3), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # %% Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_3x1_2dim.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append(key[:-1]) # Last turbine yaw angle is fixed, not a GP dimension param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=50, noise=0.05) # Tuning object initialisation tuning_dyn_obj = TuningDyn_Turbines(param_class=parameter_class, param_name=parameter_name, tuning_turbines=[1, 2], GP_model=GP_model) # %% Optimisation with dynamic tuning # Initialise wake steering object
""" This example shows wake steering optimisation on a 3x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of the two most upstream turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(3, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(3), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # %% Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_3x1_2dim.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append(key[:-1]) # Last turbine yaw angle is fixed, not a GP dimension param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=50, noise=0.05) # Tuning object initialisation tuning_dyn_obj = TuningDyn_Turbines(param_class=parameter_class, param_name=parameter_name, tuning_turbines=[1, 2], GP_model=GP_model) # %% Optimisation with dynamic tuning # Initialise wake steering object
wso_obj_tuning = WSOpt(wf_model=wf_model,
1
2023-11-10 18:13:27+00:00
16k
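For context, the next_line field above begins the WSOpt call that the example script is expected to continue. The sketch below is a hedged illustration of one plausible continuation, based only on the WSOpt constructor arguments and the tuning_dyn_initialize, optimize_yaw and get_optimization_details methods shown in the context snippets; it is not part of the dataset row, and the exact argument values and layout are assumptions.

# Hedged sketch (not from the dataset): one plausible completion of the example above.
wso_obj_tuning = WSOpt(wf_model=wf_model,
                       inflow=inflow,
                       variables=variables,
                       var_bounds=var_bounds,
                       var_initial=var_initial,
                       opt_method="SLSQP",        # assumed; 'TURBO_1' / 'TURBO_M' are also accepted
                       obj_function="Farm Power",
                       tuning_dynamic=True)

# With tuning_dynamic=True, the TuningDyn objects must be attached before optimising
# (see tuning_dyn_initialize and _tuning_dyn_initialization_check in the context snippets).
wso_obj_tuning.tuning_dyn_initialize([tuning_dyn_obj])

# Run the wake steering optimisation and inspect the results.
opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj_tuning.optimize_yaw()
iter_details, eval_details = wso_obj_tuning.get_optimization_details()
print("Optimal yaw angles (all turbines):", opt_yaw_angles_all)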
CPES-Power-and-Energy-Systems/interoperable-recommender-tso
energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/bayesian_optimization.py
[ { "identifier": "GaussianProcess", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class GaussianProcess(BaseEstimator, RegressorMixin):\n \"\"\"The legacy Gaussian Process model class.\n\n .. deprecated:: 0.18\n This class will be removed in 0.20.\n Use the :class:`GaussianProcessRegressor` instead.\n\n Read more in the :ref:`User Guide <gaussian_process>`.\n\n Parameters\n ----------\n regr : string or callable, optional\n A regression function returning an array of outputs of the linear\n regression functional basis. The number of observations n_samples\n should be greater than the size p of this basis.\n Default assumes a simple constant regression trend.\n Available built-in regression models are::\n\n 'constant', 'linear', 'quadratic'\n\n corr : string or callable, optional\n A stationary autocorrelation function returning the autocorrelation\n between two points x and x'.\n Default assumes a squared-exponential autocorrelation model.\n Built-in correlation models are::\n\n 'absolute_exponential', 'squared_exponential',\n 'generalized_exponential', 'cubic', 'linear'\n\n beta0 : double array_like, optional\n The regression weight vector to perform Ordinary Kriging (OK).\n Default assumes Universal Kriging (UK) so that the vector beta of\n regression weights is estimated using the maximum likelihood\n principle.\n\n storage_mode : string, optional\n A string specifying whether the Cholesky decomposition of the\n correlation matrix should be stored in the class (storage_mode =\n 'full') or not (storage_mode = 'light').\n Default assumes storage_mode = 'full', so that the\n Cholesky decomposition of the correlation matrix is stored.\n This might be a useful parameter when one is not interested in the\n MSE and only plan to estimate the BLUP, for which the correlation\n matrix is not required.\n\n verbose : boolean, optional\n A boolean specifying the verbose level.\n Default is verbose = False.\n\n theta0 : double array_like, optional\n An array with shape (n_features, ) or (1, ).\n The parameters in the autocorrelation model.\n If thetaL and thetaU are also specified, theta0 is considered as\n the starting point for the maximum likelihood estimation of the\n best set of parameters.\n Default assumes isotropic autocorrelation model with theta0 = 1e-1.\n\n thetaL : double array_like, optional\n An array with shape matching theta0's.\n Lower bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n thetaU : double array_like, optional\n An array with shape matching theta0's.\n Upper bound on the autocorrelation parameters for maximum\n likelihood estimation.\n Default is None, so that it skips maximum likelihood estimation and\n it uses theta0.\n\n normalize : boolean, optional\n Input X and observations y are centered and reduced wrt\n means and standard deviations estimated from the n_samples\n observations provided.\n Default is normalize = True so that data is normalized to ease\n maximum likelihood estimation.\n\n nugget : double or ndarray, optional\n Introduce a nugget effect to allow smooth predictions from noisy\n data. If nugget is an ndarray, it must be the same length as the\n number of data points used for the fit.\n The nugget is added to the diagonal of the assumed training covariance;\n in this way it acts as a Tikhonov regularization in the problem. 
In\n the special case of the squared exponential correlation function, the\n nugget mathematically represents the variance of the input values.\n Default assumes a nugget close to machine precision for the sake of\n robustness (nugget = 10. * MACHINE_EPSILON).\n\n optimizer : string, optional\n A string specifying the optimization algorithm to be used.\n Default uses 'fmin_cobyla' algorithm from scipy.optimize.\n Available optimizers are::\n\n 'fmin_cobyla', 'Welch'\n\n 'Welch' optimizer is dued to Welch et al., see reference [WBSWM1992]_.\n It consists in iterating over several one-dimensional optimizations\n instead of running one single multi-dimensional optimization.\n\n random_start : int, optional\n The number of times the Maximum Likelihood Estimation should be\n performed from a random starting point.\n The first MLE always uses the specified starting point (theta0),\n the next starting points are picked at random according to an\n exponential distribution (log-uniform on [thetaL, thetaU]).\n Default does not use random starting point (random_start = 1).\n\n random_state : int, RandomState instance or None, optional (default=None)\n The generator used to shuffle the sequence of coordinates of theta in\n the Welch optimizer. If int, random_state is the seed used by the\n random number generator; If RandomState instance, random_state is the\n random number generator; If None, the random number generator is the\n RandomState instance used by `np.random`.\n\n Attributes\n ----------\n theta_ : array\n Specified theta OR the best set of autocorrelation parameters (the \\\n sought maximizer of the reduced likelihood function).\n\n reduced_likelihood_function_value_ : array\n The optimal reduced likelihood function value.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.gaussian_process import GaussianProcess\n >>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T\n >>> y = (X * np.sin(X)).ravel()\n >>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)\n >>> gp.fit(X, y) # doctest: +ELLIPSIS\n GaussianProcess(beta0=None...\n ...\n\n Notes\n -----\n The presentation implementation is based on a translation of the DACE\n Matlab toolbox, see reference [NLNS2002]_.\n\n References\n ----------\n\n .. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.\n Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)\n http://imedea.uib-csic.es/master/cambioglobal/Modulo_V_cod101615/Lab/lab_maps/krigging/DACE-krigingsoft/dace/dace.pdf\n\n .. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,\n and M.D. Morris (1992). Screening, predicting, and computer\n experiments. Technometrics, 34(1) 15--25.`\n http://www.jstor.org/stable/1269548\n \"\"\"\n\n _regression_types = {\n 'constant': regression.constant,\n 'linear': regression.linear,\n 'quadratic': regression.quadratic}\n\n _correlation_types = {\n 'absolute_exponential': correlation.absolute_exponential,\n 'squared_exponential': correlation.squared_exponential,\n 'generalized_exponential': correlation.generalized_exponential,\n 'cubic': correlation.cubic,\n 'linear': correlation.linear}\n\n _optimizer_types = [\n 'fmin_cobyla',\n 'Welch']\n\n def __init__(self, regr='constant', corr='squared_exponential', beta0=None,\n storage_mode='full', verbose=False, theta0=1e-1,\n thetaL=None, thetaU=None, optimizer='fmin_cobyla',\n random_start=1, normalize=True,\n nugget=10. 
* MACHINE_EPSILON, random_state=None):\n\n self.regr = regr\n self.corr = corr\n self.beta0 = beta0\n self.storage_mode = storage_mode\n self.verbose = verbose\n self.theta0 = theta0\n self.thetaL = thetaL\n self.thetaU = thetaU\n self.normalize = normalize\n self.nugget = nugget\n self.optimizer = optimizer\n self.random_start = random_start\n self.random_state = random_state\n\n def fit(self, X, y):\n \"\"\"\n The Gaussian Process model fitting method.\n\n Parameters\n ----------\n X : double array_like\n An array with shape (n_samples, n_features) with the input at which\n observations were made.\n\n y : double array_like\n An array with shape (n_samples, ) or shape (n_samples, n_targets)\n with the observations of the output to be predicted.\n\n Returns\n -------\n gp : self\n A fitted Gaussian Process model object awaiting data to perform\n predictions.\n \"\"\"\n # Run input checks\n self._check_params()\n\n self.random_state = check_random_state(self.random_state)\n\n # Force data to 2D numpy.array\n X, y = check_X_y(X, y, multi_output=True, y_numeric=True)\n self.y_ndim_ = y.ndim\n if y.ndim == 1:\n y = y[:, np.newaxis]\n\n # Check shapes of DOE & observations\n n_samples, n_features = X.shape\n _, n_targets = y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n # Normalize data or don't\n if self.normalize:\n X_mean = np.mean(X, axis=0)\n X_std = np.std(X, axis=0)\n y_mean = np.mean(y, axis=0)\n y_std = np.std(y, axis=0)\n X_std[X_std == 0.] = 1.\n y_std[y_std == 0.] = 1.\n # center and scale X if necessary\n X = (X - X_mean) / X_std\n y = (y - y_mean) / y_std\n else:\n X_mean = np.zeros(1)\n X_std = np.ones(1)\n y_mean = np.zeros(1)\n y_std = np.ones(1)\n\n # Calculate matrix of distances D between samples\n D, ij = l1_cross_distances(X)\n if (np.min(np.sum(D, axis=1)) == 0. and self.corr != correlation.pure_nugget): # noqa\n raise Exception(\"Multiple input features cannot have the same\"\n \" target value.\")\n\n # Regression matrix and parameters\n F = self.regr(X)\n n_samples_F = F.shape[0]\n if F.ndim > 1:\n p = F.shape[1]\n else:\n p = 1\n if n_samples_F != n_samples:\n raise Exception(\"Number of rows in F and X do not match. Most \"\n \"likely something is going wrong with the \"\n \"regression model.\")\n if p > n_samples_F:\n raise Exception((\"Ordinary least squares problem is undetermined \"\n \"n_samples=%d must be greater than the \"\n \"regression model size p=%d.\") % (n_samples, p))\n if self.beta0 is not None:\n if self.beta0.shape[0] != p:\n raise Exception(\"Shapes of beta0 and F do not match.\")\n\n # Set attributes\n self.X = X\n self.y = y\n self.D = D\n self.ij = ij\n self.F = F\n self.X_mean, self.X_std = X_mean, X_std\n self.y_mean, self.y_std = y_mean, y_std\n\n # Determine Gaussian Process model parameters\n if self.thetaL is not None and self.thetaU is not None:\n # Maximum Likelihood Estimation of the parameters\n if self.verbose:\n print(\"Performing Maximum Likelihood Estimation of the \"\n \"autocorrelation parameters...\")\n self.theta_, self.reduced_likelihood_function_value_, par = \\\n self._arg_max_reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad parameter region. \"\n \"Try increasing upper bound\")\n\n else:\n # Given parameters\n if self.verbose:\n print(\"Given autocorrelation parameters. 
\"\n \"Computing Gaussian Process model parameters...\")\n self.theta_ = self.theta0\n self.reduced_likelihood_function_value_, par = \\\n self.reduced_likelihood_function()\n if np.isinf(self.reduced_likelihood_function_value_):\n raise Exception(\"Bad point. Try increasing theta0.\")\n\n self.beta = par['beta']\n self.gamma = par['gamma']\n self.sigma2 = par['sigma2']\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n if self.storage_mode == 'light':\n # Delete heavy data (it will be computed again if required)\n # (it is required only when MSE is wanted in self.predict)\n if self.verbose:\n print(\"Light storage mode specified. \"\n \"Flushing autocorrelation matrix...\")\n self.D = None\n self.ij = None\n self.F = None\n self.C = None\n self.Ft = None\n self.G = None\n\n return self\n\n def predict(self, X, eval_MSE=False, batch_size=None):\n \"\"\"\n This function evaluates the Gaussian Process model at x.\n\n Parameters\n ----------\n X : array_like\n An array with shape (n_eval, n_features) giving the point(s) at\n which the prediction(s) should be made.\n\n eval_MSE : boolean, optional\n A boolean specifying whether the Mean Squared Error should be\n evaluated or not.\n Default assumes evalMSE = False and evaluates only the BLUP (mean\n prediction).\n\n batch_size : integer, optional\n An integer giving the maximum number of points that can be\n evaluated simultaneously (depending on the available memory).\n Default is None so that all given points are evaluated at the same\n time.\n\n Returns\n -------\n y : array_like, shape (n_samples, ) or (n_samples, n_targets)\n An array with shape (n_eval, ) if the Gaussian Process was trained\n on an array of shape (n_samples, ) or an array with shape\n (n_eval, n_targets) if the Gaussian Process was trained on an array\n of shape (n_samples, n_targets) with the Best Linear Unbiased\n Prediction at x.\n\n MSE : array_like, optional (if eval_MSE == True)\n An array with shape (n_eval, ) or (n_eval, n_targets) as with y,\n with the Mean Squared Error at x.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n # Check input shapes\n X = check_array(X)\n n_eval, _ = X.shape\n n_samples, n_features = self.X.shape\n n_samples_y, n_targets = self.y.shape\n\n # Run input checks\n self._check_params(n_samples)\n\n if X.shape[1] != n_features:\n raise ValueError((\"The number of features in X (X.shape[1] = %d) \"\n \"should match the number of features used \"\n \"for fit() \"\n \"which is %d.\") % (X.shape[1], n_features))\n\n if batch_size is None:\n # No memory management\n # (evaluates all given points in a single batch run)\n\n # Normalize input\n X = (X - self.X_mean) / self.X_std\n\n # Initialize output\n y = np.zeros(n_eval)\n if eval_MSE:\n MSE = np.zeros(n_eval)\n\n # Get pairwise componentwise L1-distances to the input training set\n dx = manhattan_distances(X, Y=self.X, sum_over_features=False)\n # Get regression function and correlation\n f = self.regr(X)\n r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)\n\n # Scaled predictor\n y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)\n\n # Predictor\n y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)\n\n if self.y_ndim_ == 1:\n y = y.ravel()\n\n # Mean Squared Error\n if eval_MSE:\n C = self.C\n if C is None:\n # Light storage mode (need to recompute C, F, Ft and G)\n if self.verbose:\n print(\"This GaussianProcess used 'light' storage mode \"\n \"at instantiation. 
Need to recompute \"\n \"autocorrelation matrix...\")\n reduced_likelihood_function_value, par = \\\n self.reduced_likelihood_function()\n self.C = par['C']\n self.Ft = par['Ft']\n self.G = par['G']\n\n rt = linalg.solve_triangular(self.C, r.T, lower=True)\n\n if self.beta0 is None:\n # Universal Kriging\n u = linalg.solve_triangular(self.G.T,\n np.dot(self.Ft.T, rt) - f.T,\n lower=True)\n else:\n # Ordinary Kriging\n u = np.zeros((n_targets, n_eval))\n\n MSE = np.dot(self.sigma2.reshape(n_targets, 1),\n (1. - (rt ** 2.).sum(axis=0)\n + (u ** 2.).sum(axis=0))[np.newaxis, :])\n MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)\n\n # Mean Squared Error might be slightly negative depending on\n # machine precision: force to zero!\n MSE[MSE < 0.] = 0.\n\n if self.y_ndim_ == 1:\n MSE = MSE.ravel()\n\n return y, MSE\n\n else:\n\n return y\n\n else:\n # Memory management\n\n if type(batch_size) is not int or batch_size <= 0:\n raise Exception(\"batch_size must be a positive integer\")\n\n if eval_MSE:\n\n y, MSE = np.zeros(n_eval), np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to], MSE[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y, MSE\n\n else:\n\n y = np.zeros(n_eval)\n for k in range(max(1, int(n_eval / batch_size))):\n batch_from = k * batch_size\n batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])\n y[batch_from:batch_to] = \\\n self.predict(X[batch_from:batch_to],\n eval_MSE=eval_MSE, batch_size=None)\n\n return y\n\n def reduced_likelihood_function(self, theta=None):\n \"\"\"\n This function determines the BLUP parameters and evaluates the reduced\n likelihood function for the given autocorrelation parameters theta.\n\n Maximizing this function wrt the autocorrelation parameters theta is\n equivalent to maximizing the likelihood of the assumed joint Gaussian\n distribution of the observations y evaluated onto the design of\n experiments X.\n\n Parameters\n ----------\n theta : array_like, optional\n An array containing the autocorrelation parameters at which the\n Gaussian Process model parameters should be determined.\n Default uses the built-in autocorrelation parameters\n (ie ``theta = self.theta_``).\n\n Returns\n -------\n reduced_likelihood_function_value : double\n The value of the reduced likelihood function associated to the\n given autocorrelation parameters theta.\n\n par : dict\n A dictionary containing the requested Gaussian Process model\n parameters:\n\n - ``sigma2`` is the Gaussian Process variance.\n - ``beta`` is the generalized least-squares regression weights for\n Universal Kriging or given beta0 for Ordinary Kriging.\n - ``gamma`` is the Gaussian Process weights.\n - ``C`` is the Cholesky decomposition of the correlation\n matrix [R].\n - ``Ft`` is the solution of the linear equation system\n [R] x Ft = F\n - ``G`` is the QR decomposition of the matrix Ft.\n \"\"\"\n check_is_fitted(self, \"X\")\n\n if theta is None:\n # Use built-in autocorrelation parameters\n theta = self.theta_\n\n # Initialize output\n reduced_likelihood_function_value = - np.inf\n par = {}\n\n # Retrieve data\n n_samples = self.X.shape[0]\n D = self.D\n ij = self.ij\n F = self.F\n\n if D is None:\n # Light storage mode (need to recompute D, ij and F)\n D, ij = l1_cross_distances(self.X)\n if (np.min(np.sum(D, axis=1)) == 0.\n and self.corr != correlation.pure_nugget):\n raise 
Exception(\"Multiple X are not allowed\")\n F = self.regr(self.X)\n\n # Set up R\n r = self.corr(theta, D)\n R = np.eye(n_samples) * (1. + self.nugget)\n R[ij[:, 0], ij[:, 1]] = r\n R[ij[:, 1], ij[:, 0]] = r\n\n # Cholesky decomposition of R\n try:\n C = linalg.cholesky(R, lower=True)\n except linalg.LinAlgError:\n return reduced_likelihood_function_value, par\n\n # Get generalized least squares solution\n Ft = linalg.solve_triangular(C, F, lower=True)\n Q, G = linalg.qr(Ft, mode='economic')\n\n sv = linalg.svd(G, compute_uv=False)\n rcondG = sv[-1] / sv[0]\n if rcondG < 1e-10:\n # Check F\n sv = linalg.svd(F, compute_uv=False)\n condF = sv[0] / sv[-1]\n if condF > 1e15:\n raise Exception(\"F is too ill conditioned. Poor combination \"\n \"of regression model and observations.\")\n else:\n # Ft is too ill conditioned, get out (try different theta)\n return reduced_likelihood_function_value, par\n\n Yt = linalg.solve_triangular(C, self.y, lower=True)\n if self.beta0 is None:\n # Universal Kriging\n beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))\n else:\n # Ordinary Kriging\n beta = np.array(self.beta0)\n\n rho = Yt - np.dot(Ft, beta)\n sigma2 = (rho ** 2.).sum(axis=0) / n_samples\n # The determinant of R is equal to the squared product of the diagonal\n # elements of its Cholesky decomposition C\n detR = (np.diag(C) ** (2. / n_samples)).prod()\n\n # Compute/Organize output\n reduced_likelihood_function_value = - sigma2.sum() * detR\n par['sigma2'] = sigma2 * self.y_std ** 2.\n par['beta'] = beta\n par['gamma'] = linalg.solve_triangular(C.T, rho)\n par['C'] = C\n par['Ft'] = Ft\n par['G'] = G\n\n return reduced_likelihood_function_value, par\n\n def _arg_max_reduced_likelihood_function(self):\n \"\"\"\n This function estimates the autocorrelation parameters theta as the\n maximizer of the reduced likelihood function.\n (Minimization of the opposite reduced likelihood function is used for\n convenience)\n\n Parameters\n ----------\n self : All parameters are stored in the Gaussian Process model object.\n\n Returns\n -------\n optimal_theta : array_like\n The best set of autocorrelation parameters (the sought maximizer of\n the reduced likelihood function).\n\n optimal_reduced_likelihood_function_value : double\n The optimal reduced likelihood function value.\n\n optimal_par : dict\n The BLUP parameters associated to thetaOpt.\n \"\"\"\n\n # Initialize output\n best_optimal_theta = []\n best_optimal_rlf_value = []\n best_optimal_par = []\n\n if self.verbose:\n print(\"The chosen optimizer is: \" + str(self.optimizer))\n if self.random_start > 1:\n print(str(self.random_start) + \" random starts are required.\")\n\n percent_completed = 0.\n\n # Force optimizer to fmin_cobyla if the model is meant to be isotropic\n if self.optimizer == 'Welch' and self.theta0.size == 1:\n self.optimizer = 'fmin_cobyla'\n\n if self.optimizer == 'fmin_cobyla':\n\n def minus_reduced_likelihood_function(log10t):\n return - self.reduced_likelihood_function(\n theta=10. 
** log10t)[0]\n\n constraints = []\n for i in range(self.theta0.size):\n constraints.append(lambda log10t, i=i:\n log10t[i] - np.log10(self.thetaL[0, i]))\n constraints.append(lambda log10t, i=i:\n np.log10(self.thetaU[0, i]) - log10t[i])\n\n for k in range(self.random_start):\n\n if k == 0:\n # Use specified starting point as first guess\n theta0 = self.theta0\n else:\n # Generate a random starting point log10-uniformly\n # distributed between bounds\n log10theta0 = (np.log10(self.thetaL)\n + self.random_state.rand(*self.theta0.shape)\n * np.log10(self.thetaU / self.thetaL))\n theta0 = 10. ** log10theta0\n\n # Run Cobyla\n try:\n log10_optimal_theta = \\\n optimize.fmin_cobyla(minus_reduced_likelihood_function,\n np.log10(theta0).ravel(),\n constraints)\n except ValueError as ve:\n print(\"Optimization failed. Try increasing the ``nugget``\")\n raise ve\n\n optimal_theta = 10. ** log10_optimal_theta\n optimal_rlf_value, optimal_par = \\\n self.reduced_likelihood_function(theta=optimal_theta)\n\n # Compare the new optimizer to the best previous one\n if k > 0:\n if optimal_rlf_value > best_optimal_rlf_value:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n else:\n best_optimal_rlf_value = optimal_rlf_value\n best_optimal_par = optimal_par\n best_optimal_theta = optimal_theta\n if self.verbose and self.random_start > 1:\n if (20 * k) / self.random_start > percent_completed:\n percent_completed = (20 * k) / self.random_start\n print(\"%s completed\" % (5 * percent_completed))\n\n optimal_rlf_value = best_optimal_rlf_value\n optimal_par = best_optimal_par\n optimal_theta = best_optimal_theta\n\n elif self.optimizer == 'Welch':\n\n # Backup of the given attributes\n theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU\n corr = self.corr\n verbose = self.verbose\n\n # This will iterate over fmin_cobyla optimizer\n self.optimizer = 'fmin_cobyla'\n self.verbose = False\n\n # Initialize under isotropy assumption\n if verbose:\n print(\"Initialize under isotropy assumption...\")\n self.theta0 = check_array(self.theta0.min())\n self.thetaL = check_array(self.thetaL.min())\n self.thetaU = check_array(self.thetaU.max())\n theta_iso, optimal_rlf_value_iso, par_iso = \\\n self._arg_max_reduced_likelihood_function()\n optimal_theta = theta_iso + np.zeros(theta0.shape)\n\n # Iterate over all dimensions of theta allowing for anisotropy\n if verbose:\n print(\"Now improving allowing for anisotropy...\")\n for i in self.random_state.permutation(theta0.size):\n if verbose:\n print(\"Proceeding along dimension %d...\" % (i + 1))\n self.theta0 = check_array(theta_iso)\n self.thetaL = check_array(thetaL[0, i])\n self.thetaU = check_array(thetaU[0, i])\n\n def corr_cut(t, d):\n return corr(check_array(np.hstack(\n [\n optimal_theta[0][0:i], t[0],\n optimal_theta[0][(i + 1)::]\n ]\n )), d)\n\n self.corr = corr_cut\n optimal_theta[0, i], optimal_rlf_value, optimal_par = \\\n self._arg_max_reduced_likelihood_function()\n\n # Restore the given attributes\n self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU\n self.corr = corr\n self.optimizer = 'Welch'\n self.verbose = verbose\n\n else:\n\n raise NotImplementedError(\"This optimizer ('%s') is not \"\n \"implemented yet. 
Please contribute!\"\n % self.optimizer)\n\n return optimal_theta, optimal_rlf_value, optimal_par\n\n def _check_params(self, n_samples=None):\n\n # Check regression model\n if not callable(self.regr):\n if self.regr in self._regression_types:\n self.regr = self._regression_types[self.regr]\n else:\n raise ValueError(\"regr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._regression_types.keys(), self.regr))\n\n # Check regression weights if given (Ordinary Kriging)\n if self.beta0 is not None:\n self.beta0 = np.atleast_2d(self.beta0)\n if self.beta0.shape[1] != 1:\n # Force to column vector\n self.beta0 = self.beta0.T\n\n # Check correlation model\n if not callable(self.corr):\n if self.corr in self._correlation_types:\n self.corr = self._correlation_types[self.corr]\n else:\n raise ValueError(\"corr should be one of %s or callable, \"\n \"%s was given.\"\n % (self._correlation_types.keys(), self.corr))\n\n # Check storage mode\n if self.storage_mode != 'full' and self.storage_mode != 'light':\n raise ValueError(\"Storage mode should either be 'full' or \"\n \"'light', %s was given.\" % self.storage_mode)\n\n # Check correlation parameters\n self.theta0 = np.atleast_2d(self.theta0)\n lth = self.theta0.size\n\n if self.thetaL is not None and self.thetaU is not None:\n self.thetaL = np.atleast_2d(self.thetaL)\n self.thetaU = np.atleast_2d(self.thetaU)\n if self.thetaL.size != lth or self.thetaU.size != lth:\n raise ValueError(\"theta0, thetaL and thetaU must have the \"\n \"same length.\")\n if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):\n raise ValueError(\"The bounds must satisfy O < thetaL <= \"\n \"thetaU.\")\n\n elif self.thetaL is None and self.thetaU is None:\n if np.any(self.theta0 <= 0):\n raise ValueError(\"theta0 must be strictly positive.\")\n\n elif self.thetaL is None or self.thetaU is None:\n raise ValueError(\"thetaL and thetaU should either be both or \"\n \"neither specified.\")\n\n # Force verbose type to bool\n self.verbose = bool(self.verbose)\n\n # Force normalize type to bool\n self.normalize = bool(self.normalize)\n\n # Check nugget value\n self.nugget = np.asarray(self.nugget)\n if np.any(self.nugget) < 0.:\n raise ValueError(\"nugget must be positive or zero.\")\n if (n_samples is not None\n and self.nugget.shape not in [(), (n_samples,)]):\n raise ValueError(\"nugget must be either a scalar \"\n \"or array of length n_samples.\")\n\n # Check optimizer\n if self.optimizer not in self._optimizer_types:\n raise ValueError(\"optimizer should be one of %s\"\n % self._optimizer_types)\n\n # Force random_start type to int\n self.random_start = int(self.random_start)" }, { "identifier": "UtilityFunction", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class UtilityFunction(object):\n \"\"\"\n An object to compute the acquisition functions.\n \"\"\"\n\n def __init__(self, kind, kappa, xi):\n \"\"\"\n If UCB is to be used, a constant kappa is needed.\n \"\"\"\n self.kappa = kappa\n self.xi = xi\n if kind not in ['ucb', 'ei', 'poi']:\n err = \"The utility function \" \\\n \"{} has not been implemented, \" \\\n \"please choose one of ucb, ei, or poi.\".format(kind)\n raise NotImplementedError(err)\n else:\n self.kind = kind\n\n def utility(self, x, gp, y_max):\n if self.kind == 'ucb':\n return self._ucb(x, gp, self.kappa)\n if self.kind == 'ei':\n return self._ei(x, gp, y_max, self.xi)\n if self.kind == 'poi':\n return self._poi(x, gp, y_max, self.xi)\n\n 
@staticmethod\n def _ucb(x, gp, kappa):\n mean, var = gp.predict(x, eval_MSE=True)\n return mean + kappa * np.sqrt(var)\n\n @staticmethod\n def _ei(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return (mean - y_max - xi) * norm.cdf(z) + np.sqrt(var) * norm.pdf(z)\n\n @staticmethod\n def _poi(x, gp, y_max, xi):\n mean, var = gp.predict(x, eval_MSE=True)\n\n # Avoid points with zero variance\n var = np.maximum(var, 1e-9 + 0 * var)\n\n z = (mean - y_max - xi) / np.sqrt(var)\n return norm.cdf(z)" }, { "identifier": "unique_rows", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "def unique_rows(a):\n \"\"\"\n A functions to trim repeated rows that may appear when optimizing.\n This is necessary to avoid the sklearn GP object from breaking\n\n :param a: array to trim repeated rows from\n\n :return: mask of unique rows\n \"\"\"\n\n # Sort array and kep track of where things should go back to\n order = np.lexsort(a.T)\n reorder = np.argsort(order)\n\n a = a[order]\n diff = np.diff(a, axis=0)\n ui = np.ones(len(a), 'bool')\n ui[1:] = (diff != 0).any(axis=1)\n\n return ui[reorder]" }, { "identifier": "PrintLog", "path": "energy_app/packages/forecast-api/forecast_api/models/optimization/opt_algorithms/bayesian_opt/helpers.py", "snippet": "class PrintLog(object):\n\n def __init__(self, params):\n\n self.ymax = None\n self.xmax = None\n self.params = params\n self.ite = 1\n\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n # sizes of parameters name and all\n self.sizes = [max(len(ps), 7) for ps in params]\n\n # Sorted indexes to access parameters\n self.sorti = sorted(range(len(self.params)),\n key=self.params.__getitem__)\n\n def reset_timer(self):\n self.start_time = datetime.now()\n self.last_round = datetime.now()\n\n def print_header(self, initialization=True):\n\n if initialization:\n print(\"{}Initialization{}\".format(BColours.RED,\n BColours.ENDC))\n else:\n print(\"{}Bayesian Optimization{}\".format(BColours.RED,\n BColours.ENDC))\n\n print(BColours.BLUE + \"-\" * (29 + sum([s + 5 for s in self.sizes]))\n + BColours.ENDC)\n\n print(\"{0:>{1}}\".format(\"Step\", 5), end=\" | \")\n print(\"{0:>{1}}\".format(\"Time\", 6), end=\" | \")\n print(\"{0:>{1}}\".format(\"Value\", 10), end=\" | \")\n\n for index in self.sorti:\n print(\"{0:>{1}}\".format(self.params[index],\n self.sizes[index] + 2),\n end=\" | \")\n print('')\n\n def print_step(self, x, y, warning=False):\n\n print(\"{:>5d}\".format(self.ite), end=\" | \")\n\n m, s = divmod((datetime.now() - self.last_round).total_seconds(), 60)\n print(\"{:>02d}m{:>02d}s\".format(int(m), int(s)), end=\" | \")\n\n if self.ymax is None or self.ymax < y:\n self.ymax = y\n self.xmax = x\n print(\"{0}{2: >10.5f}{1}\".format(BColours.MAGENTA,\n BColours.ENDC,\n y),\n end=\" | \")\n\n for index in self.sorti:\n print(\"{0}{2: >{3}.{4}f}{1}\".format(BColours.GREEN,\n BColours.ENDC,\n x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n else:\n print(\"{: >10.5f}\".format(y), end=\" | \")\n for index in self.sorti:\n print(\"{0: >{1}.{2}f}\".format(x[index],\n self.sizes[index] + 2,\n min(self.sizes[index] - 3,\n 6 - 2)),\n end=\" | \")\n\n if warning:\n print(\"{}Warning: Test point chose at \"\n \"random due to repeated sample.{}\".format(BColours.RED,\n BColours.ENDC))\n\n 
print()\n\n self.last_round = datetime.now()\n self.ite += 1\n\n def print_summary(self):\n pass" } ]
import numpy as np
from .helpers import GaussianProcess
from scipy.optimize import minimize
from .helpers import UtilityFunction, unique_rows, PrintLog
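Illustrative sketch (not a field of this record): the UtilityFunction snippet in the record's context implements three acquisition functions (UCB, EI, POI). The minimal Python sketch below restates the expected-improvement branch; the name expected_improvement is invented for illustration, and gp stands for any model exposing the legacy predict(x, eval_MSE=True) -> (mean, variance) interface used throughout this record.

# Illustrative only -- mirrors UtilityFunction._ei from the context snippet above.
import numpy as np
from scipy.stats import norm

def expected_improvement(x, gp, y_max, xi=0.0):
    """EI at candidate points x for a maximization problem."""
    mean, var = gp.predict(x, eval_MSE=True)   # legacy sklearn-style GP interface
    var = np.maximum(var, 1e-9)                # avoid division by zero variance
    z = (mean - y_max - xi) / np.sqrt(var)
    return (mean - y_max - xi) * norm.cdf(z) + np.sqrt(var) * norm.pdf(z)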
11,581
# Updates the flag self.initialized = True def explore(self, points_dict): """ Method to explore user defined points :param points_dict: :return: """ # Consistency check param_tup_lens = [] for key in self.keys: param_tup_lens.append(len(list(points_dict[key]))) if all([e == param_tup_lens[0] for e in param_tup_lens]): pass else: raise ValueError('The same number of initialization points ' 'must be entered for every parameter.') # Turn into list of lists all_points = [] for key in self.keys: all_points.append(points_dict[key]) # Take transpose of list self.init_points = list(map(list, zip(*all_points))) def initialize(self, points_dict): """ Method to introduce point for which the target function value is known :param points_dict: :return: """ for target in points_dict: self.y_init.append(target) all_points = [] for key in self.keys: all_points.append(points_dict[target][key]) self.x_init.append(all_points) def set_bounds(self, new_bounds): """ A method that allows changing the lower and upper searching bounds :param new_bounds: A dictionary with the parameter name and its new bounds """ # Update the internal object stored dict self.pbounds.update(new_bounds) # Loop through the all bounds and reset the min-max bound matrix for row, key in enumerate(self.pbounds.keys()): # Reset all entries, even if the same. self.bounds[row] = self.pbounds[key] def maximize(self, init_points=5, n_iter=25, acq='ei', kappa=2.576, xi=0.0, **gp_params): """ Main optimization method. Parameters ---------- :param init_points: Number of randomly chosen points to sample the target function before fitting the gp. :param n_iter: Total number of times the process is to repeated. Note that currently this methods does not have stopping criteria (due to a number of reasons), therefore the total number of points to be sampled must be specified. :param acq: Acquisition function to be used, defaults to Expected Improvement. :param gp_params: Parameters to be passed to the Scikit-learn Gaussian Process object Returns ------- :return: Nothing """ # Reset timer self.plog.reset_timer() # Set acquisition function self.util = UtilityFunction(kind=acq, kappa=kappa, xi=xi) # Initialize x, y and find current y_max if not self.initialized: if self.verbose: self.plog.print_header() self.init(init_points) y_max = self.Y.max() # Set parameters if any was passed self.gp.set_params(**gp_params) # Find unique rows of X to avoid GP from breaking
""" BAYESIAN OPTIMIZATION MODULE - Version 0.1.0 Created by Fernando Nogueira (fmfn). Available in - https://github.com/fmfn/BayesianOptimization """ __author__ = 'fmfn' def acq_max(ac, gp, y_max, bounds): """ A function to find the maximum of the acquisition function using the 'L-BFGS-B' method. Parameters ---------- :param ac: The acquisition function object that return its point-wise value. :param gp: A gaussian process fitted to the relevant data. :param y_max: The current maximum known value of the target function. :param bounds: The variables bounds to limit the search of the acq max. Returns ------- :return: x_max, The arg max of the acquisition function. """ # Start with the lower bound as the argmax x_max = bounds[:, 0] max_acq = None x_tries = np.random.uniform(bounds[:, 0], bounds[:, 1], size=(100, bounds.shape[0])) for x_try in x_tries: # Find the minimum of minus the acquisition function res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max), x_try.reshape(1, -1), bounds=bounds, method="L-BFGS-B") # Store it if better than previous minimum(maximum). if max_acq is None or -res.fun >= max_acq: x_max = res.x max_acq = -res.fun # Clip output to make sure it lies within the bounds. Due to floating # point technicalities this is not always the case. return np.clip(x_max, bounds[:, 0], bounds[:, 1]) def matern52(theta, d): """ Matern 5/2 correlation model.:: theta, d --> r(theta, d) = (1+sqrt(5)*r + 5/3*r^2)*exp(-sqrt(5)*r) n where r = sqrt(sum (d_i)^2 / (theta_i)^2 ) i = 1 Parameters ---------- theta : array_like An array with shape 1 (isotropic) or n (anisotropic) giving the autocorrelation parameter(s). d : array_like An array with shape (n_eval, n_features) giving the componentwise distances between locations x and x' at which the correlation model should be evaluated. Returns ------- r : array_like An array with shape (n_eval, ) containing the values of the autocorrelation modle. """ theta = np.asarray(theta, dtype=np.float) d = np.asarray(d, dtype=np.float) if d.ndim > 1: n_features = d.shape[1] else: n_features = 1 if theta.size == 1: r = np.sqrt(np.sum(d ** 2, axis=1)) / theta[0] elif theta.size != n_features: raise ValueError("Length of theta must be 1 or %s" % n_features) else: r = np.sqrt(np.sum(d ** 2 / theta.reshape(1, n_features) ** 2, axis=1)) return (1 + np.sqrt(5) * r + 5 / 3. * r ** 2) * np.exp(-np.sqrt(5) * r) class BayesianOptimization(object): def __init__(self, f, pbounds, verbose=1): """ :param f: Function to be maximized. :param pbounds: Dictionary with parameters names as keys and a tuple with minimum and maximum values. :param verbose: Whether or not to print progress. """ # Store the original dictionary self.pbounds = pbounds # Get the name of the parameters self.keys = list(pbounds.keys()) # Find number of parameters self.dim = len(pbounds) # Create an array with parameters bounds self.bounds = [] for key in self.pbounds.keys(): self.bounds.append(self.pbounds[key]) self.bounds = np.asarray(self.bounds) # Some function to be optimized self.f = f # Initialization flag self.initialized = False # Initialization lists --- stores starting points before process begins self.init_points = [] self.x_init = [] self.y_init = [] # Numpy array place holders self.X = None self.Y = None # Counter of iterations self.i = 0 # Since scipy 0.16 passing lower and upper bound to theta seems to be # broken. However, there is a lot of development going on around GP # is scikit-learn. So I'll pick the easy route here and simple specify # only theta0. 
self.gp = GaussianProcess(corr=matern52, theta0=np.random.uniform(0.001, 0.05, self.dim), thetaL=1e-5 * np.ones(self.dim), thetaU=1e0 * np.ones(self.dim), random_start=30) # Utility Function placeholder self.util = None # PrintLog object self.plog = PrintLog(self.keys) # Output dictionary self.res = {} # Output dictionary self.res['max'] = {'max_val': None, 'max_params': None} self.res['all'] = {'values': [], 'params': []} # Verbose self.verbose = verbose def init(self, init_points): """ Initialization method to kick start the optimization process. It is a combination of points passed by the user, and randomly sampled ones. :param init_points: Number of random points to probe. """ # Generate random points rp = [np.random.uniform(x[0], x[1], size=init_points) for x in self.bounds] # Concatenate new random points to possible existing # points from self.explore method. self.init_points += list(map(list, zip(*rp))) # Create empty list to store the new values of the function y_init = [] # Evaluate target function at all initialization # points (random + explore) for x in self.init_points: y_init.append(self.f(**dict(zip(self.keys, x)))) if self.verbose: self.plog.print_step(x, y_init[-1]) # Append any other points passed by the self.initialize method (these # also have a corresponding target value passed by the user). self.init_points += self.x_init # Append the target value of self.initialize method. y_init += self.y_init # Turn it into np array and store. self.X = np.asarray(self.init_points) self.Y = np.asarray(y_init) # Updates the flag self.initialized = True def explore(self, points_dict): """ Method to explore user defined points :param points_dict: :return: """ # Consistency check param_tup_lens = [] for key in self.keys: param_tup_lens.append(len(list(points_dict[key]))) if all([e == param_tup_lens[0] for e in param_tup_lens]): pass else: raise ValueError('The same number of initialization points ' 'must be entered for every parameter.') # Turn into list of lists all_points = [] for key in self.keys: all_points.append(points_dict[key]) # Take transpose of list self.init_points = list(map(list, zip(*all_points))) def initialize(self, points_dict): """ Method to introduce point for which the target function value is known :param points_dict: :return: """ for target in points_dict: self.y_init.append(target) all_points = [] for key in self.keys: all_points.append(points_dict[target][key]) self.x_init.append(all_points) def set_bounds(self, new_bounds): """ A method that allows changing the lower and upper searching bounds :param new_bounds: A dictionary with the parameter name and its new bounds """ # Update the internal object stored dict self.pbounds.update(new_bounds) # Loop through the all bounds and reset the min-max bound matrix for row, key in enumerate(self.pbounds.keys()): # Reset all entries, even if the same. self.bounds[row] = self.pbounds[key] def maximize(self, init_points=5, n_iter=25, acq='ei', kappa=2.576, xi=0.0, **gp_params): """ Main optimization method. Parameters ---------- :param init_points: Number of randomly chosen points to sample the target function before fitting the gp. :param n_iter: Total number of times the process is to repeated. Note that currently this methods does not have stopping criteria (due to a number of reasons), therefore the total number of points to be sampled must be specified. :param acq: Acquisition function to be used, defaults to Expected Improvement. 
:param gp_params: Parameters to be passed to the Scikit-learn Gaussian Process object Returns ------- :return: Nothing """ # Reset timer self.plog.reset_timer() # Set acquisition function self.util = UtilityFunction(kind=acq, kappa=kappa, xi=xi) # Initialize x, y and find current y_max if not self.initialized: if self.verbose: self.plog.print_header() self.init(init_points) y_max = self.Y.max() # Set parameters if any was passed self.gp.set_params(**gp_params) # Find unique rows of X to avoid GP from breaking
ur = unique_rows(self.X)
2
2023-11-17 09:23:38+00:00
16k
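Illustrative sketch (not part of the record): the gold next_line of this record, ur = unique_rows(self.X), builds a duplicate-row mask with the unique_rows helper shown in the context, so the GP is fitted only on distinct sample points. Below is a self-contained demo of that helper and its typical use, with example data invented here.

# Illustrative only -- unique_rows is copied from the record's context snippet.
import numpy as np

def unique_rows(a):
    order = np.lexsort(a.T)            # sort rows, remembering how to undo the sort
    reorder = np.argsort(order)
    a = a[order]
    diff = np.diff(a, axis=0)
    ui = np.ones(len(a), 'bool')
    ui[1:] = (diff != 0).any(axis=1)   # a row is "unique" if it differs from its neighbour
    return ui[reorder]

X = np.array([[0.0, 1.0],
              [0.0, 1.0],              # duplicate of the first row
              [2.0, 3.0]])
mask = unique_rows(X)                  # -> array([ True, False,  True])
X_unique = X[mask]                     # rows a GP would actually be fitted on

In the maximize() loop shown in all_code, a natural next step would be fitting on self.X[ur] and self.Y[ur], which matches the comment "Find unique rows of X to avoid GP from breaking"; only the single gold line itself is confirmed by the record.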
OpenBMB/XAgent
XAgentServer/application/websockets/base.py
[ { "identifier": "XAgentServerEnv", "path": "XAgentServer/application/core/envs.py", "snippet": "class XAgentServerEnv:\n \"\"\"\n XAgentServer environment variables\n if you change value of the environment variable, you need to restart \n the XAgentServer by running the following command:\n `python start_server.py`\n or start a unicorn server by yourself\n \"\"\"\n app = \"app:app\"\n prod: bool = config.get(\"PROD\", \"False\").lower() == \"true\"\n base_dir = \"XAgentServer\"\n use_redis: bool = False\n recorder_root_dir = \"running_records\"\n # you can set default_login with True,\n # use the default user \"admin\" with token \"xagent-admin\" to login,\n default_login: bool = True\n # only one XAgentServer can be set to check whether the interaction is running.\n check_running: bool = False\n host = \"0.0.0.0\"\n port = 8090\n debug = True\n reload = True\n workers = 1\n share_url = \"https://x-agent.net/api/conv/community\"\n\n class DB:\n \"\"\"\n database config\n \"\"\"\n use_db = True\n db_url = \"mysql+pymysql://root:xagent@localhost:3306/xagent\"\n\n class Redis:\n \"\"\"\n redis config\n \"\"\"\n use_redis = False\n redis_url = \"redis://localhost\"\n redis_host = \"localhost\"\n redis_port = 6379\n redis_db = 0\n redis_password = \"xagent\"\n\n # if you want to use email to send message,\n # you can set send_email to True and set\n # email_host,\n # email_port,\n # email_user,\n # email_password,\n # auth_server\n class Email:\n \"\"\"\n email config\n \"\"\"\n send_email = False\n email_host = \"\"\n email_port = 465\n email_user = \"\"\n email_password = \"\"\n auth_server = \"\"\n\n # if you want to use upload function,\n # you can set upload_dir to the path of the upload directory\n # and set upload_allowed_types of the allowed types\n class Upload:\n \"\"\"\n upload config\n \"\"\"\n upload_dir = \"XAgentServer/localstorage/upload\"\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n upload_allowed_types = [\"image/png\", \"image/jpeg\",\n \"image/gif\", \"text/plain\",\n \"application/msword\", \"pdf\",\n \"txt\", \"pptx\", \"xlsx\",\n \"doc\", \"ppt\", \"xls\",\n \"zip\", \"rar\", \"tar\",\n \"gz\", \"7z\", \"bz2\",\n \"tgz\", \"tbz2\", \"tar.gz\",\n \"tar.bz2\"]" }, { "identifier": "InteractionCRUD", "path": "XAgentServer/application/cruds/interaction.py", "snippet": "class InteractionCRUD(metaclass=abc.ABCMeta):\n \"\"\"\n interaction crud\n \"\"\"\n\n @classmethod\n def search_many_interaction(cls, db: Session) -> list:\n \"\"\"\n search many interaction\n \"\"\"\n try:\n return InteractionDBInterface.search_many_interaction(db=db)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_interaction(cls, db: Session, interaction_id: str) -> InteractionBase | None:\n \"\"\"\n get interaction\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n interaction InteractionBase\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_interaction(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def create_interaction(cls, db: Session, base: InteractionBase):\n \"\"\"\n create interaction\n Args:\n db: db\n base: base\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.create_interaction(db=db, base=base)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: 
{str(e)}\") from e\n \n @classmethod\n def get_ready_interaction(cls, db: Session, user_id: str):\n \"\"\"\n create interaction\n Args:\n db: db\n user_id: user_id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_ready_interaction(db=db, user_id=user_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n\n @classmethod\n def add_parameter(cls, db: Session, parameter: InteractionParameter = None):\n \"\"\"\n add parameter\n Args:\n db: db\n parameter: parameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.add_parameter(db=db, parameter=parameter)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_parameter(cls, db: Session, interaction_id: str) -> list:\n \"\"\"\n get parameter\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n parameter list [InteractionParameter]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n \n @classmethod\n def get_init_parameter(cls, db: Session, interaction_id: str) -> InteractionParameter:\n \"\"\"\n get init parameter\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n parameter InteractionParameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n parameters = InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)\n init_parameter = parameters[0]\n parameter = InteractionParameter.from_json({\"args\": init_parameter, \"interaction_id\": interaction_id, \"parameter_id\": None})\n return parameter\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_interaction_by_user_id(cls,\n db: Session,\n user_id: str,\n page_size: int = 10,\n page_num: int = 1) -> list[dict]:\n \"\"\"\n get interaction by user id\n Args:\n db: db\n user_id: user id\n page_size: page size\n page_num: page num\n Returns:\n interaction list [dict]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n return InteractionDBInterface.search_interaction_by_user_id(db=db,\n user_id=user_id,\n page_size=page_size,\n page_num=page_num)\n\n @classmethod\n def is_exist(cls, db: Session, interaction_id: str) -> bool:\n \"\"\"\n interaction is exist\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n True if interaction is exist, else False\n \n Raises:\n XAgentDBError: XAgent DB Error \n \"\"\"\n try:\n return InteractionDBInterface.is_exist(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction(cls, db: Session, base_data: dict):\n \"\"\"\n update interaction\n Args:\n db: db\n base_data: base data\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_interaction(db=db, base_data=base_data)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction_status(cls,\n db: Session,\n interaction_id: str,\n status: str,\n message: str,\n current_step: int):\n \"\"\"\n update interaction status\n Args:\n db: db\n interaction_id: interaction id\n status: status\n message: message\n 
current_step: current step\n \n Raises:\n XAgentDBError: XAgent DB Error \n \"\"\"\n try:\n InteractionDBInterface.update_interaction_status(\n db=db,\n interaction_id=interaction_id,\n status=status,\n message=message,\n current_step=current_step)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction_parameter(cls,\n db: Session,\n interaction_id: str,\n parameter: InteractionParameter):\n \"\"\"\n update interaction parameter\n Args:\n db: db\n interaction_id: interaction id\n parameter: parameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_interaction_parameter(\n db=db,\n interaction_id=interaction_id,\n parameter=parameter)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def is_running(cls, db: Session, user_id: str):\n \"\"\"\n is running\n Args:\n db: db\n user_id: user id\n Returns:\n True if running, else False\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.is_running(db=db, user_id=user_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def delete_interaction(cls, db: Session, interaction_id: str):\n \"\"\"\n delete interaction\n Args:\n db: db\n interaction_id: interaction id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.delete_interaction(\n db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_shared_interaction(cls,\n db: Session,\n interaction_id: str) -> InteractionBase | None:\n \"\"\"\n get shared interaction\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n interaction InteractionBase, if not found, return None\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_shared_interaction(\n db=db,\n interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_many_shared(cls,\n db: Session,\n page_size: int = 20,\n page_index: int = 1) -> list[dict]:\n \"\"\"\n search many shared\n Args:\n db: db\n page_size: page size\n page_index: page index\n Returns:\n interaction list [dict]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.search_many_shared(db=db,\n page_size=page_size,\n page_index=page_index)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def insert_raw(cls, db: Session, process: XAgentRaw):\n \"\"\"\n insert raw\n Args:\n db: db\n process: process\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.insert_raw(db=db, process=process)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_many_raws(cls, db: Session, interaction_id: str) -> List[XAgentRaw] | None:\n \"\"\"\n search many raws\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n raw list [XAgentRaw]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return [XAgentRaw.from_db(raw) for raw in \n InteractionDBInterface.search_many_raws(db=db, interaction_id=interaction_id)]\n except Exception as e:\n raise 
XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_raw(cls, db: Session, interaction_id: str, node_id: str) -> XAgentRaw | None:\n \"\"\"\n get raw\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n Returns:\n raw XAgentRaw, if not found, return None\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_raw(db=db,\n interaction_id=interaction_id,\n node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_next_send(cls, db: Session, interaction_id: str) -> List[Raw] | None:\n \"\"\"\n get next send\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n raw list [Raw]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_next_send(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_send_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update send flag\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_send_flag(\n db=db, interaction_id=interaction_id, node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_receive_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update receive flag\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_receive_flag(\n db=db, interaction_id=interaction_id, node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_human_data(cls,\n db: Session,\n interaction_id: str,\n node_id: str,\n human_data: dict):\n \"\"\"\n update human data\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n human_data: human data\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_human_data(db=db,\n interaction_id=interaction_id,\n node_id=node_id,\n human_data=human_data)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def insert_error(cls,\n db: Session,\n interaction_id: str,\n message: str,\n status: str = \"failed\"):\n \"\"\"\n insert error\n Args:\n db: db\n interaction_id: interaction id\n message: message\n status: status, default is failed\n Returns:\n raw XAgentRaw\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n process = XAgentRaw(\n node_id=uuid.uuid4().hex,\n interaction_id=interaction_id,\n current=\"\",\n step=0,\n data=message,\n file_list=[],\n status=status,\n do_interrupt=False,\n wait_seconds=0,\n ask_for_human_help=False,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data={},\n human_file_list=[],\n is_send=False,\n is_receive=False,\n include_pictures=False,\n )\n InteractionDBInterface.insert_raw(db=db, process=process)\n return process\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def add_share(cls, db: 
Session, share):\n \"\"\"\n add share\n Args:\n db: db\n share: share\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.add_share(db=db, shared=share)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n \n @classmethod\n def get_finish_status(cls, db: Session, interaction_id: str) -> bool:\n \"\"\"\n get finish status\n \n Args:\n db: db\n interaction_id: interaction id\n \n Returns:\n True if finish, else False\n \"\"\"\n try:\n return InteractionDBInterface.get_finish_status(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e" }, { "identifier": "get_db", "path": "XAgentServer/application/dependence.py", "snippet": "def get_db():\n \"\"\"db\"\"\"\n session = SessionLocal()\n try:\n yield session\n session.commit()\n except Exception as e:\n session.rollback()\n raise e\n finally:\n session.close()" }, { "identifier": "WebsocketResponseBody", "path": "XAgentServer/application/schemas/response_body.py", "snippet": "class WebsocketResponseBody():\n r\"\"\"\n WerSocket 返回值对象\n\n Attributes:\n data: 返回的数据\n\n status: 状态\n\n message: 消息\n\n kwargs: 其他参数, 会被添加到返回值中\n \"\"\"\n\n def __init__(self,\n data: Union[str, dict, list, Json, None],\n status: str = \"success\",\n message: Union[str, None] = None,\n **kwargs):\n self.data = data\n self.status = status\n self.message = message\n self.extend(kwargs)\n\n def to_text(self):\n r\"\"\"\n 返回json格式的字符串\n \"\"\"\n\n return json.dumps(self.__dict__, ensure_ascii=False, indent=2)\n\n def extend(self, extend: dict):\n \"\"\"extend attributes\n \"\"\"\n for key, value in extend.items():\n if key not in self.__dict__.keys():\n self.__dict__[key] = value" }, { "identifier": "XAgentWebSocketConnectError", "path": "XAgentServer/exts/exception_ext.py", "snippet": "class XAgentWebSocketConnectError(XAgentWebSocketError):\n \"\"\"Exception raised for errors in the input.\n\n Attributes:\n message -- explanation of the error\n \"\"\"\n\n def __init__(self, message=\"XAgentWebSocket Connect Error!\"):\n self.message = message\n super().__init__(self.message)" }, { "identifier": "XAgentError", "path": "XAgentServer/exts/exception_ext.py", "snippet": "class XAgentError(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n def __init__(self, message=\"XAgent Error!\"):\n self.message = message\n super().__init__(self.message)" }, { "identifier": "XAgentInteraction", "path": "XAgentServer/interaction.py", "snippet": "class XAgentInteraction(metaclass=abc.ABCMeta):\n \"\"\"\n XAgent 核心交互组件集, 引用: XAgentCE\n Attributes:\n base: 交互基本信息\n parameter: 交互参数\n interrupt: 是否包含中断\n toolserver: 工具服务\n call_method: 调用方式\n wait_seconds: 等待时间\n \n Components:\n logger: 日志\n db: 数据库\n recorder: 运行记录\n toolserver_interface: 工具服务接口\n \n 组件集中的所有组件全局唯一\n\n \"\"\"\n\n def __init__(\n self,\n base: InteractionBase,\n parameter: InteractionParameter,\n interrupt: bool = False,\n call_method: str = \"web\",\n wait_seconds: int = 600,\n ) -> None:\n self.base = base\n self.parameter = parameter\n # 唯一标识当前的执行步骤\n self.current_step = uuid.uuid4().hex\n self.logger = None\n self.interrupt = interrupt\n self.call_method = call_method\n self.wait_seconds = wait_seconds\n self.log_dir = os.path.join(\n os.path.join(XAgentServerEnv.base_dir,\n \"localstorage\",\n \"interact_records\"),\n datetime.now().strftime(\"%Y-%m-%d\"),\n self.base.interaction_id)\n self.human_data = None\n if not 
os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n self.extract_dir = os.path.join(self.log_dir, \"workspace\")\n if not os.path.exists(self.extract_dir):\n os.makedirs(self.extract_dir)\n\n self.db: Session = None\n self.toolserver_interface = None\n\n def register_toolserver_interface(self, toolserver_interface: ToolServerInterface):\n \"\"\"register tool server interface\"\"\"\n self.toolserver_interface = toolserver_interface\n\n def resister_logger(self, logger: Logger):\n \"\"\"\n 注册logger, 根据会话id创建日志文件夹, 并创建日志文件\n \"\"\"\n\n self.logger = logger\n self.logger.info(f\"init interaction: {self.base.interaction_id}\")\n\n def register_db(self, db: Session):\n \"\"\"\n 注册db\n\n Args:\n db: Session对象\n \"\"\"\n self.db = db\n\n def insert_data(self,\n data: dict,\n status=\"\",\n current: str = None,\n is_include_pictures: bool = False,):\n \"\"\"\n 更新缓存, 推送数据\n \"\"\"\n # check alive\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited.\")\n exit(0)\n self.current_step = uuid.uuid4().hex\n\n if status == \"inner\":\n tool_name = data.get(\"using_tools\", {}).get(\n \"tool_name\", \"\") if isinstance(data, dict) else \"\"\n\n if tool_name == \"subtask_submit\":\n status = StatusEnum.SUBMIT\n\n # download workspace files\n self.download_files()\n\n file_list = os.listdir(self.extract_dir)\n\n # insert raw\n process = XAgentRaw(\n node_id=self.current_step,\n interaction_id=self.base.interaction_id,\n current=current,\n step=0,\n data=data,\n file_list=file_list,\n status=status,\n do_interrupt=self.interrupt,\n wait_seconds=0,\n ask_for_human_help=False,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data=None,\n human_file_list=[],\n is_send=self.call_method != 'web',\n is_receive=False,\n include_pictures=is_include_pictures,\n )\n if status == StatusEnum.FINISHED:\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=StatusEnum.FINISHED,\n message=\"finished\",\n current_step=self.current_step)\n else:\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=\"running\",\n message=\"running\",\n current_step=self.current_step)\n InteractionCRUD.insert_raw(db=self.db, process=process)\n if self.call_method == \"web\":\n redis.set_key(self.base.interaction_id + \"_send\", 1)\n elif self.call_method == \"cmd\":\n # print workspace file list\n file_list_str = \", \".join(file_list) \n self.logger.typewriter_log(\n title=f\"-=-=-=-=-=-=-= {self.base.interaction_id}, {self.current_step}, WORKSPACE FILE LIST -=-=-=-=-=-=-=\\n\",\n title_color=Fore.GREEN,\n content=f\"[{file_list_str}] in {self.extract_dir}\"\n )\n\n def download_files(self):\n \"\"\"download files\n\n Returns:\n Boolean: True or False\n \"\"\"\n try:\n save_path = self.toolserver_interface.download_all_files()\n\n if os.path.exists(save_path):\n zip_file = zipfile.ZipFile(save_path)\n zip_list = zip_file.namelist() # 得到压缩包里所有文件\n for f in zip_list:\n zip_file.extract(f, self.extract_dir) # 循环解压文件到指定目录\n\n zip_file.close()\n return True\n except zipfile.BadZipFile:\n return False\n\n def receive(self, can_modify=None):\n \"\"\"\n 接收数据\n \"\"\"\n\n if self.call_method == \"web\":\n wait = 0\n while wait < self.wait_seconds:\n human_data = self.get_human_data()\n if human_data is not None:\n return 
human_data\n else:\n wait += 2\n time.sleep(2)\n\n raise XAgentTimeoutError(\"等待数据超时,关闭连接\")\n else:\n print(can_modify)\n\n def get_human_data(self):\n \"\"\"\n 获取人类数据\n \"\"\"\n # check alive, ensure the interaction is alive\n # if The user terminated this action and exited\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited!\")\n exit(0)\n receive_key = self.base.interaction_id + \"_\" + self.current_step + \"_receive\"\n is_receive = redis.get_key(receive_key)\n\n if is_receive:\n raw = InteractionCRUD.get_raw(\n db=self.db, interaction_id=self.base.interaction_id, node_id=self.current_step)\n\n if raw and raw.is_human and raw.is_receive:\n redis.delete_key(receive_key)\n return raw.human_data\n\n return None\n\n def ask_for_human_help(self, data):\n \"\"\"调用工具时,请求人类帮助\n Execute the tool and ask for human help\n \"\"\"\n\n self.current_step = uuid.uuid4().hex\n self.download_files()\n file_list = os.listdir(self.extract_dir)\n # special: ask for human help and do interrupt\n # send data\n process = XAgentRaw(\n node_id=self.current_step,\n interaction_id=self.base.interaction_id,\n current=self.current_step,\n step=0,\n data=data,\n file_list=file_list,\n status=StatusEnum.ASK_FOR_HUMAN_HELP,\n do_interrupt=True,\n wait_seconds=0,\n ask_for_human_help=True,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data=None,\n human_file_list=[],\n is_send=False,\n is_receive=False,\n include_pictures=False,\n )\n\n # insert into mysql\n InteractionCRUD.insert_raw(db=self.db, process=process)\n\n # set redis\n redis.set_key(self.base.interaction_id + \"_send\", 1)\n\n # set status\n\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=StatusEnum.ASK_FOR_HUMAN_HELP,\n message=\"ask for human help\",\n current_step=self.current_step)\n\n # check alive\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited!\")\n exit(0)\n\n # wait for human data\n wait = 0\n while wait < self.wait_seconds:\n human_data = self.get_human_data()\n if human_data is not None:\n return human_data\n else:\n wait += 2\n time.sleep(2)\n\n raise XAgentTimeoutError(\"ASK-For-Human-Data: 等待数据超时,关闭连接\")" }, { "identifier": "Logger", "path": "XAgentServer/loggers/logs.py", "snippet": "class Logger(metaclass=abc.ABCMeta):\n \"\"\"\n Logger that handle titles in different colors.\n Outputs logs in console, activity.log, and errors.log\n For console handler: simulates typing\n \"\"\"\n\n def __init__(self, log_dir: str = None, log_name: str= \"\", log_file: str = \"activity.log\", error_file: str = \"errors.log\"):\n \"\"\"init\"\"\"\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # create log directory if it doesn't exist\n self.log_name = time.strftime(\"%Y-%m-%d\", time.localtime()) if not log_name else log_name\n self.logger = logging.getLogger(self.log_name)\n console_formatter = RecordFormatter(\"%(title_color)s %(message)s\")\n\n # Create a handler for console which simulate typing\n self.typing_console_handler = TypingConsoleHandler()\n self.typing_console_handler.setLevel(logging.INFO)\n self.typing_console_handler.setFormatter(console_formatter)\n\n # Create a handler for console without typing simulation\n self.console_handler = ConsoleHandler()\n 
self.console_handler.setLevel(logging.DEBUG)\n self.console_handler.setFormatter(console_formatter)\n\n self.speak_mode = False\n self.chat_plugins = []\n\n # Info handler in activity.log\n self.file_handler = logging.FileHandler(\n os.path.join(log_dir, log_file), \"a\", \"utf-8\"\n )\n self.file_handler.setLevel(logging.DEBUG)\n info_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s: %(title_color)s %(title)s %(message)s\"\n )\n self.file_handler.setFormatter(info_formatter)\n\n # Error handler error.log\n error_handler = logging.FileHandler(\n os.path.join(log_dir, error_file), \"a\", \"utf-8\"\n )\n error_handler.setLevel(logging.ERROR)\n error_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title_color)s %(title)s\"\n \" %(message_no_color)s\"\n )\n error_handler.setFormatter(error_formatter)\n\n # self.typing_logger = logging.getLogger(self.log_name)\n # if not self.typing_logger.handlers:\n # self.typing_logger.addHandler(self.typing_console_handler)\n # self.typing_logger.addHandler(self.file_handler)\n # self.typing_logger.addHandler(error_handler)\n # self.typing_logger.setLevel(logging.DEBUG)\n\n if self.log_name.endswith(\"_INTERACT\") or not self.logger.handlers:\n # self.logger.addHandler(self.typing_console_handler)\n self.logger.addHandler(self.console_handler)\n self.logger.addHandler(error_handler)\n self.logger.addHandler(self.file_handler)\n self.logger.setLevel(logging.DEBUG)\n \n def typewriter_log(\n self, title=\"\", title_color=\"\", content=\"\", speak_text=False, level=logging.INFO\n ):\n # if speak_text and self.speak_mode:\n # say_text(f\"{title}. {content}\")\n\n for plugin in self.chat_plugins:\n plugin.report(f\"{title}. {content}\")\n\n if content:\n if isinstance(content, list):\n content = \" \".join(content)\n else:\n content = \"\"\n\n self.logger.log(\n level, content, extra={\"title\": title, \"color\": title_color}\n )\n\n def debug(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.DEBUG)\n\n def info(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.INFO)\n\n def warn(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.WARN)\n\n def error(self, title, message=\"\"):\n self._log(title, Fore.RED, message, logging.ERROR)\n\n def _log(\n self,\n title: str = \"\",\n title_color: str = \"\",\n message: str = \"\",\n level=logging.INFO,\n ):\n if message:\n if isinstance(message, list):\n message = \" \".join(message)\n self.logger.log(\n level, message, extra={\"title\": str(title), \"color\": str(title_color)}\n )\n\n def set_level(self, level):\n self.logger.setLevel(level)\n self.typing_logger.setLevel(level)\n\n def double_check(self, additionalText=None):\n if not additionalText:\n additionalText = (\n \"Please ensure you've setup and configured everything\"\n \" correctly. Read https://github.com/Torantulino/Auto-GPT#readme to \"\n \"double check. 
You can also create a github issue or join the discord\"\n \" and ask there!\"\n )\n\n self.typewriter_log(\"DOUBLE CHECK CONFIGURATION\", Fore.YELLOW, additionalText)\n\n def log_json(self, data: Any, file_name: str) -> None:\n # Define log directory\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n\n # Create a handler for JSON files\n json_file_path = os.path.join(log_dir, file_name)\n json_data_handler = JsonFileHandler(json_file_path)\n json_data_handler.setFormatter(JsonFormatter())\n\n # Log the JSON data using the custom file handler\n self.json_logger.addHandler(json_data_handler)\n self.json_logger.debug(data)\n self.json_logger.removeHandler(json_data_handler)\n\n def get_log_directory(self):\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n return os.path.abspath(log_dir)" }, { "identifier": "InteractionParameter", "path": "XAgentServer/models/parameter.py", "snippet": "class InteractionParameter(metaclass=abc.ABCMeta):\n \"\"\"\n 交互参数\n \"\"\"\n\n def __init__(self,\n interaction_id: str,\n parameter_id: str,\n args: Union[str, dict, None] = None\n ):\n self.interaction_id = interaction_id\n self.args = args\n self.parameter_id = parameter_id\n\n def to_dict(self):\n return {\n \"interaction_id\": self.interaction_id,\n \"parameter_id\": self.parameter_id,\n \"args\": self.args,\n }\n\n def to_json(self):\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n \n @classmethod\n def from_db(cls, interaction):\n return cls(interaction.interaction_id,\n interaction.parameter_id,\n interaction.args\n )" }, { "identifier": "XAgentRaw", "path": "XAgentServer/models/raw.py", "snippet": "class XAgentRaw(metaclass=abc.ABCMeta):\n \"\"\"XAgent Raw Object\"\"\"\n\n def __init__(self, node_id: str,\n interaction_id: str,\n current: str,\n step: int,\n data: dict,\n file_list: list,\n status: str,\n do_interrupt: bool,\n wait_seconds: int,\n ask_for_human_help: bool,\n create_time: str,\n update_time: str,\n is_deleted: bool,\n is_human: bool,\n human_data: dict,\n human_file_list: list,\n is_send: bool,\n is_receive: bool,\n include_pictures: bool = False,):\n self.node_id = node_id\n self.interaction_id = interaction_id\n self.current = current\n self.step = step\n self.data = data\n self.file_list = file_list\n self.status = status\n self.do_interrupt = do_interrupt\n self.wait_seconds = wait_seconds\n self.ask_for_human_help = ask_for_human_help\n self.create_time = create_time\n self.update_time = update_time\n self.is_deleted = is_deleted\n self.is_human = is_human\n self.human_data = human_data\n self.human_file_list = human_file_list\n self.is_send = is_send\n self.is_receive = is_receive\n self.include_pictures = include_pictures\n\n def to_dict(self):\n \"\"\"XAgent Raw Object to dict\"\"\"\n return {\n \"node_id\": self.node_id,\n \"interaction_id\": self.interaction_id,\n \"current\": self.current,\n \"step\": self.step,\n \"data\": self.data,\n \"file_list\": self.file_list,\n \"status\": self.status,\n \"do_interrupt\": self.do_interrupt,\n \"wait_seconds\": self.wait_seconds,\n \"ask_for_human_help\": self.ask_for_human_help,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n \"is_human\": self.is_human,\n \"human_data\": self.human_data,\n \"human_file_list\": self.human_file_list,\n \"is_send\": 
self.is_send,\n \"is_receive\": self.is_receive,\n \"include_pictures\": self.include_pictures\n }\n\n def to_json(self):\n \"\"\"XAgent Raw Object to json\"\"\"\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n \"\"\"XAgent Raw Object from json\"\"\"\n return cls(**json_data)\n\n def update(self, update_data: dict):\n \"\"\"XAgent Raw Object update\"\"\"\n for k, v in update_data.items():\n setattr(self, k, v)\n return self\n\n @classmethod\n def from_db(cls, db_data):\n \"\"\"XAgent Raw Object from db\"\"\"\n return cls(\n node_id=db_data.node_id,\n interaction_id=db_data.interaction_id,\n current=db_data.current,\n step=db_data.step,\n data=db_data.data,\n file_list=db_data.file_list,\n status=db_data.status,\n do_interrupt=db_data.do_interrupt,\n wait_seconds=db_data.wait_seconds,\n ask_for_human_help=db_data.ask_for_human_help,\n create_time=db_data.create_time,\n update_time=db_data.update_time,\n is_deleted=db_data.is_deleted,\n is_human=db_data.is_human,\n human_data=db_data.human_data,\n human_file_list=db_data.human_file_list,\n is_send=db_data.is_send,\n is_receive=db_data.is_receive,\n include_pictures=db_data.include_pictures\n )" }, { "identifier": "XAgentServer", "path": "XAgentServer/server.py", "snippet": "class XAgentServer:\n \"\"\"XAgent Server Start Class\n \"\"\"\n\n def __init__(self, logger: Logger) -> None:\n self.logger: Logger = logger\n\n def interact(self, interaction: XAgentInteraction):\n # query = message\n \"\"\"\n XAgent Server Start Function\n \"\"\"\n from XAgent.config import CONFIG as config\n xagent_core = None\n try:\n config.reload()\n args = {}\n # args\n args = interaction.parameter.args\n\n self.logger.info(\n f\"server is running, the start query is {args.get('goal', '')}\")\n xagent_param = XAgentParam()\n\n # build query\n xagent_param.build_query({\n \"role_name\": \"Assistant\",\n \"task\": args.get(\"goal\", \"\"),\n \"plan\": args.get(\"plan\", [\"Pay attention to the language in initial goal, always answer with the same language of the initial goal given.\"]),\n })\n xagent_param.build_config(config)\n xagent_core = XAgentCoreComponents()\n # build XAgent Core Components\n xagent_core.build(xagent_param, interaction=interaction)\n json_str = json.dumps(\n xagent_param.config.to_dict(), indent=2)\n json_str=re.sub(r'\"api_key\": \"(.+?)\"', r'\"api_key\": \"**\"', json_str)\n self.logger.info(json_str)\n self.logger.typewriter_log(\n \"Human-In-The-Loop\",\n Fore.RED,\n str(xagent_param.config.enable_ask_human_for_help),\n )\n\n file_list = interaction.base.file_list\n for file in file_list:\n file_uuid = file.get(\"uuid\", \"\")\n file_name = file.get(\"name\", \"\")\n if file_uuid.startswith(\"/\"):\n file_path = file_uuid\n else:\n file_path = os.path.join(XAgentServerEnv.Upload.upload_dir,\n interaction.base.user_id, file_uuid)\n\n upload_dir = os.path.join(\n xagent_core.base_dir, \"upload\")\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n # 拷贝到workspace\n if interaction.call_method == \"web\":\n shutil.copy(file_path, os.path.join(upload_dir, file_name))\n else:\n if os.path.exists(file_path):\n if os.path.samefile(file_path, os.path.join(upload_dir, file_name)):\n # 文件路径相同,跳过复制\n pass\n else:\n shutil.copy(file_path, os.path.join(upload_dir, file_name))\n # shutil.copy(file_path, os.path.join(upload_dir, file_name))\n\n new_file = os.path.join(upload_dir, file_name)\n try:\n xagent_core.toolserver_interface.upload_file(new_file)\n except Exception 
as e:\n self.logger.typewriter_log(\n \"Error happens when uploading file\",\n Fore.RED,\n f\"{new_file}\\n{e}\",\n )\n raise XAgentUploadFileError(str(e)) from e\n\n task_handler = TaskHandler(xagent_core=xagent_core,\n xagent_param=xagent_param)\n self.logger.info(\"Start outer loop async\")\n task_handler.outer_loop()\n except Exception as e:\n raise XAgentRunningError(str(e)) from e\n finally:\n if xagent_core is not None:\n xagent_core.close()" }, { "identifier": "StatusEnum", "path": "XAgentServer/enums/status.py", "snippet": "class StatusEnum:\n \"\"\"XAgent Status Enum\n \"\"\"\n START = \"start\"\n SUBTASK = \"subtask\"\n REFINEMENT = \"refinement\"\n INNER = \"inner\"\n FINISHED = \"finished\"\n FAILED = \"failed\"\n SUBMIT = \"subtask_submit\"\n RUNNING = \"running\"\n ASK_FOR_HUMAN_HELP = \"ask_for_human_help\"\n CLOSED = \"closed\"" }, { "identifier": "check_user", "path": "XAgentServer/application/websockets/common.py", "snippet": "async def check_user(db, user_id, token):\n \"\"\"\n check user for websocket connection\n \"\"\"\n if not UserCRUD.is_exist(db=db, user_id=user_id):\n raise XAgentWebSocketConnectError(\"user is not exist!\")\n # auth\n if not UserCRUD.user_is_valid(db=db, user_id=user_id, token=token):\n raise XAgentWebSocketConnectError(\"user is not available!\")\n\n user = UserCRUD.get_user(db=db, user_id=user_id)\n if not user or user.token != token or user.available is False or user.is_beta is False:\n raise XAgentWebSocketConnectError(\n \"XAgentServer is running in production mode, if you want to use it, please contact the administrator.\")" }, { "identifier": "handle_data", "path": "XAgentServer/application/websockets/common.py", "snippet": "def handle_data(row: Raw, root_dir: str):\n \"\"\"\n handle data for websocket response\n \"\"\"\n data = row.data\n try:\n using_tools = data.get(\"using_tools\", \"\")\n if not using_tools:\n return data\n tool_name = using_tools.get(\"tool_name\", \"\") if isinstance(\n using_tools, dict) else \"\"\n tool_output = using_tools.get(\n \"tool_output\", {}) if isinstance(using_tools, dict) else \"\"\n tool_input = using_tools.get(\n \"tool_input\", {}) if isinstance(using_tools, dict) else \"\"\n if row.include_pictures:\n if tool_name == \"PythonNotebook_execute_cell\":\n for output in tool_output:\n if isinstance(output, dict) and 'file_name' in output:\n file_name = output['file_name']\n png_base64 = None\n if file_name:\n file_path = os.path.join(\n root_dir, \"workspace\", file_name)\n if os.path.exists(file_path):\n try:\n with open(file_path, \"rb\") as f:\n png_base64 = base64.b64encode(\n f.read()).decode(\"utf-8\")\n except Exception:\n pass\n\n output[\"file_data\"] = png_base64\n using_tools[\"is_include_pictures\"] = True\n \n if tool_input:\n data[\"using_tools\"][\"tool_input\"] = tool_input.encode(\"utf-8\").decode(\"unicode_escape\")\n if tool_output and isinstance(tool_output, str):\n data[\"using_tools\"][\"tool_output\"] = tool_output.encode(\"utf-8\").decode(\"unicode_escape\")\n except Exception:\n pass\n return data" }, { "identifier": "handle_workspace_filelist", "path": "XAgentServer/application/websockets/common.py", "snippet": "def handle_workspace_filelist(file_list):\n \"\"\"handle workspace file list\n\n Args:\n file_list (_type_): file_list is a list of file name\n\n Returns:\n List[Dict]: element list, each element is a dict with name and suffix\n \"\"\"\n if not isinstance(file_list, list) or not file_list:\n return []\n return [{\"name\": file, \"suffix\": file.split(\".\")[-1]} for 
file in file_list]" }, { "identifier": "redis", "path": "XAgentServer/application/global_val.py", "snippet": "def init_yag(logger):\ndef init_executor(logger):" } ]
import json
import os
import threading
import traceback
import uuid
from datetime import datetime
from urllib.parse import parse_qs
from typing import Any
from colorama import Fore
from fastapi import APIRouter, Depends, WebSocket
from sqlalchemy.orm import Session
from starlette.endpoints import WebSocketEndpoint
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from XAgentServer.application.core.envs import XAgentServerEnv
from XAgentServer.application.cruds.interaction import InteractionCRUD
from XAgentServer.application.dependence import get_db
from XAgentServer.application.schemas.response_body import WebsocketResponseBody
from XAgentServer.exts.exception_ext import XAgentWebSocketConnectError, XAgentError
from XAgentServer.interaction import XAgentInteraction
from XAgentServer.loggers.logs import Logger
from XAgentServer.models.parameter import InteractionParameter
from XAgentServer.models.raw import XAgentRaw
from XAgentServer.server import XAgentServer
from XAgentServer.enums.status import StatusEnum
from XAgentServer.application.websockets.common import (check_user, handle_data, handle_workspace_filelist)
from XAgentServer.application.global_val import redis
13,417
db=self.db, interaction_id=self.client_id) if interaction.status not in [StatusEnum.FINISHED, StatusEnum.FAILED]: InteractionCRUD.update_interaction_status(db=self.db, interaction_id=self.client_id, status=StatusEnum.CLOSED, message="closed", current_step="0") try: await self.on_disconnect(websocket, close_code) if self.scheduler.running: self.scheduler.shutdown() self.logger.info("shutdown scheduler") if self.db: self.db.close() self.logger.info("close db") finally: # notice the agent stop if user close the websocket redis.set_key(f"{self.client_id}", "close") async def on_connect(self, websocket: WebSocket): """Connect to client Args: websocket (WebSocket): A websocket object Raises: XAgentWebSocketConnectError: If the user is running, it will raise this error. """ self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger( log_dir=self.log_dir, log_file="interact.log", log_name=f"{self.client_id}_INTERACT") query_string = self.scope.get("query_string", b"").decode() parameters = parse_qs(query_string) user_id = parameters.get("user_id", [""])[0] token = parameters.get("token", [""])[0] description = parameters.get("description", [""])[0] self.logger.typewriter_log( title=f"Receive connection from {self.client_id}: ", title_color=Fore.RED, content=f"user_id: {user_id}, token: {token}, description: {description}") await websocket.accept() try: await check_user(db=self.db, user_id=user_id, token=token) # check running, you can edit it by yourself in envs.py to skip this check if XAgentServerEnv.check_running: if InteractionCRUD.is_running(db=self.db, user_id=user_id): raise XAgentWebSocketConnectError( "You have a running interaction, please wait for it to finish!") base = InteractionCRUD.get_interaction(db=self.db, interaction_id=self.client_id) if base is None: raise XAgentWebSocketConnectError( "init interaction failed, please restart!") InteractionCRUD.update_interaction(db=self.db, base_data={ "interaction_id": self.client_id, "status": "connected", "message": "connected", "current_step": "0", "description": description} ) except XAgentWebSocketConnectError as e: self.logger.error( f"Error in on_connect of {self.client_id}: {e}") await websocket.send_text( WebsocketResponseBody( status="connect", success=False, message=str(e), data=None).to_text()) await websocket.close(code=1000) return await websocket.send_text( WebsocketResponseBody( status="connect", success=True, message="connect success", data=base.to_dict()).to_text()) async def on_disconnect(self, websocket: WebSocket, close_code): """When disconnect with client, it will run this function Override this function to do something when disconnect with client Args: websocket (WebSocket): A websocket object close_code (_type_): The close code, default is 0 """ self.logger.typewriter_log( title=f"Disconnect with client {self.client_id}: ", title_color=Fore.RED) # await websocket.close(code=close_code) async def on_receive(self, websocket: WebSocket, data: Any): """ When receive data from client, it will run this function Args: websocket (WebSocket): A websocket object data (any): The data from client """ data = json.loads(data) if data.get("type", "") != "ping": self.logger.typewriter_log( title=f"Receive data from {self.client_id}: ", title_color=Fore.RED, content=json.dumps(data, indent=4, ensure_ascii=False) ) if data.get("type", "") == "data": args = data.get("args", {}) 
agent = data.get("agent", "") mode = data.get("mode", "") file_list = data.get("file_list", []) node_id = data.get("node_id", "")
""" Base Websocket Server Note: You can use this websocket to run your interaction. You can modify it by yourself to do something, such as change the way to receive data from client, or use celery to run interaction, or use ThreadPoolExecutor to run interaction and so on. Version: 1.1.0 Attention: Since Version: 1.1.0, Local storage will no longer be supported, replaced by Mysql and only Components: Websocket is a way for long connect with client MySQL to save xagent data Redis to save status of interaction Threading to run interaction APScheduler to send data to client and keep alive FastAPI APIRouter to manage websocket route XAgentError in XAgentServer.exts.exception_ext """ router = APIRouter() # @router.websocket_route("/ws/{client_id}", name="ws") @router.websocket("/ws/base/{client_id}", name="ws") class MainServer(WebSocketEndpoint): """Main Websocket Server Extends: WebSocketEndpoint Description: In this websocket, we will receive the args from user, and you can use it to run the interaction. specifically, the args is a dict, and it must contain a key named "goal" to tell XAgent what do you want to do. """ def __init__(self, websocket: WebSocket, db: Session = Depends(get_db), client_id: str = ""): super().__init__(websocket.scope, websocket.receive, websocket.send) self.db = db self.client_id: str = client_id self.websocket = websocket self.date_str = datetime.now().strftime("%Y-%m-%d") self.log_dir = "" self.logger = None self.scheduler = AsyncIOScheduler() self.continue_flag = True async def dispatch(self) -> None: """XAgent Websocket Server Dispatch Function extend from WebSocketEndpoint override this function block: loop flag and finally block to do something Raises: exc: extend from WebSocketEndpoint """ websocket = WebSocket(self.scope, receive=self.receive, send=self.send) close_code = 1000 await self.on_connect(websocket) redis.set_key(f"{self.client_id}", "alive") try: while self.continue_flag: message = await websocket.receive() if message["type"] == "websocket.receive": data = await self.decode(websocket, message) await self.on_receive(websocket, data) elif message["type"] == "websocket.disconnect": close_code = 1000 break except Exception as exc: close_code = 1011 raise exc finally: interaction = InteractionCRUD.get_interaction( db=self.db, interaction_id=self.client_id) if interaction.status not in [StatusEnum.FINISHED, StatusEnum.FAILED]: InteractionCRUD.update_interaction_status(db=self.db, interaction_id=self.client_id, status=StatusEnum.CLOSED, message="closed", current_step="0") try: await self.on_disconnect(websocket, close_code) if self.scheduler.running: self.scheduler.shutdown() self.logger.info("shutdown scheduler") if self.db: self.db.close() self.logger.info("close db") finally: # notice the agent stop if user close the websocket redis.set_key(f"{self.client_id}", "close") async def on_connect(self, websocket: WebSocket): """Connect to client Args: websocket (WebSocket): A websocket object Raises: XAgentWebSocketConnectError: If the user is running, it will raise this error. 
""" self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger( log_dir=self.log_dir, log_file="interact.log", log_name=f"{self.client_id}_INTERACT") query_string = self.scope.get("query_string", b"").decode() parameters = parse_qs(query_string) user_id = parameters.get("user_id", [""])[0] token = parameters.get("token", [""])[0] description = parameters.get("description", [""])[0] self.logger.typewriter_log( title=f"Receive connection from {self.client_id}: ", title_color=Fore.RED, content=f"user_id: {user_id}, token: {token}, description: {description}") await websocket.accept() try: await check_user(db=self.db, user_id=user_id, token=token) # check running, you can edit it by yourself in envs.py to skip this check if XAgentServerEnv.check_running: if InteractionCRUD.is_running(db=self.db, user_id=user_id): raise XAgentWebSocketConnectError( "You have a running interaction, please wait for it to finish!") base = InteractionCRUD.get_interaction(db=self.db, interaction_id=self.client_id) if base is None: raise XAgentWebSocketConnectError( "init interaction failed, please restart!") InteractionCRUD.update_interaction(db=self.db, base_data={ "interaction_id": self.client_id, "status": "connected", "message": "connected", "current_step": "0", "description": description} ) except XAgentWebSocketConnectError as e: self.logger.error( f"Error in on_connect of {self.client_id}: {e}") await websocket.send_text( WebsocketResponseBody( status="connect", success=False, message=str(e), data=None).to_text()) await websocket.close(code=1000) return await websocket.send_text( WebsocketResponseBody( status="connect", success=True, message="connect success", data=base.to_dict()).to_text()) async def on_disconnect(self, websocket: WebSocket, close_code): """When disconnect with client, it will run this function Override this function to do something when disconnect with client Args: websocket (WebSocket): A websocket object close_code (_type_): The close code, default is 0 """ self.logger.typewriter_log( title=f"Disconnect with client {self.client_id}: ", title_color=Fore.RED) # await websocket.close(code=close_code) async def on_receive(self, websocket: WebSocket, data: Any): """ When receive data from client, it will run this function Args: websocket (WebSocket): A websocket object data (any): The data from client """ data = json.loads(data) if data.get("type", "") != "ping": self.logger.typewriter_log( title=f"Receive data from {self.client_id}: ", title_color=Fore.RED, content=json.dumps(data, indent=4, ensure_ascii=False) ) if data.get("type", "") == "data": args = data.get("args", {}) agent = data.get("agent", "") mode = data.get("mode", "") file_list = data.get("file_list", []) node_id = data.get("node_id", "")
parameter = InteractionParameter(
8
2023-10-16 03:44:57+00:00
16k
deepseek-ai/DreamCraft3D
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n # 4D Gaussian Annealing\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original 
scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n\n # FIXME: use progressive normal eps\n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if global_step >= min_step and global_step <= max_step:\n end_val = self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step) * (\n end_val - start_val\n ) / (max_step - min_step)\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": 
"threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * 
-1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = 
int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom 
/ torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def 
get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
14073
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone())
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone())
self.isosurface_helper = MarchingTetrahedraHelper(
5
2023-10-23 07:40:20+00:00
16k
zju3dv/4K4D
tests/headless_opengl_tests.py
[ { "identifier": "eglContextManager", "path": "easyvolcap/utils/egl_utils.py", "snippet": "class eglContextManager:\n # Manages the creation and destruction of an EGL context\n # Will resize if the size of the window changes\n # Will also manage gl.Viewport to render different parts of the screen\n # Only resize the underlying egl ctx when exceeding current size\n def __init__(self, W=1920, H=1080) -> None:\n self.H, self.W = H, W\n self.max_H, self.max_W = H, W # always create at first\n self.eglctx = create_opengl_context()\n self.create_fbo_with_rbos(W, H)\n self.resize(W, H) # maybe create new framebuffer\n\n def create_fbo_with_rbos(self, W: int, H: int):\n if hasattr(self, 'fbo'):\n gl.glDeleteFramebuffers(1, [self.fbo])\n gl.glDeleteRenderbuffers(6, [self.rbo0, self.rbo1, self.rbo2, self.rbo3, self.rbo4, self.rbo_dpt])\n\n # Add new buffer\n self.fbo = gl.glGenFramebuffers(1)\n self.rbo0, self.rbo1, self.rbo2, self.rbo3, self.rbo4, self.rbo_dpt = gl.glGenRenderbuffers(6)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo0)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo1)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo2)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo3)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo4)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_RGBA8, W, H)\n gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self.rbo_dpt)\n gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, gl.GL_DEPTH_COMPONENT, W, H)\n\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_RENDERBUFFER, self.rbo0)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo1)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo2)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo3)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_RENDERBUFFER, self.rbo4)\n gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_RENDERBUFFER, self.rbo_dpt)\n gl.glDrawBuffers(5, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2, gl.GL_COLOR_ATTACHMENT3, gl.GL_COLOR_ATTACHMENT4])\n\n gl.glViewport(0, 0, W, H) # wtf\n gl.glScissor(0, 0, W, H) # wtf # NOTE: Need to redefine scissor size\n\n def resize(self, W=1920, H=1080):\n self.H, self.W = H, W\n if self.H > self.max_H or self.W > self.max_W:\n self.max_H, self.max_W = max(int(self.H * 1.0), self.max_H), max(int(self.W * 1.0), self.max_W)\n self.create_fbo_with_rbos(self.max_W, self.max_H)\n gl.glViewport(0, 0, self.W, self.H)" }, { "identifier": "Quad", "path": "easyvolcap/utils/gl_utils.py", "snippet": "class Quad(Mesh):\n # A shared texture for CUDA (pytorch) and OpenGL\n # Could be rendererd to screen using blitting or just drawing a quad\n def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip\n self.use_cudagl = use_cudagl\n self.vert_sizes = [3] # only position\n self.vert_gl_types = [gl.GL_FLOAT] # only position\n self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings 
_type\n self.max_verts, self.max_faces = 0, 0\n self.verts = torch.as_tensor([[-1., -1., 0.5],\n [1., -1., 0.5],\n [-1., 1., 0.5],\n [1., 1., 0.5],])\n self.update_gl_buffers()\n self.compile_shaders()\n\n self.max_H, self.max_W = H, W\n self.H, self.W = H, W\n self.compose = compose\n self.compose_power = compose_power\n self.init_texture()\n\n @property\n def n_faces_bytes(self): return 0\n\n def use_gl_program(self, program: shaders.ShaderProgram):\n super().use_gl_program(program)\n self.uniforms.tex = gl.glGetUniformLocation(program, 'tex')\n gl.glUseProgram(self.quad_program) # use a different program\n gl.glUniform1i(self.uniforms.tex, 0)\n\n def compile_shaders(self):\n try:\n self.quad_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER)\n )\n except Exception as e:\n print(str(e).encode('utf-8').decode('unicode_escape'))\n raise e\n\n def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers\n self.H, self.W = H, W\n if self.H > self.max_H or self.W > self.max_W: # max got updated\n self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W)\n self.init_texture()\n\n def init_texture(self):\n if hasattr(self, 'cu_tex'):\n from cuda import cudart\n CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex))\n\n if hasattr(self, 'fbo'):\n gl.glDeleteFramebuffers(1, [self.fbo])\n gl.glDeleteTextures(1, [self.tex])\n\n # Init the texture to be blit onto the screen\n self.tex = gl.glGenTextures(1)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0))\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)\n\n # Init the framebuffer object if explicit blitting is used (slower than drawing quad)\n self.fbo = gl.glGenFramebuffers(1)\n old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo)\n gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0)\n gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo)\n\n if self.use_cudagl:\n from cuda import cudart\n if self.compose:\n # Both reading and writing of this resource is required\n flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone\n else:\n flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard\n self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags))\n\n def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n assert self.use_cudagl, \"Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad\"\n w = w or self.W\n h = h or self.H\n if image.shape[-1] == 3:\n image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel\n\n from cuda import cudart\n kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice\n CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))\n cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0))\n\n if self.compose:\n \"\"\"\n Blit current framebuffer to this texture (self.tex)\n Read content of this texture 
into a cuda buffer\n Perform alpha blending based on the frame's alpha channel\n Copy the blended image back into the texture (self.tex)\n \"\"\"\n old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0\n gl.glBlitFramebuffer(x, y, w, h,\n x, y, w, h,\n gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame\n gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old)\n\n buffer = torch.empty_like(image)\n CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst\n w * 4 * buffer.element_size(), # dpitch\n cu_tex_arr, # src\n x * 4 * image.element_size(), # wOffset\n y, # hOffset\n w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes)\n h, # height\n kind, # kind\n torch.cuda.current_stream().cuda_stream)) # stream\n\n # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]])\n alpha = image[..., -1:] / 255\n image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int\n image[..., -1:] = buffer[..., -1:] + image[..., -1:]\n image = image.clip(0, 255)\n\n CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr,\n x * 4 * image.element_size(),\n y,\n image.data_ptr(),\n w * 4 * image.element_size(), # differently sized\n w * 4 * image.element_size(), # rgba, should do a composition first\n h,\n kind,\n torch.cuda.current_stream().cuda_stream))\n CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream))\n\n def upload_to_texture(self, ptr: np.ndarray):\n H, W = ptr.shape[:2]\n H, W = min(self.H, H), min(self.W, W)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down?\n\n @property\n def verts_data(self): # a heavy copy operation\n verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync\n verts = np.asarray(verts, dtype=np.float32, order='C')\n return verts\n\n def render(self, camera: Camera = None):\n self.draw() # no uploading needed\n\n def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n \"\"\"\n Upload the texture instead of the camera\n This respects the OpenGL convension of lower left corners\n \"\"\"\n w = w or self.W\n h = h or self.H\n _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT)\n gl.glViewport(x, y, w, h)\n gl.glScissor(x, y, w, h) # only render in this small region of the viewport\n\n gl.glUseProgram(self.quad_program) # use a different program\n gl.glActiveTexture(gl.GL_TEXTURE0)\n gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex)\n\n gl.glBindVertexArray(self.vao)\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))\n gl.glBindVertexArray(0)\n\n # Some house keepings\n gl.glViewport(0, 0, W, H)\n gl.glScissor(0, 0, W, H)\n\n def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0):\n \"\"\"\n This respects the OpenGL convension of lower left corners\n \"\"\"\n w = w or self.W\n h = h or self.H\n old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING)\n gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0\n gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped\n x, y, x + w, y + h, # the height is flipped\n gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST)\n gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old)" }, { "identifier": "Mesh", "path": "easyvolcap/utils/gl_utils.py", "snippet": 
"class Mesh:\n class RenderType(Enum):\n POINTS = 1\n LINES = 2\n TRIS = 3\n QUADS = 4 # TODO: Support quad loading\n STRIPS = 5\n\n # Helper class to render a mesh on opengl\n # This implementation should only be used for debug visualization\n # Since no differentiable mechanism will be added\n # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly\n\n def __init__(self,\n verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update\n faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update\n colors: torch.Tensor = None,\n normals: torch.Tensor = None,\n scalars: dotdict[str, torch.Tensor] = dotdict(),\n render_type: RenderType = RenderType.TRIS,\n\n # Misc info\n name: str = 'mesh',\n filename: str = '',\n visible: bool = True,\n\n # Render options\n shade_flat: bool = False, # smooth shading\n point_radius: float = 0.015,\n render_normal: bool = False,\n\n # Storage options\n store_device: str = 'cpu',\n compute_device: str = 'cuda',\n vert_sizes=[3, 3, 3], # pos + color + norm\n\n # Init options\n est_normal_thresh: int = 100000,\n\n # Ignore unused input\n **kwargs,\n ) -> None:\n super().__init__()\n self.name = name\n self.visible = visible\n self.render_type = render_type\n\n self.shade_flat = shade_flat\n self.point_radius = point_radius\n self.render_normal = render_normal\n\n self.store_device = store_device\n self.compute_device = compute_device\n self.vert_sizes = vert_sizes\n\n self.est_normal_thresh = est_normal_thresh\n\n # Uniform and program\n self.compile_shaders()\n self.uniforms = dotdict() # uniform values\n\n # Before initialization\n self.max_verts = 0\n self.max_faces = 0\n\n # OpenGL data\n if filename: self.load_from_file(filename)\n else: self.load_from_data(verts, faces, colors, normals, scalars)\n\n def compile_shaders(self):\n try:\n self.mesh_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER)\n )\n self.point_program = shaders.compileProgram(\n shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER),\n shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER)\n )\n except Exception as e:\n print(str(e).encode('utf-8').decode('unicode_escape'))\n raise e\n\n @property\n def n_verts_bytes(self):\n return len(self.verts) * self.vert_size * self.verts.element_size()\n\n @property\n def n_faces_bytes(self):\n return len(self.faces) * self.face_size * self.faces.element_size()\n\n @property\n def verts_data(self): # a heavy copy operation\n verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync\n verts = np.asarray(verts, dtype=np.float32, order='C')\n return verts\n\n @property\n def faces_data(self): # a heavy copy operation\n faces = self.faces.ravel().numpy() # N, 3\n faces = np.asarray(faces, dtype=np.uint32, order='C')\n return faces\n\n @property\n def face_size(self):\n return self.render_type.value\n\n @property\n def vert_size(self):\n return sum(self.vert_sizes)\n\n def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'):\n verts, faces, colors, normals, scalars = self.load_data_from_file(filename)\n self.load_from_data(verts, faces, colors, normals, scalars)\n\n def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'):\n self.name = os.path.split(filename)[-1]\n 
verts, faces, colors, normals, scalars = None, None, None, None, None\n verts, faces = load_mesh(filename, device=self.store_device)\n if not len(faces):\n verts, colors, normals, scalars = load_pts(filename)\n self.render_type = Mesh.RenderType.POINTS\n else:\n self.render_type = Mesh.RenderType(faces.shape[-1]) # use value\n return verts, faces, colors, normals, scalars\n\n def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()):\n # Data type conversion\n verts = torch.as_tensor(verts) # convert to tensor if input is of other types\n if verts.dtype == torch.float32:\n pass # supports this for now\n elif verts.dtype == torch.float16:\n pass # supports this for now\n else:\n verts = verts.type(torch.float) # convert to float32 if input is of higher precision\n gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT\n self.vert_gl_types = [gl_dtype] * len(self.vert_sizes)\n\n # Prepare main mesh data: vertices and faces\n self.verts = torch.as_tensor(verts, device=self.store_device)\n self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support\n\n # Prepare colors and normals\n if colors is not None:\n self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype)\n else:\n bounds = get_bounds(self.verts[None])[0]\n self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0])\n if normals is not None:\n self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype)\n else:\n self.estimate_vertex_normals()\n\n # Prepare other scalars\n if scalars is not None:\n for k, v in scalars.items():\n setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok?\n\n # Prepare OpenGL related buffer\n self.update_gl_buffers()\n\n def estimate_vertex_normals(self):\n def est_pcd_norms():\n if self.verts.dtype == torch.half:\n self.normals = self.verts\n else:\n from pytorch3d.structures import Pointclouds, Meshes\n pcd = Pointclouds([self.verts]).to(self.compute_device)\n self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim\n\n def est_tri_norms():\n if self.verts.dtype == torch.half:\n self.normals = self.verts\n else:\n from pytorch3d.structures import Pointclouds, Meshes\n mesh = Meshes([self.verts], [self.faces]).to(self.compute_device)\n self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim\n\n if not len(self.verts) > self.est_normal_thresh:\n if self.render_type == Mesh.RenderType.TRIS: est_tri_norms()\n elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms()\n else:\n # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping'))\n self.normals = self.verts\n else:\n # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation'))\n self.normals = self.verts\n\n def offscreen_render(self, eglctx: \"eglContextManager\", camera: Camera):\n eglctx.resize(camera.W, camera.H)\n self.render(camera)\n\n def render(self, camera: Camera):\n if not self.visible: return\n\n # For point rendering\n if self.render_type == Mesh.RenderType.POINTS:\n gl.glUseProgram(self.point_program)\n self.use_gl_program(self.point_program)\n else:\n gl.glUseProgram(self.mesh_program)\n self.use_gl_program(self.mesh_program)\n\n self.upload_gl_uniforms(camera)\n 
gl.glBindVertexArray(self.vao)\n\n if self.render_type == Mesh.RenderType.POINTS:\n gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices\n elif self.render_type == Mesh.RenderType.LINES:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.TRIS:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.QUADS:\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices\n elif self.render_type == Mesh.RenderType.STRIPS:\n gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts))\n else:\n raise NotImplementedError\n\n gl.glBindVertexArray(0)\n\n def use_gl_program(self, program: shaders.ShaderProgram):\n use_gl_program(program)\n self.uniforms.shade_flat = gl.glGetUniformLocation(program, \"shade_flat\")\n self.uniforms.point_radius = gl.glGetUniformLocation(program, \"point_radius\")\n self.uniforms.render_normal = gl.glGetUniformLocation(program, \"render_normal\")\n self.uniforms.H = gl.glGetUniformLocation(program, \"H\")\n self.uniforms.W = gl.glGetUniformLocation(program, \"W\")\n self.uniforms.n = gl.glGetUniformLocation(program, \"n\")\n self.uniforms.f = gl.glGetUniformLocation(program, \"f\")\n self.uniforms.P = gl.glGetUniformLocation(program, \"P\")\n self.uniforms.K = gl.glGetUniformLocation(program, \"K\")\n self.uniforms.V = gl.glGetUniformLocation(program, \"V\")\n self.uniforms.M = gl.glGetUniformLocation(program, \"M\")\n\n def upload_gl_uniforms(self, camera: Camera):\n K = camera.gl_ixt # hold the reference\n V = camera.gl_ext # hold the reference\n M = glm.identity(mat4)\n P = K * V * M\n\n gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat)\n gl.glUniform1f(self.uniforms.point_radius, self.point_radius)\n gl.glUniform1i(self.uniforms.render_normal, self.render_normal)\n gl.glUniform1i(self.uniforms.H, camera.H) # o2w\n gl.glUniform1i(self.uniforms.W, camera.W) # o2w\n gl.glUniform1f(self.uniforms.n, camera.n) # o2w\n gl.glUniform1f(self.uniforms.f, camera.f) # o2w\n gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip\n gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip\n gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V)) # w2c\n gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w\n\n def update_gl_buffers(self):\n # Might be overwritten\n self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0,\n len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated\n\n if hasattr(self, 'verts'):\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference\n if hasattr(self, 'faces'):\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data)\n\n def resize_buffers(self, v: int = 0, f: int = 0):\n if v > self.max_verts or f > self.max_faces:\n if v > self.max_verts: self.max_verts = v\n if f > self.max_faces: self.max_faces = f\n self.init_gl_buffers(v, f)\n\n def init_gl_buffers(self, v: int = 0, 
f: int = 0):\n # This will only init the corresponding buffer object\n n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes\n n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes\n\n # Housekeeping\n if hasattr(self, 'vao'):\n gl.glDeleteVertexArrays(1, [self.vao])\n gl.glDeleteBuffers(2, [self.vbo, self.ebo])\n\n self.vao = gl.glGenVertexArrays(1)\n self.vbo = gl.glGenBuffers(1)\n self.ebo = gl.glGenBuffers(1)\n\n gl.glBindVertexArray(self.vao)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work\n\n # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao\n cumsum = 0\n for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)):\n gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float\n gl.glEnableVertexAttribArray(i)\n cumsum += s\n\n if n_faces_bytes > 0:\n # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used\n gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo)\n gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW)\n gl.glBindVertexArray(0)\n\n def render_imgui(self):\n pass" }, { "identifier": "Camera", "path": "easyvolcap/utils/viewer_utils.py", "snippet": "class Camera:\n # Helper class to manage camera parameters\n def __init__(self,\n H: int = 512,\n W: int = 512,\n K: torch.Tensor = torch.tensor([[512.0, 0.0, 256], [0.0, 512.0, 256.0], [0.0, 0.0, 1.0]]), # intrinsics\n R: torch.Tensor = torch.tensor([[-1.0, 0.0, 0.0,], [0.0, 0.0, -1.0,], [0.0, -1.0, 0.0,]]), # extrinsics\n T: torch.Tensor = torch.tensor([[0.0], [0.0], [-3.0],]), # extrinsics\n n: float = 0.002, # bounds limit\n f: float = 100, # bounds limit\n t: float = 0.0, # temporal dimension (implemented as a float instead of int)\n v: float = 0.0, # view dimension (implemented as a float instead of int)\n bounds: torch.Tensor = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]), # bounding box\n\n # camera update hyperparameters\n origin: torch.Tensor = torch.tensor([0.0, 0.0, 0.0]),\n world_up: torch.Tensor = torch.tensor([0.0, 0.0, 1.0]),\n movement_speed: float = 1.0, # gui movement speed\n\n batch: dotdict = None, # will ignore all other inputs\n string: str = None, # will ignore all other inputs\n **kwargs,\n ) -> None:\n\n # Batch (network input parameters)\n if string is None:\n if batch is None:\n batch = dotdict()\n batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds = H, W, K, R, T, n, f, t, v, bounds\n self.from_batch(batch)\n \n # Other configurables\n self.origin = vec3(*origin)\n self.world_up = vec3(*world_up)\n self.movement_speed = movement_speed\n # self.front = self.front # will trigger an update\n else:\n self.from_string(string)\n\n # Internal states to facilitate camera position change\n self.is_dragging = False # rotation\n self.about_origin = False # about origin rotation\n self.is_panning = False # translation\n self.lock_fx_fy = True\n\n @property\n def w2p(self):\n ixt = mat4(self.ixt)\n ixt[3, 3] = 0\n ixt[2, 3] = 1\n return ixt @ self.ext # w2c -> c2p = w2p\n\n @property\n def V(self): return self.c2w\n\n @property\n def ixt(self): return self.K\n\n @property\n def gl_ext(self):\n gl_c2w = self.c2w\n gl_c2w[0] 
*= 1 # flip x\n gl_c2w[1] *= -1 # flip y\n gl_c2w[2] *= -1 # flip z\n gl_ext = glm.affineInverse(gl_c2w)\n return gl_ext # use original opencv ext since we've taken care of the intrinsics in gl_ixt\n\n @property\n def gl_ixt(self):\n # Construct opengl camera matrix with projection & clipping\n # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/\n # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191\n # fmt: off\n gl_ixt = mat4(\n 2 * self.fx / self.W, 0, 0, 0,\n 2 * self.s / self.W, 2 * self.fy / self.H, 0, 0,\n 1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, (self.f + self.n) / (self.n - self.f), -1,\n 0, 0, 2 * self.f * self.n / (self.n - self.f), 0,\n )\n # fmt: on\n\n return gl_ixt\n\n @property\n def ext(self): return self.w2c\n\n @property\n def w2c(self):\n w2c = mat4(self.R)\n w2c[3] = vec4(*self.T, 1.0)\n return w2c\n\n @property\n def c2w(self):\n return glm.affineInverse(self.w2c)\n\n @property\n def right(self) -> vec3: return vec3(self.R[0, 0], self.R[1, 0], self.R[2, 0]) # c2w R, 0 -> 3,\n\n @property\n def down(self) -> vec3: return vec3(self.R[0, 1], self.R[1, 1], self.R[2, 1]) # c2w R, 1 -> 3,\n\n @property\n def front(self) -> vec3: return vec3(self.R[0, 2], self.R[1, 2], self.R[2, 2]) # c2w R, 2 -> 3,\n\n @front.setter\n def front(self, v: vec3):\n front = v # the last row of R\n self.R[0, 2], self.R[1, 2], self.R[2, 2] = front.x, front.y, front.z\n right = glm.normalize(glm.cross(self.front, self.world_up)) # right\n self.R[0, 0], self.R[1, 0], self.R[2, 0] = right.x, right.y, right.z\n down = glm.cross(self.front, self.right) # down\n self.R[0, 1], self.R[1, 1], self.R[2, 1] = down.x, down.y, down.z\n\n @property\n def center(self): return -glm.transpose(self.R) @ self.T # 3,\n\n @center.setter\n def center(self, v: vec3):\n self.T = -self.R @ v # 3, 1\n\n @property\n def s(self): return self.K[1, 0]\n\n @s.setter\n def s(self, s): self.K[1, 0] = s\n\n @property\n def fx(self): return self.K[0, 0]\n\n @fx.setter\n def fx(self, v: float):\n v = min(v, 1e5)\n v = max(v, 1e-3)\n if self.lock_fx_fy:\n self.K[1, 1] = v / self.K[0, 0] * self.K[1, 1]\n self.K[0, 0] = v\n\n @property\n def fy(self): return self.K[1, 1]\n\n @fy.setter\n def fy(self, v: float):\n if self.lock_fx_fy:\n self.K[0, 0] = v / self.K[1, 1] * self.K[0, 0]\n self.K[1, 1] = v\n\n @property\n def cx(self): return self.K[2, 0]\n\n @cx.setter\n def cx(self, v: float):\n self.K[2, 0] = v\n\n @property\n def cy(self): return self.K[2, 1]\n\n @cy.setter\n def cy(self, v: float):\n self.K[2, 1] = v\n\n def begin_dragging(self,\n x: float, y: float,\n is_panning: bool,\n about_origin: bool,\n ):\n self.is_dragging = True\n self.is_panning = is_panning\n self.about_origin = about_origin\n self.drag_start = vec2([x, y])\n\n # Record internal states # ? 
Will this make a copy?\n self.drag_start_front = self.front # a recording\n self.drag_start_down = self.down\n self.drag_start_right = self.right\n self.drag_start_center = self.center\n self.drag_start_origin = self.origin\n self.drag_start_world_up = self.world_up\n\n # Need to find the max or min delta y to align with world_up\n dot = glm.dot(self.world_up, self.drag_start_front)\n self.drag_ymin = -np.arccos(-dot) + 0.01 # drag up, look down\n self.drag_ymax = np.pi + self.drag_ymin - 0.02 # remove the 0.01 of drag_ymin\n\n def end_dragging(self):\n self.is_dragging = False\n\n def update_dragging(self, x: float, y: float):\n if not self.is_dragging:\n return\n\n current = vec2(x, y)\n delta = current - self.drag_start\n delta /= max(self.H, self.W)\n delta *= -1\n\n if self.is_panning:\n delta *= self.movement_speed\n center_delta = delta[0] * self.drag_start_right + delta[1] * self.drag_start_down\n self.center = self.drag_start_center + center_delta\n if self.about_origin:\n self.origin = self.drag_start_origin + center_delta\n else:\n m = mat4(1.0)\n m = glm.rotate(m, delta.x % 2 * np.pi, self.world_up)\n m = glm.rotate(m, np.clip(delta.y, self.drag_ymin, self.drag_ymax), self.drag_start_right)\n self.front = m @ self.drag_start_front # might overshoot\n\n if self.about_origin:\n self.center = -m @ (self.origin - self.drag_start_center) + self.origin\n\n def move(self, x_offset: float, y_offset: float):\n speed_factor = 1e-1\n movement = y_offset * speed_factor\n movement = movement * self.front * self.movement_speed\n self.center += movement\n\n if self.is_dragging:\n self.drag_start_center += movement\n\n def to_batch(self):\n meta = dotdict()\n meta.H = torch.as_tensor(self.H)\n meta.W = torch.as_tensor(self.W)\n meta.K = torch.as_tensor(self.K.to_list()).mT\n meta.R = torch.as_tensor(self.R.to_list()).mT\n meta.T = torch.as_tensor(self.T.to_list())[..., None]\n meta.n = torch.as_tensor(self.n)\n meta.f = torch.as_tensor(self.f)\n meta.t = torch.as_tensor(self.t)\n meta.v = torch.as_tensor(self.v)\n meta.bounds = torch.as_tensor(self.bounds.to_list()) # no transpose for bounds\n\n # GUI related elements\n meta.movement_speed = torch.as_tensor(self.movement_speed)\n meta.origin = torch.as_tensor(self.origin.to_list())\n meta.world_up = torch.as_tensor(self.world_up.to_list())\n\n batch = dotdict()\n batch.update(meta)\n batch.meta.update(meta)\n return batch\n\n def to_easymocap(self):\n batch = self.to_batch()\n camera = to_numpy(batch)\n return camera\n\n def from_easymocap(self, camera: dict):\n batch = to_tensor(camera)\n self.from_batch(batch)\n return self\n\n def to_string(self) -> str:\n batch = to_list(self.to_batch().meta)\n return json.dumps(batch)\n\n def from_string(self, string: str):\n batch = to_tensor(dotdict(json.loads(string)), ignore_list=True)\n self.from_batch(batch)\n\n def from_batch(self, batch: dotdict):\n H, W, K, R, T, n, f, t, v, bounds = batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds\n\n # Batch (network input parameters)\n self.H = int(H)\n self.W = int(W)\n self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel()) # 3,\n self.n = float(n)\n self.f = float(f)\n self.t = float(t)\n self.v = float(v)\n self.bounds = mat2x3(*bounds.ravel()) # 2, 3\n\n if 'movement_speed' in batch: self.movement_speed = float(batch.movement_speed)\n if 'origin' in batch: self.origin = vec3(*batch.origin.ravel()) # 3,\n if 'world_up' in batch: self.world_up = vec3(*batch.world_up.ravel()) # 3,\n 
return self\n\n def custom_pose(self, R: torch.Tensor, T: torch.Tensor, K: torch.Tensor):\n # self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel())" }, { "identifier": "save_image", "path": "easyvolcap/utils/data_utils.py", "snippet": "def save_image(img_path: str, img: np.ndarray, jpeg_quality=75, png_compression=9, save_dtype=np.uint8):\n if isinstance(img, torch.Tensor): img = img.detach().cpu().numpy()\n if img.ndim == 4: img = np.concatenate(img, axis=0)\n if img.shape[0] < img.shape[-1] and (img.shape[0] == 3 or img.shape[0] == 4): img = np.transpose(img, (1, 2, 0))\n if np.issubdtype(img.dtype, np.integer):\n img = img / np.iinfo(img.dtype).max # to float\n if img.shape[-1] >= 3:\n if not img.flags['WRITEABLE']:\n img = img.copy() # avoid assignment only inputs\n img[..., :3] = img[..., [2, 1, 0]]\n if os.path.dirname(img_path):\n os.makedirs(os.path.dirname(img_path), exist_ok=True)\n if img_path.endswith('.png'):\n max = np.iinfo(save_dtype).max\n img = (img * max).clip(0, max).astype(save_dtype)\n elif img_path.endswith('.jpg'):\n img = img[..., :3] # only color\n img = (img * 255).clip(0, 255).astype(np.uint8)\n elif img_path.endswith('.hdr'):\n img = img[..., :3] # only color\n elif img_path.endswith('.exr'):\n # ... https://github.com/opencv/opencv/issues/21326\n os.environ[\"OPENCV_IO_ENABLE_OPENEXR\"] = \"1\"\n else:\n # should we try to discard alpha channel here?\n # exr could store alpha channel\n pass # no transformation for other unspecified file formats\n return cv2.imwrite(img_path, img, [cv2.IMWRITE_JPEG_QUALITY, jpeg_quality, cv2.IMWRITE_PNG_COMPRESSION, png_compression])" }, { "identifier": "common_opengl_options", "path": "easyvolcap/utils/gl_utils.py", "snippet": "def common_opengl_options():\n # Use program point size\n gl.glEnable(gl.GL_PROGRAM_POINT_SIZE)\n\n # Performs face culling\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_BACK)\n\n # Performs alpha trans testing\n gl.glEnable(gl.GL_ALPHA_TEST)\n\n # Performs z-buffer testing\n gl.glEnable(gl.GL_DEPTH_TEST)\n # gl.glDepthMask(gl.GL_TRUE)\n gl.glDepthFunc(gl.GL_LEQUAL)\n # gl.glDepthRange(-1.0, 1.0)\n gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)\n\n # Enable some masking tests\n gl.glEnable(gl.GL_SCISSOR_TEST)\n\n # Enable this to correctly render points\n # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310\n gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW\n # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW\n\n # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory.\n # # The second argument specifies that our pixels will be in bytes.\n # gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1)" }, { "identifier": "linearize_depth", "path": "easyvolcap/utils/gl_utils.py", "snippet": "def linearize_depth(d, n: float, f: float):\n # 0-1 -> -1,1\n # ndc -> view\n return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n))" }, { "identifier": "my_tests", "path": "easyvolcap/utils/test_utils.py", "snippet": "@catch_throw\ndef my_tests(globals: dict = globals(), prefix: str = 'test'):\n # extract testing functions\n tests = {name: func for name, func in globals.items() if name.startswith(prefix)}\n # run tests\n pbar = tqdm(total=len(tests))\n for name, func in tests.items():\n pbar.desc = name\n pbar.refresh()\n\n func()\n log(f'{name}: {green(\"OK\")}')\n\n pbar.update(n=1)\n pbar.refresh()" } ]
from easyvolcap.utils.egl_utils import eglContextManager # must be imported before OpenGL.GL from os.path import join, dirname from easyvolcap.utils.console_utils import * from easyvolcap.utils.gl_utils import Quad, Mesh from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.data_utils import save_image from easyvolcap.utils.gl_utils import common_opengl_options, linearize_depth from easyvolcap.utils.test_utils import my_tests import OpenGL.GL as gl import os import cv2 import torch import numpy as np
12166
from __future__ import absolute_import, division, print_function # fmt: off # fmt: on WIDTH, HEIGHT = 512, 512 eglctx = eglContextManager(HEIGHT, WIDTH) # will create a new context common_opengl_options() # common init def test_gl_context(): # Render triangle gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) gl.glBegin(gl.GL_TRIANGLES) gl.glColor3f(1, 0, 0) gl.glVertex2f(0, 1) gl.glColor3f(0, 1, 0) gl.glVertex2f(-1, -1) gl.glColor3f(0, 0, 1) gl.glVertex2f(1, -1) gl.glEnd() # Read result img_buf = gl.glReadPixels(0, 0, WIDTH, HEIGHT, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE) img = np.frombuffer(img_buf, np.uint8).reshape(HEIGHT, WIDTH, 4)[::-1] assert all(img[0, 0, :3] == 0) # black corner assert all(img[0, -1, :3] == 0) # black corner assert img[10, WIDTH // 2, :3].argmax() == 0 # red corner assert img[-1, 10, :3].argmax() == 1 # green corner assert img[-1, -10, :3].argmax() == 2 # blue corner def test_gl_mesh_rast(): gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) mesh_path = 'assets/meshes/bunny.ply' img_path = 'test_gl_mesh_rast.png' camera = Camera(H=HEIGHT, W=WIDTH, K=torch.tensor([[592., 0., 256.], [0., 592., 256.], [0., 0., 1.]]), R=torch.tensor([[0.9908, -0.1353, 0.0000], [-0.1341, -0.9815, -0.1365], [0.0185, 0.1353, -0.9906]]), T=torch.tensor([[0.0178], [0.0953], [0.3137]]) )
from __future__ import absolute_import, division, print_function # fmt: off # fmt: on WIDTH, HEIGHT = 512, 512 eglctx = eglContextManager(HEIGHT, WIDTH) # will create a new context common_opengl_options() # common init def test_gl_context(): # Render triangle gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) gl.glBegin(gl.GL_TRIANGLES) gl.glColor3f(1, 0, 0) gl.glVertex2f(0, 1) gl.glColor3f(0, 1, 0) gl.glVertex2f(-1, -1) gl.glColor3f(0, 0, 1) gl.glVertex2f(1, -1) gl.glEnd() # Read result img_buf = gl.glReadPixels(0, 0, WIDTH, HEIGHT, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE) img = np.frombuffer(img_buf, np.uint8).reshape(HEIGHT, WIDTH, 4)[::-1] assert all(img[0, 0, :3] == 0) # black corner assert all(img[0, -1, :3] == 0) # black corner assert img[10, WIDTH // 2, :3].argmax() == 0 # red corner assert img[-1, 10, :3].argmax() == 1 # green corner assert img[-1, -10, :3].argmax() == 2 # blue corner def test_gl_mesh_rast(): gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) mesh_path = 'assets/meshes/bunny.ply' img_path = 'test_gl_mesh_rast.png' camera = Camera(H=HEIGHT, W=WIDTH, K=torch.tensor([[592., 0., 256.], [0., 592., 256.], [0., 0., 1.]]), R=torch.tensor([[0.9908, -0.1353, 0.0000], [-0.1341, -0.9815, -0.1365], [0.0185, 0.1353, -0.9906]]), T=torch.tensor([[0.0178], [0.0953], [0.3137]]) )
mesh = Mesh(filename=mesh_path, shade_flat=False)
2
2023-10-17 04:48:46+00:00
16k
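The record that ends here and the one that begins below share the same shape: a list of retrieved context snippets (each carrying an "identifier", "path" and "snippet"), the target file's import statement, a token count, the cropped and full code, the single next line to be predicted, the index of the gold snippet, a timestamp and a length bucket. As a purely illustrative sketch, the Python below shows one way such a record could be turned into a prompt/target pair. It is not part of the dataset: the JSONL storage, the file name samples.jsonl and the top-level key names are assumptions for illustration; only the per-snippet keys identifier/path/snippet are visible in the samples themselves.

# Minimal sketch, not part of the dataset. Assumes records are stored one JSON
# object per line and use the hypothetical key names below.
import json

def build_example(record):
    """Concatenate the retrieved context snippets with the in-file prefix and
    return (prompt, target), where target is the line to be predicted."""
    # Each context entry holds an identifier, the path it was taken from and the snippet text.
    context_block = "\n\n".join(
        f"# {c['path']} :: {c['identifier']}\n{c['snippet']}" for c in record["context"]
    )
    prompt = context_block + "\n\n" + record["cropped_code"]
    target = record["next_line"]  # e.g. "mesh = Mesh(filename=mesh_path, shade_flat=False)" in the sample above
    return prompt, target

def gold_snippet(record):
    """Return the context entry marked as the gold one for the next line
    (for the sample above, index 2 presumably points at the snippet defining Mesh)."""
    return record["context"][record["gold_snippet_index"]]

if __name__ == "__main__":
    with open("samples.jsonl") as f:  # hypothetical file name
        for line in f:
            record = json.loads(line)
            prompt, target = build_example(record)
            print(len(prompt), target)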
0xbitches/sd-webui-lcm
scripts/main.py
[ { "identifier": "LCMScheduler", "path": "lcm/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic\n methods the library implements for all schedulers such as loading and saving.\n\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n beta_schedule (`str`, defaults to `\"linear\"`):\n The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, *optional*):\n Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.\n clip_sample (`bool`, defaults to `True`):\n Clip the predicted sample for numerical stability.\n clip_sample_range (`float`, defaults to 1.0):\n The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.\n set_alpha_to_one (`bool`, defaults to `True`):\n Each diffusion step uses the alphas product value at that step and at the previous one. For the final step\n there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the alpha value at step 0.\n steps_offset (`int`, defaults to 0):\n An offset added to the inference steps. You can use a combination of `offset=1` and\n `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable\n Diffusion.\n prediction_type (`str`, defaults to `epsilon`, *optional*):\n Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),\n `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen\n Video](https://imagen.research.google/video/paper.pdf) paper).\n thresholding (`bool`, defaults to `False`):\n Whether to use the \"dynamic thresholding\" method. This is unsuitable for latent-space diffusion models such\n as Stable Diffusion.\n dynamic_thresholding_ratio (`float`, defaults to 0.995):\n The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.\n sample_max_value (`float`, defaults to 1.0):\n The threshold value for dynamic thresholding. Valid only when `thresholding=True`.\n timestep_spacing (`str`, defaults to `\"leading\"`):\n The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.\n rescale_betas_zero_snr (`bool`, defaults to `False`):\n Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and\n dark samples instead of limiting it to samples with medium brightness. 
Loosely related to\n [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).\n \"\"\"\n\n # _compatibles = [e.name for e in KarrasDiffusionSchedulers]\n order = 1\n\n @register_to_config\n def __init__(\n self,\n num_train_timesteps: int = 1000,\n beta_start: float = 0.0001,\n beta_end: float = 0.02,\n beta_schedule: str = \"linear\",\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None,\n clip_sample: bool = True,\n set_alpha_to_one: bool = True,\n steps_offset: int = 0,\n prediction_type: str = \"epsilon\",\n thresholding: bool = False,\n dynamic_thresholding_ratio: float = 0.995,\n clip_sample_range: float = 1.0,\n sample_max_value: float = 1.0,\n timestep_spacing: str = \"leading\",\n rescale_betas_zero_snr: bool = False,\n ):\n if trained_betas is not None:\n self.betas = torch.tensor(trained_betas, dtype=torch.float32)\n elif beta_schedule == \"linear\":\n self.betas = torch.linspace(\n beta_start, beta_end, num_train_timesteps, dtype=torch.float32)\n elif beta_schedule == \"scaled_linear\":\n # this schedule is very specific to the latent diffusion model.\n self.betas = (\n torch.linspace(beta_start**0.5, beta_end**0.5,\n num_train_timesteps, dtype=torch.float32) ** 2\n )\n elif beta_schedule == \"squaredcos_cap_v2\":\n # Glide cosine schedule\n self.betas = betas_for_alpha_bar(num_train_timesteps)\n else:\n raise NotImplementedError(\n f\"{beta_schedule} does is not implemented for {self.__class__}\")\n\n # Rescale for zero SNR\n if rescale_betas_zero_snr:\n self.betas = rescale_zero_terminal_snr(self.betas)\n\n self.alphas = 1.0 - self.betas\n self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)\n\n # At every step in ddim, we are looking into the previous alphas_cumprod\n # For the final step, there is no previous alphas_cumprod because we are already at 0\n # `set_alpha_to_one` decides whether we set this parameter simply to one or\n # whether we use the final alpha of the \"non-previous\" one.\n self.final_alpha_cumprod = torch.tensor(\n 1.0) if set_alpha_to_one else self.alphas_cumprod[0]\n\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n\n # setable values\n self.num_inference_steps = None\n self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[\n ::-1].copy().astype(np.int64))\n\n def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:\n \"\"\"\n Ensures interchangeability with schedulers that need to scale the denoising model input depending on the\n current timestep.\n\n Args:\n sample (`torch.FloatTensor`):\n The input sample.\n timestep (`int`, *optional*):\n The current timestep in the diffusion chain.\n\n Returns:\n `torch.FloatTensor`:\n A scaled input sample.\n \"\"\"\n return sample\n\n def _get_variance(self, timestep, prev_timestep):\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n variance = (beta_prod_t_prev / beta_prod_t) * \\\n (1 - alpha_prod_t / alpha_prod_t_prev)\n\n return variance\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample\n def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n \"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in 
xt0 (the\n prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by\n s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing\n pixels from saturation at each step. We find that dynamic thresholding results in significantly better\n photorealism as well as better image-text alignment, especially when using very large guidance weights.\"\n\n https://arxiv.org/abs/2205.11487\n \"\"\"\n dtype = sample.dtype\n batch_size, channels, height, width = sample.shape\n\n if dtype not in (torch.float32, torch.float64):\n # upcast for quantile calculation, and clamp not implemented for cpu half\n sample = sample.float()\n\n # Flatten sample for doing quantile calculation along each image\n sample = sample.reshape(batch_size, channels * height * width)\n\n abs_sample = sample.abs() # \"a certain percentile absolute pixel value\"\n\n s = torch.quantile(\n abs_sample, self.config.dynamic_thresholding_ratio, dim=1)\n s = torch.clamp(\n s, min=1, max=self.config.sample_max_value\n ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]\n\n # (batch_size, 1) because clamp will broadcast along dim=0\n s = s.unsqueeze(1)\n # \"we threshold xt0 to the range [-s, s] and then divide by s\"\n sample = torch.clamp(sample, -s, s) / s\n\n sample = sample.reshape(batch_size, channels, height, width)\n sample = sample.to(dtype)\n\n return sample\n\n def set_timesteps(self, num_inference_steps: int, original_inference_steps: int, device: Union[str, torch.device] = None):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n \"\"\"\n\n if num_inference_steps > self.config.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:\"\n f\" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.config.num_train_timesteps} timesteps.\"\n )\n\n self.num_inference_steps = num_inference_steps\n\n # LCM Timesteps Setting: # Linear Spacing\n c = self.config.num_train_timesteps // original_inference_steps\n lcm_origin_timesteps = np.asarray(\n list(range(1, original_inference_steps + 1))) * c - 1 # LCM Training Steps Schedule\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n # LCM Inference Steps Schedule\n timesteps = lcm_origin_timesteps[::-\n skipping_step][:num_inference_steps]\n\n self.timesteps = torch.from_numpy(timesteps.copy()).to(device)\n\n def get_scalings_for_boundary_condition_discrete(self, t):\n self.sigma_data = 0.5 # Default: 0.5\n\n # By dividing 0.1: This is almost a delta function at t=0.\n c_skip = self.sigma_data**2 / (\n (t / 0.1) ** 2 + self.sigma_data**2\n )\n c_out = ((t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5)\n return c_skip, c_out\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timeindex: int,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[LCMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. 
This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n model_output (`torch.FloatTensor`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n A current instance of a sample created by the diffusion process.\n eta (`float`):\n The weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`, defaults to `False`):\n If `True`, computes \"corrected\" `model_output` from the clipped predicted original sample. Necessary\n because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no\n clipping has happened, \"corrected\" `model_output` would coincide with the one provided as input and\n `use_clipped_model_output` has no effect.\n generator (`torch.Generator`, *optional*):\n A random number generator.\n variance_noise (`torch.FloatTensor`):\n Alternative to generating noise with `generator` by directly providing the noise for the variance\n itself. Useful for methods such as [`CycleDiffusion`].\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.\n\n Returns:\n [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:\n If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # 1. get previous step value\n prev_timeindex = timeindex + 1\n if prev_timeindex < len(self.timesteps):\n prev_timestep = self.timesteps[prev_timeindex]\n else:\n prev_timestep = timestep\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 3. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(\n timestep)\n\n # 4. Different Parameterization:\n parameterization = self.config.prediction_type\n\n if parameterization == \"epsilon\": # noise-prediction\n pred_x0 = (sample - beta_prod_t.sqrt() *\n model_output) / alpha_prod_t.sqrt()\n\n elif parameterization == \"sample\": # x-prediction\n pred_x0 = model_output\n\n elif parameterization == \"v_prediction\": # v-prediction\n pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output\n\n # 4. Denoise model output using boundary conditions\n denoised = c_out * pred_x0 + c_skip * sample\n\n # 5. 
Sample z ~ N(0, I), For MultiStep Inference\n # Noise is not used for one-step sampling.\n if len(self.timesteps) > 1:\n noise = torch.randn(model_output.shape).to(model_output.device)\n prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise\n else:\n prev_sample = denoised\n\n if not return_dict:\n return (prev_sample, denoised)\n\n return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise\n\n def add_noise(\n self,\n original_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.IntTensor,\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as original_samples\n alphas_cumprod = self.alphas_cumprod.to(\n device=original_samples.device, dtype=original_samples.dtype)\n timesteps = timesteps.to(original_samples.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(original_samples.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n noisy_samples = sqrt_alpha_prod * original_samples + \\\n sqrt_one_minus_alpha_prod * noise\n return noisy_samples\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity\n def get_velocity(\n self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as sample\n alphas_cumprod = self.alphas_cumprod.to(\n device=sample.device, dtype=sample.dtype)\n timesteps = timesteps.to(sample.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(sample.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample\n return velocity\n\n def __len__(self):\n return self.config.num_train_timesteps" }, { "identifier": "LatentConsistencyModelPipeline", "path": "lcm/lcm_pipeline.py", "snippet": "class LatentConsistencyModelPipeline(DiffusionPipeline):\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: None,\n safety_checker: None,\n feature_extractor: CLIPImageProcessor\n ):\n super().__init__()\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (\n len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(\n vae_scale_factor=self.vae_scale_factor)\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds: None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` 
or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n \"\"\"\n\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(\n prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1: -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n if self.text_encoder is not None:\n prompt_embeds_dtype = self.text_encoder.dtype\n elif self.unet is not None:\n prompt_embeds_dtype = self.unet.dtype\n else:\n prompt_embeds_dtype = prompt_embeds.dtype\n\n prompt_embeds = prompt_embeds.to(\n dtype=prompt_embeds_dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(\n bs_embed * num_images_per_prompt, seq_len, -1)\n\n # Don't need to get uncond prompt embedding because of LCM Guided Distillation\n return prompt_embeds\n\n # ¯\\_(ツ)_/¯\n def run_safety_checker(self, image, device, dtype):\n return image, None\n \n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):\n shape = (batch_size, num_channels_latents, height //\n self.vae_scale_factor, width // self.vae_scale_factor)\n if latents is None:\n latents = torch.randn(shape, dtype=dtype).to(device)\n else:\n latents = latents.to(device)\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):\n \"\"\"\n see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298\n Args:\n timesteps: torch.Tensor: generate embedding vectors at these timesteps\n embedding_dim: int: dimension of the embeddings to generate\n dtype: data type of the generated embeddings\n\n Returns:\n embedding vectors with shape `(len(timesteps), embedding_dim)`\n \"\"\"\n assert len(w.shape) == 1\n w = w * 1000.\n\n half_dim = embedding_dim // 2\n emb = 
torch.log(torch.tensor(10000.)) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)\n emb = w.to(dtype)[:, None] * emb[None, :]\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = torch.nn.functional.pad(emb, (0, 1))\n assert emb.shape == (w.shape[0], embedding_dim)\n return emb\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = 768,\n width: Optional[int] = 768,\n guidance_scale: float = 7.5,\n num_images_per_prompt: Optional[int] = 1,\n latents: Optional[torch.FloatTensor] = None,\n num_inference_steps: int = 4,\n original_inference_steps: int = 50,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n device: Optional[Union[str, torch.device]] = None,\n ):\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)\n\n # 3. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds=prompt_embeds,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, original_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variable\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n latents,\n )\n bs = batch_size * num_images_per_prompt\n\n # 6. Get Guidance Scale Embedding\n w = torch.tensor(guidance_scale).repeat(bs)\n w_embedding = self.get_w_embedding(w, embedding_dim=256).to(\n device=device, dtype=latents.dtype)\n\n # 7. 
LCM MultiStep Sampling Loop:\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n\n ts = torch.full((bs,), t, device=device, dtype=torch.long)\n latents = latents.to(prompt_embeds.dtype)\n\n # model prediction (v-prediction, eps, x)\n model_pred = self.unet(\n latents,\n ts,\n timestep_cond=w_embedding,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False)[0]\n\n # compute the previous noisy sample x_t -> x_t-1\n latents, denoised = self.scheduler.step(\n model_pred, i, t, latents, return_dict=False)\n\n # # call the callback, if provided\n # if i == len(timesteps) - 1:\n progress_bar.update()\n\n denoised = denoised.to(prompt_embeds.dtype)\n if not output_type == \"latent\":\n image = self.vae.decode(\n denoised / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(\n image, device, prompt_embeds.dtype)\n else:\n image = denoised\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(\n image, output_type=output_type, do_denormalize=do_denormalize)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "LatentConsistencyModelImg2ImgPipeline", "path": "lcm/lcm_i2i_pipeline.py", "snippet": "class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):\n _optional_components = [\"scheduler\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: \"LCMSchedulerWithTimestamp\",\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = False,\n ):\n super().__init__()\n\n scheduler = (\n scheduler\n if scheduler is not None\n else LCMSchedulerWithTimestamp(\n beta_start=0.00085, beta_end=0.0120, beta_schedule=\"scaled_linear\", prediction_type=\"epsilon\"\n )\n )\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds: None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not\n provided, text embeddings will be generated from `prompt` input argument.\n \"\"\"\n\n if prompt is not None and isinstance(prompt, str):\n pass\n elif prompt is not None and isinstance(prompt, list):\n len(prompt)\n else:\n prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n if self.text_encoder is not None:\n prompt_embeds_dtype = self.text_encoder.dtype\n elif self.unet is not None:\n prompt_embeds_dtype = self.unet.dtype\n else:\n prompt_embeds_dtype = prompt_embeds.dtype\n\n prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # Don't need to get uncond prompt embedding because of LCM Guided Distillation\n return prompt_embeds\n\n # ¯\\_(ツ)_/¯\n def run_safety_checker(self, image, device, dtype):\n return image, None\n\n def prepare_latents(self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, latents=None, generator=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n image = image.to(device=device, dtype=dtype)\n\n # batch_size = batch_size * num_images_per_prompt\n\n if image.shape[1] == 4:\n init_latents = image\n\n else:\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n elif isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.sample(generator)\n\n init_latents = self.vae.config.scaling_factor * init_latents\n\n if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:\n # expand init_latents for batch_size\n deprecation_message = (\n f\"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial\"\n \" images (`image`). Initial images are now duplicating to match the number of text prompts. Note\"\n \" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update\"\n \" your script to pass as many initial images as text prompts to suppress this warning.\"\n )\n # deprecate(\"len(prompt) != len(image)\", \"1.0.0\", deprecation_message, standard_warn=False)\n additional_image_per_prompt = batch_size // init_latents.shape[0]\n init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)\n elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:\n raise ValueError(\n f\"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.\"\n )\n else:\n init_latents = torch.cat([init_latents], dim=0)\n\n shape = init_latents.shape\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n\n # get latents\n init_latents = self.scheduler.add_noise(init_latents, noise, timestep)\n latents = init_latents\n\n return latents\n\n if latents is None:\n latents = torch.randn(shape, dtype=dtype).to(device)\n else:\n latents = latents.to(device)\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):\n \"\"\"\n see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298\n Args:\n timesteps: torch.Tensor: generate embedding vectors at these timesteps\n embedding_dim: int: dimension of the embeddings to generate\n dtype: data type of the generated embeddings\n Returns:\n embedding vectors with shape `(len(timesteps), embedding_dim)`\n \"\"\"\n assert len(w.shape) == 1\n w = w * 1000.0\n\n half_dim = embedding_dim // 2\n emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)\n emb = w.to(dtype)[:, None] * emb[None, :]\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = torch.nn.functional.pad(emb, (0, 1))\n assert emb.shape == (w.shape[0], embedding_dim)\n return emb\n\n def get_timesteps(self, num_inference_steps, strength, device):\n # get the original timestep using init_timestep\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n\n t_start = max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]\n\n return timesteps, num_inference_steps - t_start\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n strength: float = 0.8,\n height: Optional[int] = 768,\n width: Optional[int] = 768,\n guidance_scale: float = 7.5,\n 
num_images_per_prompt: Optional[int] = 1,\n latents: Optional[torch.FloatTensor] = None,\n num_inference_steps: int = 4,\n original_inference_steps: int = 50,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n device: Optional[Union[str, torch.device]] = None,\n ):\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = device\n # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)\n\n # 3. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds=prompt_embeds,\n )\n\n # 3.5 encode image\n image = self.image_processor.preprocess(image=image)\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(strength, num_inference_steps, original_inference_steps)\n # timesteps = self.scheduler.timesteps\n # timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)\n timesteps = self.scheduler.timesteps\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n\n # 5. Prepare latent variable\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n image,\n latent_timestep,\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n latents,\n )\n bs = batch_size * num_images_per_prompt\n\n # 6. Get Guidance Scale Embedding\n w = torch.tensor(guidance_scale).repeat(bs)\n w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)\n\n # 7. LCM MultiStep Sampling Loop:\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n ts = torch.full((bs,), t, device=device, dtype=torch.long)\n latents = latents.to(prompt_embeds.dtype)\n\n # model prediction (v-prediction, eps, x)\n model_pred = self.unet(\n latents,\n ts,\n timestep_cond=w_embedding,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # compute the previous noisy sample x_t -> x_t-1\n latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)\n\n # # call the callback, if provided\n # if i == len(timesteps) - 1:\n progress_bar.update()\n\n denoised = denoised.to(prompt_embeds.dtype)\n if not output_type == \"latent\":\n image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = denoised\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" } ]
from concurrent.futures import ThreadPoolExecutor from pathlib import Path from typing import Optional from lcm.lcm_scheduler import LCMScheduler from lcm.lcm_pipeline import LatentConsistencyModelPipeline from lcm.lcm_i2i_pipeline import LatentConsistencyModelImg2ImgPipeline from diffusers.image_processor import PipelineImageInput from modules import script_callbacks from PIL import Image, PngImagePlugin import uuid import modules.scripts as scripts import modules.shared import os import random import time import numpy as np import gradio as gr import torch import cv2
11,143
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32") scheduler = LCMScheduler.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler")
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32") scheduler = LCMScheduler.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", subfolder="scheduler")
pipe = LatentConsistencyModelPipeline.from_pretrained(
1
2023-10-22 11:53:48+00:00
16k
kylesargent/ZeroNVS
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n # import pdb\n # pdb.set_trace()\n\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x = x.clone()\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == 
\"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n 
get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n assert self.unbounded\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n 
density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13,640
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only:
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only:
self.encoding = get_encoding(
7
2023-10-24 19:02:44+00:00
16k
princeton-nlp/LLM-Shearing
llmshearing/models/composer_pythia.py
[ { "identifier": "L0Module", "path": "llmshearing/models/l0_module.py", "snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.base_model_info = self.set_model_info(cfg, n_matrix_mlp=n_matrix_mlp) \n l0_module_cfg = cfg.l0_module\n self.target_model_info = None\n target_model_cfg = getattr(l0_module_cfg, \"target_model\", None)\n if target_model_cfg is not None:\n self.target_model_info = self.set_model_info(target_model_cfg, n_matrix_mlp=n_matrix_mlp)\n \n # l0 config\n self.pruning_modules = l0_module_cfg.pruning_modules \n self.start_sparsity = l0_module_cfg.start_sparsity \n self.lagrangian_warmup_steps = Time.from_timestring(l0_module_cfg.lagrangian_warmup_steps).value\n self.device = device\n self.eval_target_model = l0_module_cfg.get(\"eval_target_model\", True)\n \n # l0 params\n self.lambdas = {}\n self.lambdas[\"lambda_1\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.lambdas[\"lambda_2\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.masks = {}\n for pruning_module in self.pruning_modules:\n self.initialize_one_module(pruning_module)\n self.masks = torch.nn.ModuleDict(self.masks)\n self.lambdas = torch.nn.ParameterDict(self.lambdas)\n \n # config after initialization\n self.prunable_model_size = self.calculate_prunable_model_size(self.base_model_info)\n if target_model_cfg is not None:\n self.prunable_target_model_size = self.calculate_prunable_model_size(self.target_model_info)\n self.target_sparsity = 1 - self.prunable_target_model_size / self.prunable_model_size\n else:\n self.target_sparsity = l0_module_cfg.target_sparsity\n\n print(\"********** Initializing L0 Module **********\") \n for pruning_module in self.pruning_modules:\n print(f\"***** {pruning_module} *****\")\n print(f\"z.shape\", self.masks[pruning_module].z_loga.shape)\n print(f\"size\", self.masks[pruning_module].mask_size)\n print(f\"prunable model size: {self.prunable_model_size}\")\n \n \n def set_model_info(self, cfg, n_matrix_mlp):\n ns = NS() \n ns.hidden_size = cfg.d_model\n ns.intermediate_size = cfg.intermediate_size\n ns.num_attention_heads = cfg.n_heads\n ns.mlp_num_per_layer = 1\n ns.dim_per_head = ns.hidden_size // ns.num_attention_heads \n ns.num_layers = cfg.n_layers\n ns.vocab_size = cfg.vocab_size\n\n ns.params_per_head_layer = ns.hidden_size * ns.hidden_size * 4\n ns.params_per_head = ns.params_per_head_layer // ns.num_attention_heads\n ns.params_per_mlp_layer = ns.hidden_size * ns.intermediate_size * n_matrix_mlp\n ns.params_per_intermediate_dim = ns.params_per_mlp_layer // ns.intermediate_size\n\n ns.full_model_size = (ns.params_per_head_layer + ns.params_per_mlp_layer) * ns.num_layers\n return ns\n \n def calculate_prunable_model_size(self, ns: NS):\n prunable_mlp_size = ns.params_per_mlp_layer * ns.num_layers\n prunable_head_layer_size = ns.params_per_head_layer * ns.num_layers\n prunable_model_size = 0\n if \"hidden\" in self.pruning_modules:\n return prunable_mlp_size + prunable_head_layer_size\n if \"head_layer\" in self.pruning_modules or \"head\" in self.pruning_modules:\n prunable_model_size += prunable_head_layer_size\n if \"mlp\" in self.pruning_modules or \"intermediate\" in self.pruning_modules:\n prunable_model_size += prunable_mlp_size\n return prunable_model_size\n \n def initialize_one_module(self, module_name: str):\n func_name = f\"initialize_{module_name}\"\n try:\n method = getattr(self, 
func_name)\n except AttributeError:\n raise NotImplementedError(\"Instance `{}` does not implement `{}`\".format(self, func_name))\n method()\n \n def initialize_hidden(self):\n mask_shape = [self.base_model_info.hidden_size]\n num_params_per_mask=self.base_model_info.hidden_size * 4 + self.base_model_info.hidden_size * 4 * 2\n \n target_hidden_sparsity = None; pd=None; target_mask_size=None; \n if self.target_model_info is not None:\n target_hidden_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n target_mask_size = self.target_model_info.hidden_size\n pd = {\"lambda_1_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n hidden_mask = Mask(name=\"hidden\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=[self.base_model_info.hidden_size],\n target_sparsity=target_hidden_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"hidden\"] = hidden_mask\n\n def initialize_head(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads]\n num_params_per_mask = self.base_model_info.params_per_head\n mask_output_shape = [self.base_model_info.num_layers, 1, self.base_model_info.num_attention_heads, 1] \n \n target_head_sparsity = None; pd = {} ; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_sparsity = 1 - self.target_model_info.num_attention_heads / self.base_model_info.num_attention_heads\n target_mask_size = self.target_model_info.num_attention_heads\n pd = {\"lambda_1_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n head_mask = Mask(name=\"head\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head\"] = head_mask \n\n def initialize_qk_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_qk_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_qk_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n qk_head_dim = Mask(name=\"qk_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_qk_head_dim_sparsity,\n target_mask_size=self.target_model_info.hidden_size,\n device=self.device)\n self.masks[\"qk_head_dim\"] = qk_head_dim \n \n \n def initialize_vo_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n 
num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_vo_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_vo_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n vo_head_dim = Mask(name=\"vo_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_vo_head_dim_sparsity,\n device=self.device)\n self.masks[\"vo_head_dim\"] = vo_head_dim \n \n def initialize_head_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_head_layer_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n head_layer_mask = Mask(name=\"head_layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head_layer\"] = head_layer_mask\n \n def initialize_intermediate(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.intermediate_size]\n num_params_per_mask=self.base_model_info.params_per_intermediate_dim\n mask_output_shape = [self.base_model_info.num_layers, 1, 1, self.base_model_info.intermediate_size] \n \n target_int_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_int_sparsity = 1 - self.target_model_info.intermediate_size / self.base_model_info.intermediate_size\n target_mask_size = self.target_model_info.intermediate_size\n pd = {\"lambda_1_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n int_mask = Mask(name=\"intermediate\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_int_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"intermediate\"] = int_mask\n \n\n def initialize_mlp(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_mlp_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_mlp_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_mlp\": torch.nn.Parameter(torch.tensor(0.0, 
device=self.device)),\n \"lambda_2_mlp\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n mlp_mask = Mask(name=\"mlp\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_mlp_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"mlp\"] = mlp_mask \n\n def initialize_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads + self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_layer_sparsity = None; target_mask_size=None; pd = {}\n if self.target_model_info is not None:\n target_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n layer_mask = Mask(name=\"layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model) \n self.masks[\"layer\"] = layer_mask \n \n def constrain_parameters(self):\n for key in self.masks:\n self.masks[key].constrain_parameters()\n\n def calculate_expected_score_sparsity(self):\n expected_scores = {}\n expected_sparsitys = {}\n for key in self.masks:\n score, sparsity = self.masks[key].calculate_expected_score_sparsity()\n expected_scores[key] = score\n expected_sparsitys[key] = sparsity\n return expected_scores, expected_sparsitys\n \n def transform_scores_for_head(self, expected_scores: dict):\n head_score = expected_scores[\"head\"] # 12 * 12\n\n head_layer_score = None\n if \"head_layer\" in expected_scores:\n head_layer_score = expected_scores[\"head_layer\"]\n elif \"layer\" in expected_scores:\n head_layer_score = expected_scores[\"layer\"] # 12\n if head_layer_score is not None:\n head_layer_score = head_layer_score.view(-1, 1) # 12 * 1\n \n return head_layer_score, head_score\n\n def transform_scores_for_mlp(self, expected_scores: dict):\n mlp_score = None\n if \"mlp\" in expected_scores:\n mlp_score = expected_scores[\"mlp\"] # 12\n elif \"layer\" in expected_scores:\n mlp_score = expected_scores[\"layer\"] # 12\n if mlp_score is not None:\n mlp_score = mlp_score.unsqueeze(-1)\n \n intermediate_score = expected_scores[\"intermediate\"] # 12 * 3072\n return mlp_score, intermediate_score\n\n\n def get_expected_num_params(self, expected_scores: dict): #! 
calculate the current parsity\n num_parameters = 0\n \n # 12 * 1 \n # 12 * 12\n head_layer_score, head_score = self.transform_scores_for_head(expected_scores)\n mlp_score, int_score = self.transform_scores_for_mlp(expected_scores)\n \n head_score = (head_layer_score * head_score) # 12 * 12\n int_score = (mlp_score * int_score) # 12 * 3072\n\n qk_score = None\n if \"qk_head_dim\" in expected_scores:\n qk_head_dim_score = expected_scores[\"qk_head_dim\"] # num_layer * hidden_size\n vo_head_dim_score = expected_scores[\"vo_head_dim\"] # num_layer * hidden_size\n qk_head_dim_score = qk_head_dim_score.view(qk_head_dim_score.shape[0], -1) # 12 * 768\n vo_head_dim_score = vo_head_dim_score.view(vo_head_dim_score.shape[0], -1) # 12 * 768\n head_score = torch.repeat_interleave(head_score, self.base_model_info.dim_per_head, dim=1) # 12 * 768\n\n qk_score = head_score * qk_head_dim_score # 12 * 768\n vo_score = head_score * vo_head_dim_score # 12 * 768\n \n if \"hidden\" in expected_scores:\n hidden_score = expected_scores[\"hidden\"] # 768 \n \n if qk_score is None:\n num_parameters += torch.outer(hidden_score, head_score.reshape(-1)).sum() * self.masks.head.num_params_per_mask / self.base_model_info.hidden_size # 768 * 144\n num_parameters += torch.outer(hidden_score, int_score.reshape(-1)).sum() * self.masks.intermediate.num_params_per_mask / self.base_model_info.hidden_size # 768 * 36864\n else:\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), qk_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), vo_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), int_score.unsqueeze(1))) * 3 # 12 * 768 * 3072\n else:\n num_parameters += torch.sum(head_score) * self.masks.head.num_params_per_mask\n num_parameters += torch.sum(int_score) * self.masks.intermediate.num_params_per_mask\n return num_parameters\n \n def get_target_sparsity(self, pruned_steps: int, full_sparsity: float = None):\n target_sparsity = full_sparsity\n if getattr(self, \"lagrangian_warmup_steps\", 0) > 0:\n target_sparsity = (target_sparsity - self.start_sparsity) * min(1, pruned_steps / self.lagrangian_warmup_steps) + self.start_sparsity\n return target_sparsity\n\n\n def lagrangian_regularization(self, pruned_steps: int):\n def _lag_loss(expected_sparsity: torch.tensor, target_sparsity: float, lambda_1: torch.tensor, lambda_2: torch.tensor):\n lagrangian_loss = lambda_1 * (expected_sparsity - target_sparsity) + lambda_2 * (expected_sparsity - target_sparsity) ** 2 \n lagrangian_loss = lagrangian_loss.mean()\n return lagrangian_loss\n\n target_sparsity = self.get_target_sparsity(pruned_steps, self.target_sparsity) \n expected_scores, expected_sparsitys = self.calculate_expected_score_sparsity()\n expected_size = self.get_expected_num_params(expected_scores) #! 
calculate \\bar s\n expected_sparsity = 1 - expected_size / self.prunable_model_size\n \n return_v = {}\n if self.target_model_info is None:\n lagrangian_loss = _lag_loss(expected_sparsity, target_sparsity, self.lambdas[\"lambda_1\"], self.lambdas[\"lambda_2\"])\n return_v = {\"expected_sparsity\": expected_sparsity.item(), \"target_sparsity\": target_sparsity}\n for key in expected_sparsitys:\n return_v[f\"expected_{key}_sparsity\"] = expected_sparsitys[key].mean().item()\n else:\n lagrangian_loss = 0\n return_v = {}\n for pruning_module in self.pruning_modules:\n ts = self.get_target_sparsity(pruned_steps, self.masks[pruning_module].target_sparsity)\n expected_ts = expected_sparsitys[pruning_module] \n lagrangian_loss += _lag_loss(expected_ts, ts, self.lambdas[f\"lambda_1_{pruning_module}\"], self.lambdas[f\"lambda_2_{pruning_module}\"])\n expected_ts = expected_ts.mean().item()\n return_v.update({\"expected_{}_sparsity\".format(pruning_module): expected_ts, \"target_{}_sparsity\".format(pruning_module): ts})\n return_v[\"expected_sparsity\"] = expected_sparsity.item()\n return_v[\"target_sparsity\"] = target_sparsity\n\n\n # return_v might not matter\n return lagrangian_loss, return_v\n \n def forward(self, calculate_lagrangian: bool = False, pruned_steps: int = 0):\n self.constrain_parameters()\n if calculate_lagrangian:\n return self.lagrangian_regularization(pruned_steps)\n \n zs = {f\"{pruning_module}_z\": [] for pruning_module in self.pruning_modules}\n \n if \"layer\" in self.pruning_modules:\n zs.pop(\"layer_z\")\n zs[\"mlp_z\"] = []\n zs[\"head_layer_z\"] = []\n \n if self.training:\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.sample_z()\n zs[f\"{pruning_module}_z\"] = z\n else: # removed layerwise! \n with torch.no_grad():\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.deterministic_z()\n zs[f\"{pruning_module}_z\"] = z\n if \"layer_z\" in zs:\n zs[\"mlp_z\"] = zs.pop(\"layer_z\")\n zs[\"head_layer_z\"] = zs[\"mlp_z\"]\n return zs " }, { "identifier": "ComposerMosaicLlama", "path": "llmshearing/models/composer_llama.py", "snippet": "class ComposerMosaicLlama(ComposerModel):\n \"\"\" Llama model with the Composer model interface. 
\"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.model = LlamaModel(cfg)\n self.ref_model = None\n self.num_fwd_flops = self._compute_num_fwd_flops()\n self.train_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n self.eval_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n\n self.set_names = getattr(cfg, \"set_names\", None)\n if self.set_names is not None:\n self.set_name_to_id = {set_name: i for i, set_name in enumerate(self.set_names)}\n self.set_id_to_name = {i: set_name for i, set_name in enumerate(self.set_names)}\n \n for set_name in self.set_names:\n # add train and eval metrics for each set\n self.train_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.eval_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.train_metrics[f'{set_name}_count'] = DomainCount(set_name=set_name, set_index=self.set_name_to_id[set_name]) \n\n def prune_params(self, zs=None):\n self.model.prune_params(zs)\n \n def get_targets(self, batch):\n targets = torch.roll(batch['labels'], shifts=-1)\n targets[:, -1] = -100\n return targets\n \n def forward(self, batch):\n input_ids = batch['input_ids']\n key_padding_mask = batch['attention_mask'].bool(\n ) if 'attention_mask' in batch else None\n pruned_steps = batch.get('pruned_steps', None)\n if pruned_steps is not None:\n pruned_steps = pruned_steps[0].item()\n zs = {key: batch[key] for key in batch if \"_z\" in key}\n model_output = self.model(input_ids=input_ids, key_padding_mask=key_padding_mask, pruned_steps=pruned_steps, **zs)\n return model_output\n\n def eval_forward(self, batch, outputs=None):\n return outputs if outputs is not None else self.forward(batch)\n\n def loss(self, outputs, batch):\n logits = outputs[\"logits\"]\n l0_output = outputs[\"l0_output\"]\n targets = self.get_targets(batch)\n\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)),\n targets.view(-1),\n ignore_index=-100)\n return_loss = {\"ce_loss\": loss}\n if l0_output is not None:\n lag_loss = l0_output[0]\n return_loss[\"lag_loss\"] = lag_loss\n return_loss[\"total\"] = sum(return_loss.values())\n return return_loss\n\n def get_metrics(self, is_train=False):\n return self.train_metrics if is_train else self.eval_metrics\n\n def update_metric(self, batch, outputs, metric) -> None:\n logits = outputs[\"logits\"]\n if isinstance(metric, DomainLanguageCrossEntropy):\n targets = self.get_targets(batch)\n set_id = self.set_name_to_id[metric.set_name]\n targets[batch[\"set\"] != set_id] = -100\n metric.update(logits, targets)\n elif isinstance(metric, DomainCount):\n with torch.inference_mode():\n idx = None\n selected_sets = batch['set']\n metric.update(selected_sets, idx)\n else:\n logits = logits.view(-1, logits.size(-1))\n targets = self.get_targets(batch).view(-1)\n metric.update(logits, targets)\n\n def add_eval_metrics(self, evaluator):\n evaluator_metrics = {\n m: METRIC_DEFAULT_CTORS[m]() for m in evaluator.metric_names\n }\n if self.eval_metrics is not None:\n self.eval_metrics.update(evaluator_metrics)\n else:\n self.eval_metrics = evaluator_metrics\n\n def _compute_num_fwd_flops(self):\n # Might not be correct for LLaMA structures\n n_params = sum(p.numel() for p in self.parameters())\n # the number of paramters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets 
us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.model.cfg.max_seq_len\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = self.model.cfg.n_layers * 2 * 2 * (\n self.model.cfg.d_model * (self.model.cfg.max_seq_len**2))\n return params_flops_per_seq + attn_flops_per_seq\n\n def flops_per_batch(self, batch):\n # Note: this computation does not take into account padding, and assumes\n # that the dataset has been constructed without padding. Additionally, we\n # assume the backward pass is approximately 2x the forward pass\n return self.num_fwd_flops * 3 * batch['input_ids'].shape[0]\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n if new_num_tokens is not None:\n self.model._resize_token_embeddings(new_num_tokens)" }, { "identifier": "prepare_decoder_attention_mask", "path": "llmshearing/models/composer_llama.py", "snippet": "def prepare_decoder_attention_mask(input_shape, inputs_embeds):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype).to(inputs_embeds.device)\n\n return combined_attention_mask" }, { "identifier": "turn_head_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_head_z(head_z, head_layer_z):\n head_z = head_z.squeeze().clone()\n if head_layer_z is not None:\n head_z *= head_layer_z\n to_prune_heads = torch.where(head_z == 0)[0].view(-1).tolist()\n return to_prune_heads" }, { "identifier": "turn_mlp_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_mlp_z(intermediate_z, mlp_z):\n intermediate_z_layer = intermediate_z.squeeze().clone()\n if mlp_z is not None:\n intermediate_z_layer *= mlp_z\n keep_intermediate_dims = torch.where(intermediate_z_layer != 0)[0].tolist()\n return keep_intermediate_dims " }, { "identifier": "normal_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def normal_attn_fn(\n query,\n key, \n value,\n attention_mask=None,\n head_z=None\n):\n bsz, n_heads, q_len, head_dim = query.shape\n dim = n_heads * head_dim\n attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(head_dim)\n attn_weights = attn_weights + attention_mask\n attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))\n\n # upcast attention to fp32\n attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)\n attn_output = torch.matmul(attn_weights, value) # (bsz, n_heads, q_len, head_dim)\n if head_z is not None:\n attn_output *= head_z.unsqueeze(-1)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, q_len, dim)\n return attn_output" }, { "identifier": "flash_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def flash_attn_fn(\n query,\n key,\n value,\n softmax_scale=None,\n attn_bias=None,\n query_padding_mask=None,\n key_padding_mask=None,\n is_causal=False,\n dropout_p=0.0,\n training=False,\n needs_weights=False,\n head_z=None,\n \n):\n try:\n from flash_attn import bert_padding # type: ignore\n from flash_attn import flash_attn_interface # type: ignore\n except ImportError as e:\n raise e\n\n # check_valid_inputs(query, key, value)\n\n if attn_bias is not None:\n raise NotImplementedError(f'attn_bias not implemented for flash attn.')\n\n batch_size, seqlen = 
query.shape[:2]\n\n if query_padding_mask is None:\n query_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=query.device)\n if key_padding_mask is None:\n key_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=key.device)\n\n query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(\n query, query_padding_mask)\n # query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(\n key, key_padding_mask)\n # key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)\n # value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n dropout_p = dropout_p if training else 0.0\n \n output_unpad = flash_attn_interface.flash_attn_unpadded_func(\n query_unpad,\n key_unpad,\n value_unpad,\n cu_seqlens_q,\n cu_seqlens_k,\n max_seqlen_q,\n max_seqlen_k,\n dropout_p,\n softmax_scale=softmax_scale,\n causal=is_causal,\n return_attn_probs=needs_weights)\n\n if head_z is not None:\n output_unpad = output_unpad * head_z # 1 * h * 1\n output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)\n return output, None" } ]
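The L0Module snippet in the context above drives pruning by tying the expected sparsity to a target through two Lagrange multipliers, with the target warmed up linearly over lagrangian_warmup_steps. A minimal standalone sketch of just that penalty, with illustrative values rather than the repo's configuration, could look like:

import torch

def target_sparsity_at(step, start_sparsity, full_sparsity, warmup_steps):
    # linear warmup of the sparsity target, as in L0Module.get_target_sparsity
    if warmup_steps > 0:
        return (full_sparsity - start_sparsity) * min(1, step / warmup_steps) + start_sparsity
    return full_sparsity

def lagrangian_penalty(expected_sparsity, target_sparsity, lambda_1, lambda_2):
    # lambda_1 * (s - t) + lambda_2 * (s - t)^2, averaged over the mask dimensions
    gap = expected_sparsity - target_sparsity
    return (lambda_1 * gap + lambda_2 * gap ** 2).mean()

# illustrative values only
lambda_1 = torch.nn.Parameter(torch.tensor(0.0))
lambda_2 = torch.nn.Parameter(torch.tensor(0.0))
expected = torch.tensor(0.35)                      # current expected sparsity
target = target_sparsity_at(1000, 0.0, 0.5, 3200)  # 0.15625 after 1000 of 3200 warmup steps
loss = lagrangian_penalty(expected, target, lambda_1, lambda_2)

Because lambda_1 and lambda_2 are trained adversarially (ascent on the multipliers, descent on the masks), the penalty only vanishes once the expected sparsity actually reaches the target.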
import math import torch import torch.nn as nn from typing import List, Optional, Tuple from einops import rearrange from omegaconf import DictConfig from torch.nn import functional as F from transformers.pytorch_utils import (find_pruneable_heads_and_indices, prune_linear_layer) from llmshearing.models.l0_module import L0Module from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb
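Among these imports, prune_linear_layer and find_pruneable_heads_and_indices from transformers.pytorch_utils do the actual surgery on weight matrices once a mask has decided what to drop. A small illustration of the former, with made-up layer sizes, might be:

import torch
from transformers.pytorch_utils import prune_linear_layer

layer = torch.nn.Linear(8, 6)                      # 6 output features, 8 input features
keep = torch.tensor([0, 2, 3], dtype=torch.long)

# dim=0 keeps only the selected output rows; dim=1 would keep selected input columns
pruned = prune_linear_layer(layer, keep, dim=0)
print(pruned.weight.shape)  # torch.Size([3, 8])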
11,070
query_padding_mask = None if key_padding_mask is not None: query_padding_mask = key_padding_mask[:, -query.size(1):] # b, s, d = query.shape new_qkv_shape = qkv.size()[:-1] + (self.n_heads, 3 * self.head_dim) qkv = qkv.view(*new_qkv_shape) # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size] query = qkv[..., : self.head_dim].permute(0, 2, 1, 3) key = qkv[..., self.head_dim : 2 * self.head_dim].permute(0, 2, 1, 3) value = qkv[..., 2 * self.head_dim :].permute(0, 2, 1, 3) query_rot = query[..., : self.rotary_ndims] query_pass = query[..., self.rotary_ndims :] key_rot = key[..., : self.rotary_ndims] key_pass = key[..., self.rotary_ndims :] kv_seq_len = key.size(2) offset = 0 if past_key_value is not None: offset = past_key_value[0].shape[-2] kv_seq_len += offset cos, sin = self.rotary_emb(value, seq_len=kv_seq_len) position_ids = torch.arange(offset, kv_seq_len, dtype=torch.long, device=cos.device) position_ids = position_ids.unsqueeze(0).view(-1, kv_seq_len) query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) query = torch.cat((query, query_pass), dim=-1) key = torch.cat((key, key_pass), dim=-1) offset = 0 if past_key_value is not None: if len(past_key_value) != 0: offset = past_key_value[0].shape[-2] key = torch.cat([past_key_value[0], key], dim=1) value = torch.cat([past_key_value[1], value], dim=1) past_key_value = (key, value) if self.attn_fn == flash_attn_fn: # TODO: test if it is the same as attn query = rearrange(query, 'b h s d -> b s h d') key = rearrange(key, 'b h s d -> b s h d') value = rearrange(value, 'b h s d -> b s h d') context, attn_weights = self.attn_fn( query, key, value, self.n_heads, softmax_scale=self.softmax_scale, attn_bias=attn_bias, query_padding_mask=query_padding_mask, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, head_z=head_z ) else: context = self.attn_fn( query=query, key=key, value=value, attention_mask=attention_mask, head_z=head_z ) attn_weights = None if retain_grad: self.context = context if self.context.requires_grad: self.context.retain_grad() output = self.out_proj(context) if head_layer_z is not None: output *= head_layer_z if hidden_z is not None: output *= hidden_z if retain_grad: self.output = output if self.output.requires_grad: self.output.retain_grad() return output, attn_weights, past_key_value class PythiaMLP(nn.Module): def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.cfg = cfg self.down_proj = nn.Linear(cfg.intermediate_size, cfg.d_model, bias=True, device=device) self.up_proj = nn.Linear(cfg.d_model, cfg.intermediate_size, bias=True, device=device) def prune_params(self, zs_block): intermediate_z = zs_block.get("intermediate_z", None) mlp_z = zs_block.get("mlp_z", None) hidden_z = zs_block.get("hidden_z", None) # update params # if intermediate_z is not None: self.down_proj.weight.data = self.down_proj.weight.data.mul(intermediate_z.squeeze(0)) if mlp_z is not None: self.down_proj.weight.data = self.down_proj.weight.data.transpose(0, 1).mul(mlp_z).transpose(0, 1) self.down_proj.bias.data = self.down_proj.bias.data.mul(mlp_z) if hidden_z is not None: self.down_proj.weight.data = self.down_proj.weight.data.transpose(0, 1).mul(hidden_z).transpose(0, 1) self.down_proj.bias.data = self.down_proj.bias.data.mul(hidden_z) ################# if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] print(f" FFN 
hidden dim: {len(hidden_z)} -> {len(remaining_index)}") half = next(self.up_proj.parameters()).dtype self.up_proj = prune_linear_layer(self.up_proj, remaining_index, dim=1) self.down_proj = prune_linear_layer(self.down_proj, remaining_index, dim=0) if half == torch.float16: self.up_proj = self.up_proj.half() self.down_proj = self.down_proj.half()
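The cropped_code above unpacks the fused query_key_value projection into per-head q/k/v and only rotates the first rotary_ndims of each head. A self-contained sketch of that slicing, with arbitrary shape values, is:

import torch

batch, seq_len, n_heads, head_dim, rotary_pct = 2, 16, 4, 32, 0.25
rotary_ndims = int(head_dim * rotary_pct)                    # 8 dims per head get rotated

qkv = torch.randn(batch, seq_len, n_heads * 3 * head_dim)    # output of query_key_value
qkv = qkv.view(batch, seq_len, n_heads, 3 * head_dim)

# [batch, seq, heads, 3*head_dim] -> three [batch, heads, seq, head_dim] tensors
query = qkv[..., :head_dim].permute(0, 2, 1, 3)
key = qkv[..., head_dim:2 * head_dim].permute(0, 2, 1, 3)
value = qkv[..., 2 * head_dim:].permute(0, 2, 1, 3)

# only the first rotary_ndims of each head are rotary-embedded; the rest pass through
query_rot, query_pass = query[..., :rotary_ndims], query[..., rotary_ndims:]
key_rot, key_pass = key[..., :rotary_ndims], key[..., rotary_ndims:]
print(query_rot.shape, query_pass.shape)  # torch.Size([2, 4, 16, 8]) torch.Size([2, 4, 16, 24])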
class ComposerMosaicPythia(ComposerMosaicLlama): def __init__(self, cfg): super().__init__(cfg) self.model = PythiaModel(cfg) class CoFiLayerNorm(torch.nn.LayerNorm): def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None: super().__init__(normalized_shape, eps, elementwise_affine, device) def forward(self, input, hidden_z=None): if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] compressed_input = torch.index_select( input, dim=-1, index=remaining_index) compressed_weight = self.weight[remaining_index] compressed_bias = self.bias[remaining_index] normalized_shape = len(remaining_index) normed_input = F.layer_norm( compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps) output = input.clone() normed_input = normed_input.to(output.dtype) output[..., remaining_index] = normed_input else: output = F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps) return output def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index]) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index)) self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index)) self.normalized_shape = (len(remaining_index),) class PythiaEmbedding(nn.Embedding): def forward(self, input, hidden_z=None): embeddings = super().forward(input) if hidden_z is not None: embeddings = embeddings.mul(hidden_z) return embeddings def prune_params(self, hidden_z): remaining_index = torch.where(~hidden_z.eq(0))[0] self.weight.data = self.weight.data.mul(hidden_z) self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone()) self.embedding_dim = len(remaining_index) print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}") class PythiaModel(nn.Module): def __init__(self, cfg: DictConfig): super().__init__() print(f'Tried to build Pythia model with cfg.name={cfg.name}') self.cfg = cfg ### added ### self.l0_module = None if getattr(self.cfg, "l0_module", None) is not None: self.l0_module = L0Module(self.cfg, device=cfg.init_device) ############# layernorm_class = CoFiLayerNorm self.attn_impl = cfg.attn_impl self.embedding_fraction = cfg.get('embedding_fraction', 1) assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!' 
self.transformer = nn.ModuleDict({ "wte": PythiaEmbedding(cfg.vocab_size, cfg.d_model, device=cfg.init_device), }) self.transformer.update({ 'blocks': nn.ModuleList([ PythiaBlock(cfg, device=cfg.init_device) for _ in range(cfg.n_layers) ]) }) self.transformer.update({ "output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False), }) self.transformer.update({ "ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device), # TODO: add to config }) self.is_causal = True if cfg.get('verbose') and cfg.get('verbose') > 2: print(self) def prune_params(self, zs=None): # TODO if zs is None: self.l0_module.eval() zs = self.l0_module(calculate_lagrangian=False) # wte as well :) # ln_f if hidden states are to be pruned if "hidden_z" in zs: hidden_z = zs["hidden_z"] remaining_index = torch.where(~hidden_z.eq(0))[0] self.transformer.ln_f.prune_params(hidden_z) self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z) self.transformer.wte.weight = torch.nn.parameter.Parameter( self.transformer.wte.weight.index_select(1, remaining_index).clone()) self.transformer.wte.embedding_dim = len(remaining_index) # self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z) half = self.transformer.output.weight.data.dtype == torch.float16 self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1) if half: self.transformer.output = self.transformer.output.half() for i, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, i) block.prune_params(zs_block) def get_zs_block(self, zs, block_idx): zs_block = {} if zs is not None: for key in zs: if key == "hidden_z": zs_block["hidden_z"] = zs["hidden_z"] else: zs_block[key] = zs[key][block_idx] return zs_block def forward( self, input_ids: torch.LongTensor, key_padding_mask: Optional[torch.ByteTensor] = None, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, pruned_steps: int = 0, retain_grad: bool = False, **zs,): S = input_ids.size(1) assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!" 
tok_emb = self.transformer.wte(input_ids) if "hidden_z" in zs: tok_emb = tok_emb.mul(zs["hidden_z"]) x = tok_emb attn_bias = None # only consider the flash attention case attention_mask = prepare_decoder_attention_mask((tok_emb.size(0), tok_emb.size(1)), tok_emb) l0_output = None if self.l0_module is not None: assert zs == {}, "zs should be empty when using L0Module" zs = self.l0_module(calculate_lagrangian=False, pruned_steps=pruned_steps) for b_idx, block in enumerate(self.transformer.blocks): zs_block = self.get_zs_block(zs, b_idx) past_key_value = past_key_values[ b_idx] if past_key_values is not None else None x, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=self.is_causal, attention_mask=attention_mask, retain_grad=retain_grad, **zs_block ) if past_key_values is not None: past_key_values[b_idx] = past_key_value x = self.transformer.ln_f(x, hidden_z=zs.get("hidden_z", None)) logits = self.transformer.output(x) if self.l0_module is not None: l0_output = self.l0_module(calculate_lagrangian=True, pruned_steps=pruned_steps) return {"logits": logits, "l0_output": l0_output, "zs": zs} def param_init_fn(self, module): pass def fsdp_wrap_fn(self, module): return isinstance(module, PythiaBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module): return isinstance(module, PythiaBlock) class PythiaBlock(nn.Module): def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() layernorm_class = CoFiLayerNorm # TODO: CoFiLayerNorm,RMSLayerNorm self.ln_1 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.attn = PythiaAttention(cfg, device) self.ln_2 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device) self.mlp = PythiaMLP(cfg, device) self.use_parallel_residual = cfg.get('use_parallel_residual', False) # TODO: add to config def prune_params(self, zs_block): self.attn.prune_params(zs_block) self.mlp.prune_params(zs_block) if self.attn.query_key_value is None: self.ln_1 = None if self.mlp.up_proj is None: self.ln_2 = None if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"] if self.ln_1 is not None: self.ln_1.prune_params(hidden_z) if self.ln_2 is not None: self.ln_2.prune_params(hidden_z) def forward( self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None, attn_bias: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.ByteTensor] = None, is_causal: bool = True, attention_mask: Optional[torch.Tensor] = None, retain_grad: bool = False, head_z: Optional[torch.Tensor] = None, head_layer_z: Optional[torch.Tensor] = None, intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None, hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None, vo_head_dim_z: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]: if self.ln_1 is not None: a = self.ln_1(x, hidden_z=hidden_z) attn_output, _, past_key_value = self.attn(a, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask, retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z, qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z) else: attn_output = 0 if self.use_parallel_residual: # pseudocode: # x = x + attn(ln1(x)) + mlp(ln2(x)) if self.ln_2 is not None: b = self.ln_2(x, hidden_z=hidden_z) mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, 
hidden_z) x = mlp_output + attn_output + x else: x = attn_output + x else: # pseudocode: # x = x + attn(ln1(x)) # x = x + mlp(ln2(x)) if self.ln_2 is not None: attn_output = x + attn_output hidden_states = self.ln_2(attn_output, hidden_z=hidden_z) mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z) x = mlp_output + attn_output else: x = x + attn_output return x, past_key_value class PythiaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.attn_impl = cfg.get('attn_impl') self.d_model = cfg.d_model self.n_heads = cfg.n_heads self.all_head_size = cfg.d_model self.head_dim = self.d_model // self.n_heads self.pruned_heads = set() self.softmax_scale = cfg.get('softmax_scale') if self.softmax_scale is None: self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads) self.attn_dropout_p = cfg.get('attn_pdrop') # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False) # for param init fn; enables shape based init of fused layers # fuse_splits = (cfg.d_model, 2 * cfg.d_model) # self.Wqkv._fused = (0, fuse_splits) # type: ignore self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True) fuse_splits = (cfg.d_model, 2 * cfg.d_model) self.query_key_value._fused = (0, fuse_splits) self.attn_fn = flash_attn_fn if self.attn_impl == 'flash' else normal_attn_fn self.out_proj = nn.Linear(self.d_model, self.d_model, device=device, bias=True) self.out_proj._is_residual = True # type: ignore self.rotary_ndims = int(self.head_dim * cfg.rotary_pct) self.rotary_emb = RotaryEmbedding(self.rotary_ndims, max_position_embeddings=cfg.max_seq_len, device=device) def prune_params(self, zs_block): head_z = None; head_layer_z = None; hidden_z = None; qk_head_dim_z = None; vo_head_dim_z = None if "head_z" in zs_block: head_z = zs_block["head_z"].squeeze() if "head_layer_z" in zs_block: head_layer_z = zs_block["head_layer_z"].squeeze() if "hidden_z" in zs_block: hidden_z = zs_block["hidden_z"].squeeze() # update params # if head_z is not None: head_z_for_update = torch.repeat_interleave(head_z, self.head_dim) start_index = torch.arange(0, self.n_heads * 3, 3) + 2 end_index = start_index + 1 index = torch.cat([torch.arange(i, j) for i, j in zip(start_index * self.head_dim, end_index * self.head_dim)]) self.query_key_value.weight.data[index, :] = \ self.query_key_value.weight.data.transpose(0, 1)[:, index].mul(head_z_for_update).transpose(0, 1) self.query_key_value.bias.data[index] = \ self.query_key_value.bias.data[index].mul(head_z_for_update) if head_layer_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(head_layer_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(head_layer_z) if hidden_z is not None: self.out_proj.weight.data = self.out_proj.weight.data.transpose(0, 1).mul(hidden_z).transpose(0, 1) self.out_proj.bias.data = self.out_proj.bias.data.mul(hidden_z) ################# if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] print(f" Head hidden: {len(hidden_z)} -> {len(remaining_index)}") half = next(self.query_key_value.parameters()).dtype == torch.float16 self.query_key_value = prune_linear_layer(self.query_key_value, remaining_index, dim=1) self.out_proj = prune_linear_layer(self.out_proj, remaining_index) if half: self.query_key_value.half() self.out_proj.half() to_prune_heads = turn_head_z(head_z, 
head_layer_z) len_to_prune_heads = len(to_prune_heads) if len_to_prune_heads == 0: print(f" Heads: {self.n_heads} -> {self.n_heads}") return heads, index = find_pruneable_heads_and_indices( to_prune_heads, self.n_heads, self.head_dim, self.pruned_heads ) # Prune linear layers # setting layers to be None if all the heads are pruned if len(index) == 0: self.query_key_value = None self.out_proj = None else: half = next(self.query_key_value.parameters()).dtype == torch.float16 remaining_heads = list(set([i for i in range(self.n_heads)]) - set(to_prune_heads)) qkv_index = torch.cat([torch.arange(i * self.head_dim * 3, (i+1) * self.head_dim * 3).to(index.device) for i in remaining_heads]) self.query_key_value = prune_linear_layer(self.query_key_value, qkv_index) self.out_proj = prune_linear_layer(self.out_proj, index, dim=1) if half: self.query_key_value.half() self.out_proj.half() print(f" Heads: {self.n_heads} -> {self.n_heads - len(heads)}") # Update hyper params and store pruned heads self.n_heads = self.n_heads - len(heads) self.all_head_size = self.head_dim * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, x, past_key_value=None, attn_bias=None, key_padding_mask=None, is_causal=True, needs_weights=False, attention_mask=None, retain_grad=False, head_z=None, head_layer_z=None, hidden_z=None, qk_head_dim_z=None, vo_head_dim_z=None): if self.query_key_value is None: return None, None, past_key_value qkv = self.query_key_value(x) query_padding_mask = None if key_padding_mask is not None: query_padding_mask = key_padding_mask[:, -query.size(1):] # b, s, d = query.shape new_qkv_shape = qkv.size()[:-1] + (self.n_heads, 3 * self.head_dim) qkv = qkv.view(*new_qkv_shape) # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size] query = qkv[..., : self.head_dim].permute(0, 2, 1, 3) key = qkv[..., self.head_dim : 2 * self.head_dim].permute(0, 2, 1, 3) value = qkv[..., 2 * self.head_dim :].permute(0, 2, 1, 3) query_rot = query[..., : self.rotary_ndims] query_pass = query[..., self.rotary_ndims :] key_rot = key[..., : self.rotary_ndims] key_pass = key[..., self.rotary_ndims :] kv_seq_len = key.size(2) offset = 0 if past_key_value is not None: offset = past_key_value[0].shape[-2] kv_seq_len += offset cos, sin = self.rotary_emb(value, seq_len=kv_seq_len) position_ids = torch.arange(offset, kv_seq_len, dtype=torch.long, device=cos.device) position_ids = position_ids.unsqueeze(0).view(-1, kv_seq_len) query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids) query = torch.cat((query, query_pass), dim=-1) key = torch.cat((key, key_pass), dim=-1) offset = 0 if past_key_value is not None: if len(past_key_value) != 0: offset = past_key_value[0].shape[-2] key = torch.cat([past_key_value[0], key], dim=1) value = torch.cat([past_key_value[1], value], dim=1) past_key_value = (key, value) if self.attn_fn == flash_attn_fn: # TODO: test if it is the same as attn query = rearrange(query, 'b h s d -> b s h d') key = rearrange(key, 'b h s d -> b s h d') value = rearrange(value, 'b h s d -> b s h d') context, attn_weights = self.attn_fn( query, key, value, self.n_heads, softmax_scale=self.softmax_scale, attn_bias=attn_bias, query_padding_mask=query_padding_mask, key_padding_mask=key_padding_mask, is_causal=is_causal, dropout_p=self.attn_dropout_p, training=self.training, needs_weights=needs_weights, head_z=head_z ) else: context = self.attn_fn( query=query, key=key, value=value, attention_mask=attention_mask, 
head_z=head_z ) attn_weights = None if retain_grad: self.context = context if self.context.requires_grad: self.context.retain_grad() output = self.out_proj(context) if head_layer_z is not None: output *= head_layer_z if hidden_z is not None: output *= hidden_z if retain_grad: self.output = output if self.output.requires_grad: self.output.retain_grad() return output, attn_weights, past_key_value class PythiaMLP(nn.Module): def __init__(self, cfg: DictConfig, device: Optional[str] = None): super().__init__() self.cfg = cfg self.down_proj = nn.Linear(cfg.intermediate_size, cfg.d_model, bias=True, device=device) self.up_proj = nn.Linear(cfg.d_model, cfg.intermediate_size, bias=True, device=device) def prune_params(self, zs_block): intermediate_z = zs_block.get("intermediate_z", None) mlp_z = zs_block.get("mlp_z", None) hidden_z = zs_block.get("hidden_z", None) # update params # if intermediate_z is not None: self.down_proj.weight.data = self.down_proj.weight.data.mul(intermediate_z.squeeze(0)) if mlp_z is not None: self.down_proj.weight.data = self.down_proj.weight.data.transpose(0, 1).mul(mlp_z).transpose(0, 1) self.down_proj.bias.data = self.down_proj.bias.data.mul(mlp_z) if hidden_z is not None: self.down_proj.weight.data = self.down_proj.weight.data.transpose(0, 1).mul(hidden_z).transpose(0, 1) self.down_proj.bias.data = self.down_proj.bias.data.mul(hidden_z) ################# if hidden_z is not None: remaining_index = torch.where(~hidden_z.eq(0))[0] print(f" FFN hidden dim: {len(hidden_z)} -> {len(remaining_index)}") half = next(self.up_proj.parameters()).dtype self.up_proj = prune_linear_layer(self.up_proj, remaining_index, dim=1) self.down_proj = prune_linear_layer(self.down_proj, remaining_index, dim=0) if half == torch.float16: self.up_proj = self.up_proj.half() self.down_proj = self.down_proj.half()
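PythiaBlock in the code above switches between GPT-NeoX-style parallel residuals and the sequential form based on use_parallel_residual. Stripped of pruning masks, caching, and the pruned-submodule special cases, the two data flows reduce to roughly the following, where ln_1, attn, ln_2, and mlp stand in for the block's submodules:

def parallel_residual(x, ln_1, attn, ln_2, mlp):
    # x = x + attn(ln1(x)) + mlp(ln2(x)) -- both branches read the same input
    return x + attn(ln_1(x)) + mlp(ln_2(x))

def sequential_residual(x, ln_1, attn, ln_2, mlp):
    # x = x + attn(ln1(x)); x = x + mlp(ln2(x)) -- the MLP sees the attention output
    x = x + attn(ln_1(x))
    return x + mlp(ln_2(x))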
keep_dim = turn_mlp_z(intermediate_z, mlp_z)
4
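The next_line field calls turn_mlp_z, which matches gold_snippet_index 4 when the context identifiers are counted from zero: it collapses the per-dimension intermediate mask with the per-layer MLP gate and returns the indices of the intermediate dimensions to keep. In isolation, with mask values invented for the example:

import torch

intermediate_z = torch.tensor([1.0, 0.0, 0.7, 0.0, 1.0])   # per intermediate dim
mlp_z = torch.tensor(1.0)                                   # whole-MLP gate for this layer

z = intermediate_z.squeeze().clone()
if mlp_z is not None:
    z *= mlp_z
keep_dim = torch.where(z != 0)[0].tolist()
print(keep_dim)  # [0, 2, 4]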
2023-10-16 12:26:08+00:00
16k
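To close out this row: the ComposerMosaicLlama snippet in its context estimates forward FLOPs as 2 * n_params per token plus a quadratic attention term, and charges three times that per training batch to account for the backward pass. A back-of-the-envelope version with invented model sizes:

n_params = 1.3e9          # hypothetical parameter count
d_model, n_layers, max_seq_len, batch_size = 2048, 24, 4096, 8

params_flops_per_seq = 2 * n_params * max_seq_len
# two matmuls (QK^T and AV), each counted as 2 FLOPs per multiply-accumulate
attn_flops_per_seq = n_layers * 2 * 2 * (d_model * max_seq_len ** 2)
num_fwd_flops = params_flops_per_seq + attn_flops_per_seq
flops_per_batch = num_fwd_flops * 3 * batch_size   # forward + ~2x backward
print(f"{flops_per_batch:.3e}")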
hkchengrex/Cutie
cutie/inference/inference_core.py
[ { "identifier": "MemoryManager", "path": "cutie/inference/memory_manager.py", "snippet": "class MemoryManager:\n \"\"\"\n Manages all three memory stores and the transition between working/long-term memory\n \"\"\"\n def __init__(self, cfg: DictConfig, object_manager: ObjectManager):\n self.object_manager = object_manager\n self.sensory_dim = cfg.model.sensory_dim\n self.top_k = cfg.top_k\n self.chunk_size = cfg.chunk_size\n\n self.save_aux = cfg.save_aux\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n # subtract 1 because the first-frame is now counted as \"permanent memory\"\n # and is not counted towards max_mem_frames\n # but we want to keep the hyperparameters consistent as before for the same behavior\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n # dimensions will be inferred from input later\n self.CK = self.CV = None\n self.H = self.W = None\n\n # The sensory memory is stored as a dictionary indexed by object ids\n # each of shape bs * C^h * H * W\n self.sensory = {}\n\n # a dictionary indexed by object ids, each of shape bs * T * Q * C\n self.obj_v = {}\n\n self.work_mem = KeyValueMemoryStore(save_selection=self.use_long_term,\n save_usage=self.use_long_term)\n if self.use_long_term:\n self.long_mem = KeyValueMemoryStore(save_usage=self.count_long_term_usage)\n\n self.config_stale = True\n self.engaged = False\n\n def update_config(self, cfg: DictConfig) -> None:\n self.config_stale = True\n self.top_k = cfg['top_k']\n\n assert self.use_long_term == cfg.use_long_term, 'cannot update this'\n assert self.count_long_term_usage == cfg.long_term.count_usage, 'cannot update this'\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n def _readout(self, affinity, v) -> torch.Tensor:\n # affinity: bs*N*HW\n # v: bs*C*N or bs*num_objects*C*N\n # returns bs*C*HW or bs*num_objects*C*HW\n if len(v.shape) == 3:\n # single object\n return v @ affinity\n else:\n bs, num_objects, C, N = v.shape\n v = v.view(bs, num_objects * C, N)\n out = v @ affinity\n return out.view(bs, num_objects, C, -1)\n\n def _get_mask_by_ids(self, mask: torch.Tensor, obj_ids: List[int]) -> torch.Tensor:\n # -1 because the mask does not contain the background channel\n return mask[:, [self.object_manager.find_tmp_by_id(obj) - 1 for obj in obj_ids]]\n\n def _get_sensory_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.sensory[obj] for obj in obj_ids], dim=1)\n\n def _get_object_mem_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.obj_v[obj] for obj in obj_ids], dim=1)\n\n def _get_visual_values_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n # All the values that the object ids refer to should have the same shape\n value = torch.stack([self.work_mem.value[obj] for obj in obj_ids], dim=1)\n if self.use_long_term 
and obj_ids[0] in self.long_mem.value:\n lt_value = torch.stack([self.long_mem.value[obj] for obj in obj_ids], dim=1)\n value = torch.cat([lt_value, value], dim=-1)\n\n return value\n\n def read(self, pix_feat: torch.Tensor, query_key: torch.Tensor, selection: torch.Tensor,\n last_mask: torch.Tensor, network: CUTIE) -> Dict[int, torch.Tensor]:\n \"\"\"\n Read from all memory stores and returns a single memory readout tensor for each object\n\n pix_feat: (1/2) x C x H x W\n query_key: (1/2) x C^k x H x W\n selection: (1/2) x C^k x H x W\n last_mask: (1/2) x num_objects x H x W (at stride 16)\n return a dict of memory readouts, indexed by object indices. Each readout is C*H*W\n \"\"\"\n h, w = pix_feat.shape[-2:]\n bs = pix_feat.shape[0]\n assert query_key.shape[0] == bs\n assert selection.shape[0] == bs\n assert last_mask.shape[0] == bs\n\n query_key = query_key.flatten(start_dim=2) # bs*C^k*HW\n selection = selection.flatten(start_dim=2) # bs*C^k*HW\n \"\"\"\n Compute affinity and perform readout\n \"\"\"\n all_readout_mem = {}\n buckets = self.work_mem.buckets\n for bucket_id, bucket in buckets.items():\n if self.use_long_term and self.long_mem.engaged(bucket_id):\n # Use long-term memory\n long_mem_size = self.long_mem.size(bucket_id)\n memory_key = torch.cat([self.long_mem.key[bucket_id], self.work_mem.key[bucket_id]],\n -1)\n shrinkage = torch.cat(\n [self.long_mem.shrinkage[bucket_id], self.work_mem.shrinkage[bucket_id]], -1)\n\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n \"\"\"\n Record memory usage for working and long-term memory\n \"\"\"\n # ignore the index return for long-term memory\n work_usage = usage[:, long_mem_size:]\n self.work_mem.update_bucket_usage(bucket_id, work_usage)\n\n if self.count_long_term_usage:\n # ignore the index return for working memory\n long_usage = usage[:, :long_mem_size]\n self.long_mem.update_bucket_usage(bucket_id, long_usage)\n else:\n # no long-term memory\n memory_key = self.work_mem.key[bucket_id]\n shrinkage = self.work_mem.shrinkage[bucket_id]\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n\n if self.use_long_term:\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n self.work_mem.update_bucket_usage(bucket_id, usage)\n else:\n affinity = do_softmax(similarity, top_k=self.top_k, inplace=True)\n\n if self.chunk_size < 1:\n object_chunks = [bucket]\n else:\n object_chunks = [\n bucket[i:i + self.chunk_size] for i in range(0, len(bucket), self.chunk_size)\n ]\n\n for objects in object_chunks:\n this_sensory = self._get_sensory_by_ids(objects)\n this_last_mask = self._get_mask_by_ids(last_mask, objects)\n this_msk_value = self._get_visual_values_by_ids(objects) # (1/2)*num_objects*C*N\n visual_readout = self._readout(affinity,\n this_msk_value).view(bs, len(objects), self.CV, h, w)\n pixel_readout = network.pixel_fusion(pix_feat, visual_readout, this_sensory,\n this_last_mask)\n this_obj_mem = self._get_object_mem_by_ids(objects).unsqueeze(2)\n readout_memory, aux_features = network.readout_query(pixel_readout, this_obj_mem)\n for i, obj in enumerate(objects):\n all_readout_mem[obj] = readout_memory[:, i]\n\n if self.save_aux:\n aux_output = {\n 'sensory': this_sensory,\n 'pixel_readout': pixel_readout,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'q_weights': aux_features['q_weights'] if aux_features else None,\n 
'p_weights': aux_features['p_weights'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'].float() if aux_features else None,\n }\n self.aux = aux_output\n\n return all_readout_mem\n\n def add_memory(self,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n msk_value: torch.Tensor,\n obj_value: torch.Tensor,\n objects: List[int],\n selection: torch.Tensor = None,\n *,\n as_permanent: bool = False) -> None:\n # key: (1/2)*C*H*W\n # msk_value: (1/2)*num_objects*C*H*W\n # obj_value: (1/2)*num_objects*Q*C\n # objects contains a list of object ids corresponding to the objects in msk_value/obj_value\n bs = key.shape[0]\n assert shrinkage.shape[0] == bs\n assert msk_value.shape[0] == bs\n assert obj_value.shape[0] == bs\n\n self.engaged = True\n if self.H is None or self.config_stale:\n self.config_stale = False\n self.H, self.W = msk_value.shape[-2:]\n self.HW = self.H * self.W\n # convert from num. frames to num. tokens\n self.max_work_tokens = self.max_mem_frames * self.HW\n if self.use_long_term:\n self.min_work_tokens = self.min_mem_frames * self.HW\n\n # key: bs*C*N\n # value: bs*num_objects*C*N\n key = key.flatten(start_dim=2)\n shrinkage = shrinkage.flatten(start_dim=2)\n self.CK = key.shape[1]\n\n msk_value = msk_value.flatten(start_dim=3)\n self.CV = msk_value.shape[2]\n\n if selection is not None:\n # not used in non-long-term mode\n selection = selection.flatten(start_dim=2)\n\n # insert object values into object memory\n for obj_id, obj in enumerate(objects):\n if obj in self.obj_v:\n \"\"\"streaming average\n each self.obj_v[obj] is (1/2)*num_summaries*(embed_dim+1)\n first embed_dim keeps track of the sum of embeddings\n the last dim keeps the total count\n averaging in done inside the object transformer\n\n incoming obj_value is (1/2)*num_objects*num_summaries*(embed_dim+1)\n self.obj_v[obj] = torch.cat([self.obj_v[obj], obj_value[:, obj_id]], dim=0)\n \"\"\"\n last_acc = self.obj_v[obj][:, :, -1]\n new_acc = last_acc + obj_value[:, obj_id, :, -1]\n\n self.obj_v[obj][:, :, :-1] = (self.obj_v[obj][:, :, :-1] +\n obj_value[:, obj_id, :, :-1])\n self.obj_v[obj][:, :, -1] = new_acc\n else:\n self.obj_v[obj] = obj_value[:, obj_id]\n\n # convert mask value tensor into a dict for insertion\n msk_values = {obj: msk_value[:, obj_id] for obj_id, obj in enumerate(objects)}\n self.work_mem.add(key,\n msk_values,\n shrinkage,\n selection=selection,\n as_permanent=as_permanent)\n\n for bucket_id in self.work_mem.buckets.keys():\n # long-term memory cleanup\n if self.use_long_term:\n # Do memory compressed if needed\n if self.work_mem.non_perm_size(bucket_id) >= self.max_work_tokens:\n # Remove obsolete features if needed\n if self.long_mem.non_perm_size(bucket_id) >= (self.max_long_tokens -\n self.num_prototypes):\n self.long_mem.remove_obsolete_features(\n bucket_id,\n self.max_long_tokens - self.num_prototypes - self.buffer_tokens)\n\n self.compress_features(bucket_id)\n else:\n # FIFO\n self.work_mem.remove_old_memory(bucket_id, self.max_work_tokens)\n\n def purge_except(self, obj_keep_idx: List[int]) -> None:\n # purge certain objects from the memory except the one listed\n self.work_mem.purge_except(obj_keep_idx)\n if self.use_long_term and self.long_mem.engaged():\n self.long_mem.purge_except(obj_keep_idx)\n self.sensory = {k: v for k, v in self.sensory.items() if k in obj_keep_idx}\n\n if not self.work_mem.engaged():\n # everything is removed!\n self.engaged = False\n\n def compress_features(self, bucket_id: int) -> None:\n HW = self.HW\n\n # perform memory consolidation\n 
prototype_key, prototype_value, prototype_shrinkage = self.consolidation(\n *self.work_mem.get_all_sliced(bucket_id, 0, -self.min_work_tokens))\n\n # remove consolidated working memory\n self.work_mem.sieve_by_range(bucket_id,\n 0,\n -self.min_work_tokens,\n min_size=self.min_work_tokens)\n\n # add to long-term memory\n self.long_mem.add(prototype_key,\n prototype_value,\n prototype_shrinkage,\n selection=None,\n supposed_bucket_id=bucket_id)\n\n def consolidation(self, candidate_key: torch.Tensor, candidate_shrinkage: torch.Tensor,\n candidate_selection: torch.Tensor, candidate_value: Dict[int, torch.Tensor],\n usage: torch.Tensor) -> (torch.Tensor, Dict[int, torch.Tensor], torch.Tensor):\n # find the indices with max usage\n bs = candidate_key.shape[0]\n assert bs in [1, 2]\n\n prototype_key = []\n prototype_selection = []\n for bi in range(bs):\n _, max_usage_indices = torch.topk(usage[bi], k=self.num_prototypes, dim=-1, sorted=True)\n prototype_indices = max_usage_indices.flatten()\n prototype_key.append(candidate_key[bi, :, prototype_indices])\n prototype_selection.append(candidate_selection[bi, :, prototype_indices])\n prototype_key = torch.stack(prototype_key, dim=0)\n prototype_selection = torch.stack(prototype_selection, dim=0)\n \"\"\"\n Potentiation step\n \"\"\"\n similarity = get_similarity(candidate_key, candidate_shrinkage, prototype_key,\n prototype_selection)\n affinity = do_softmax(similarity)\n\n # readout the values\n prototype_value = {k: self._readout(affinity, v) for k, v in candidate_value.items()}\n\n # readout the shrinkage term\n prototype_shrinkage = self._readout(affinity, candidate_shrinkage)\n\n return prototype_key, prototype_value, prototype_shrinkage\n\n def initialize_sensory_if_needed(self, sample_key: torch.Tensor, ids: List[int]):\n for obj in ids:\n if obj not in self.sensory:\n # also initializes the sensory memory\n bs, _, h, w = sample_key.shape\n self.sensory[obj] = torch.zeros((bs, self.sensory_dim, h, w),\n device=sample_key.device)\n\n def update_sensory(self, sensory: torch.Tensor, ids: List[int]):\n # sensory: 1*num_objects*C*H*W\n for obj_id, obj in enumerate(ids):\n self.sensory[obj] = sensory[:, obj_id]\n\n def get_sensory(self, ids: List[int]):\n # returns (1/2)*num_objects*C*H*W\n return self._get_sensory_by_ids(ids)\n \n def clear_non_permanent_memory(self):\n self.work_mem.clear_non_permanent_memory()\n if self.use_long_term:\n self.long_mem.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.sensory = {}" }, { "identifier": "ObjectManager", "path": "cutie/inference/object_manager.py", "snippet": "class ObjectManager:\n \"\"\"\n Object IDs are immutable. The same ID always represent the same object.\n Temporary IDs are the positions of each object in the tensor. 
It changes as objects get removed.\n Temporary IDs start from 1.\n \"\"\"\n def __init__(self):\n self.obj_to_tmp_id: Dict[ObjectInfo, int] = {}\n self.tmp_id_to_obj: Dict[int, ObjectInfo] = {}\n self.obj_id_to_obj: Dict[int, ObjectInfo] = {}\n\n self.all_historical_object_ids: List[int] = []\n\n def _recompute_obj_id_to_obj_mapping(self) -> None:\n self.obj_id_to_obj = {obj.id: obj for obj in self.obj_to_tmp_id}\n\n def add_new_objects(\n self, objects: Union[List[ObjectInfo], ObjectInfo,\n List[int]]) -> (List[int], List[int]):\n if not isinstance(objects, list):\n objects = [objects]\n\n corresponding_tmp_ids = []\n corresponding_obj_ids = []\n for obj in objects:\n if isinstance(obj, int):\n obj = ObjectInfo(id=obj)\n\n if obj in self.obj_to_tmp_id:\n # old object\n corresponding_tmp_ids.append(self.obj_to_tmp_id[obj])\n corresponding_obj_ids.append(obj.id)\n else:\n # new object\n new_obj = ObjectInfo(id=obj.id)\n\n # new object\n new_tmp_id = len(self.obj_to_tmp_id) + 1\n self.obj_to_tmp_id[new_obj] = new_tmp_id\n self.tmp_id_to_obj[new_tmp_id] = new_obj\n self.all_historical_object_ids.append(new_obj.id)\n corresponding_tmp_ids.append(new_tmp_id)\n corresponding_obj_ids.append(new_obj.id)\n\n self._recompute_obj_id_to_obj_mapping()\n assert corresponding_tmp_ids == sorted(corresponding_tmp_ids)\n return corresponding_tmp_ids, corresponding_obj_ids\n\n def delete_object(self, obj_ids_to_remove: Union[int, List[int]]) -> None:\n # delete an object or a list of objects\n # re-sort the tmp ids\n if isinstance(obj_ids_to_remove, int):\n obj_ids_to_remove = [obj_ids_to_remove]\n\n new_tmp_id = 1\n total_num_id = len(self.obj_to_tmp_id)\n\n local_obj_to_tmp_id = {}\n local_tmp_to_obj_id = {}\n\n for tmp_iter in range(1, total_num_id + 1):\n obj = self.tmp_id_to_obj[tmp_iter]\n if obj.id not in obj_ids_to_remove:\n local_obj_to_tmp_id[obj] = new_tmp_id\n local_tmp_to_obj_id[new_tmp_id] = obj\n new_tmp_id += 1\n\n self.obj_to_tmp_id = local_obj_to_tmp_id\n self.tmp_id_to_obj = local_tmp_to_obj_id\n self._recompute_obj_id_to_obj_mapping()\n\n def purge_inactive_objects(self,\n max_missed_detection_count: int) -> (bool, List[int], List[int]):\n # remove tmp ids of objects that are removed\n obj_id_to_be_deleted = []\n tmp_id_to_be_deleted = []\n tmp_id_to_keep = []\n obj_id_to_keep = []\n\n for obj in self.obj_to_tmp_id:\n if obj.poke_count > max_missed_detection_count:\n obj_id_to_be_deleted.append(obj.id)\n tmp_id_to_be_deleted.append(self.obj_to_tmp_id[obj])\n else:\n tmp_id_to_keep.append(self.obj_to_tmp_id[obj])\n obj_id_to_keep.append(obj.id)\n\n purge_activated = len(obj_id_to_be_deleted) > 0\n if purge_activated:\n self.delete_object(obj_id_to_be_deleted)\n return purge_activated, tmp_id_to_keep, obj_id_to_keep\n\n def tmp_to_obj_cls(self, mask) -> torch.Tensor:\n # remap tmp id cls representation to the true object id representation\n new_mask = torch.zeros_like(mask)\n for tmp_id, obj in self.tmp_id_to_obj.items():\n new_mask[mask == tmp_id] = obj.id\n return new_mask\n\n def get_tmp_to_obj_mapping(self) -> Dict[int, ObjectInfo]:\n # returns the mapping in a dict format for saving it with pickle\n return {obj.id: tmp_id for obj, tmp_id in self.tmp_id_to_obj.items()}\n\n def realize_dict(self, obj_dict, dim=1) -> torch.Tensor:\n # turns a dict indexed by obj id into a tensor, ordered by tmp IDs\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n if obj.id not in obj_dict:\n raise NotImplementedError\n output.append(obj_dict[obj.id])\n output = torch.stack(output, dim=dim)\n 
return output\n\n def make_one_hot(self, cls_mask) -> torch.Tensor:\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n output.append(cls_mask == obj.id)\n if len(output) == 0:\n output = torch.zeros((0, *cls_mask.shape), dtype=torch.bool, device=cls_mask.device)\n else:\n output = torch.stack(output, dim=0)\n return output\n\n @property\n def all_obj_ids(self) -> List[int]:\n return [k.id for k in self.obj_to_tmp_id]\n\n @property\n def num_obj(self) -> int:\n return len(self.obj_to_tmp_id)\n\n def has_all(self, objects: List[int]) -> bool:\n for obj in objects:\n if obj not in self.obj_to_tmp_id:\n return False\n return True\n\n def find_object_by_id(self, obj_id) -> ObjectInfo:\n return self.obj_id_to_obj[obj_id]\n\n def find_tmp_by_id(self, obj_id) -> int:\n return self.obj_to_tmp_id[self.obj_id_to_obj[obj_id]]" }, { "identifier": "ImageFeatureStore", "path": "cutie/inference/image_feature_store.py", "snippet": "class ImageFeatureStore:\n \"\"\"\n A cache for image features.\n These features might be reused at different parts of the inference pipeline.\n This class provide an interface for reusing these features.\n It is the user's responsibility to delete redundant features.\n\n Feature of a frame should be associated with a unique index -- typically the frame id.\n \"\"\"\n def __init__(self, network: CUTIE, no_warning: bool = False):\n self.network = network\n self._store = {}\n self.no_warning = no_warning\n\n def _encode_feature(self, index: int, image: torch.Tensor) -> None:\n ms_features, pix_feat = self.network.encode_image(image)\n key, shrinkage, selection = self.network.transform_key(ms_features[0])\n self._store[index] = (ms_features, pix_feat, key, shrinkage, selection)\n\n def get_features(self, index: int,\n image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][:2]\n\n def get_key(self, index: int,\n image: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][2:]\n\n def delete(self, index: int) -> None:\n if index in self._store:\n del self._store[index]\n\n def __len__(self):\n return len(self._store)\n\n def __del__(self):\n if len(self._store) > 0 and not self.no_warning:\n warnings.warn(f'Leaking {self._store.keys()} in the image feature store')" }, { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", 
torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = msk_value.shape[:2]\n\n # read using visual attention\n with torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = 
F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in 
self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device" }, { "identifier": "pad_divide_by", "path": "cutie/utils/tensor_utils.py", "snippet": "def pad_divide_by(in_img: torch.Tensor, d: int) -> (torch.Tensor, Iterable[int]):\n h, w = in_img.shape[-2:]\n\n if h % d > 0:\n new_h = h + d - h % d\n else:\n new_h = h\n if w % d > 0:\n new_w = w + d - w % d\n else:\n new_w = w\n lh, uh = int((new_h - h) / 2), int(new_h - h) - int((new_h - h) / 2)\n lw, uw = int((new_w - w) / 2), int(new_w - w) - int((new_w - w) / 2)\n pad_array = (int(lw), int(uw), int(lh), int(uh))\n out = F.pad(in_img, pad_array)\n return out, pad_array" }, { "identifier": "unpad", "path": "cutie/utils/tensor_utils.py", "snippet": "def unpad(img: torch.Tensor, pad: Iterable[int]) -> torch.Tensor:\n if len(img.shape) == 4:\n if pad[2] + pad[3] > 0:\n img = img[:, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, pad[0]:-pad[1]]\n elif len(img.shape) == 3:\n if pad[2] + pad[3] > 0:\n img = img[:, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, pad[0]:-pad[1]]\n elif len(img.shape) == 5:\n if pad[2] + pad[3] > 0:\n img = img[:, :, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, :, pad[0]:-pad[1]]\n else:\n raise NotImplementedError\n return img" }, { "identifier": "aggregate", "path": "cutie/utils/tensor_utils.py", "snippet": "def aggregate(prob: torch.Tensor, dim: int) -> torch.Tensor:\n with torch.cuda.amp.autocast(enabled=False):\n prob = prob.float()\n new_prob = torch.cat([torch.prod(1 - prob, dim=dim, keepdim=True), prob],\n dim).clamp(1e-7, 1 - 1e-7)\n logits = torch.log((new_prob / (1 - new_prob)))\n\n return logits" } ]
from typing import List, Optional, Iterable, Dict from omegaconf import DictConfig from cutie.inference.memory_manager import MemoryManager from cutie.inference.object_manager import ObjectManager from cutie.inference.image_feature_store import ImageFeatureStore from cutie.model.cutie import CUTIE from cutie.utils.tensor_utils import pad_divide_by, unpad, aggregate import logging import numpy as np import torch import torch.nn.functional as F
11,894
""" if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1 image, self.pad = pad_divide_by(image, 16) image = image.unsqueeze(0) # add the batch dimension if self.flip_aug: image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0) # whether to update the working memory is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end) # segment when there is no input mask or when the input mask is incomplete need_segment = (mask is None) or (self.object_manager.num_obj > 0 and not self.object_manager.has_all(objects)) update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not end) # encoding the image ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image) key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image) # segmentation from memory if needed if need_segment: pred_prob_with_bg = self._segment(key, selection, pix_feat, ms_feat, update_sensory=update_sensory) # use the input mask if provided if mask is not None: # inform the manager of the new objects, and get a list of temporary id # temporary ids -- indicates the position of objects in the tensor # (starts with 1 due to the background channel) corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects) mask, _ = pad_divide_by(mask, 16) if need_segment: # merge predicted mask with the incomplete input mask pred_prob_no_bg = pred_prob_with_bg[1:] # use the mutual exclusivity of segmentation if idx_mask: pred_prob_no_bg[:, mask > 0] = 0 else: pred_prob_no_bg[:, mask.max(0) > 0.5] = 0 new_masks = [] for mask_id, tmp_id in enumerate(corresponding_tmp_ids): if idx_mask: this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg) else: this_mask = mask[tmp_id] if tmp_id > pred_prob_no_bg.shape[0]: new_masks.append(this_mask.unsqueeze(0)) else: # +1 for padding the background channel pred_prob_no_bg[tmp_id - 1] = this_mask # new_masks are always in the order of tmp_id mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0) elif idx_mask: # simply convert cls to one-hot representation if len(objects) == 0: if delete_buffer: self.image_feature_store.delete(self.curr_ti) log.warn('Trying to insert an empty mask as memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) mask = torch.stack( [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)], dim=0) pred_prob_with_bg = aggregate(mask, dim=0) pred_prob_with_bg = torch.softmax(pred_prob_with_bg, dim=0) self.last_mask = pred_prob_with_bg[1:].unsqueeze(0) if self.flip_aug: self.last_mask = torch.cat( [self.last_mask, torch.flip(self.last_mask, dims=[-1])], dim=0) # save as memory if needed if is_mem_frame or force_permanent: 
self._add_memory(image, pix_feat, self.last_mask, key, shrinkage, selection, force_permanent=force_permanent) if delete_buffer: self.image_feature_store.delete(self.curr_ti)
log = logging.getLogger() class InferenceCore: def __init__(self, network: CUTIE, cfg: DictConfig, *, image_feature_store: ImageFeatureStore = None): self.network = network self.cfg = cfg self.mem_every = cfg.mem_every stagger_updates = cfg.stagger_updates self.chunk_size = cfg.chunk_size self.save_aux = cfg.save_aux self.max_internal_size = cfg.max_internal_size self.flip_aug = cfg.flip_aug self.curr_ti = -1 self.last_mem_ti = 0 # at which time indices should we update the sensory memory if stagger_updates >= self.mem_every: self.stagger_ti = set(range(1, self.mem_every + 1)) else: self.stagger_ti = set( np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int)) self.object_manager = ObjectManager() self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager) if image_feature_store is None: self.image_feature_store = ImageFeatureStore(self.network) else: self.image_feature_store = image_feature_store self.last_mask = None def clear_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager) def clear_non_permanent_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_non_permanent_memory() def clear_sensory_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_sensory_memory() def update_config(self, cfg): self.mem_every = cfg['mem_every'] self.memory.update_config(cfg) def _add_memory(self, image: torch.Tensor, pix_feat: torch.Tensor, prob: torch.Tensor, key: torch.Tensor, shrinkage: torch.Tensor, selection: torch.Tensor, *, is_deep_update: bool = True, force_permanent: bool = False) -> None: """ Memorize the given segmentation in all memory stores. The batch dimension is 1 if flip augmentation is not used. image: RGB image, (1/2)*3*H*W pix_feat: from the key encoder, (1/2)*_*H*W prob: (1/2)*num_objects*H*W, in [0, 1] key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W selection can be None if not using long-term memory is_deep_update: whether to use deep update (e.g. with the mask encoder) force_permanent: whether to force the memory to be permanent """ if prob.shape[1] == 0: # nothing to add log.warn('Trying to add an empty object mask to memory!') return if force_permanent: as_permanent = 'all' else: as_permanent = 'first' self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids) msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask( image, pix_feat, self.memory.get_sensory(self.object_manager.all_obj_ids), prob, deep_update=is_deep_update, chunk_size=self.chunk_size, need_weights=self.save_aux) self.memory.add_memory(key, shrinkage, msk_value, obj_value, self.object_manager.all_obj_ids, selection=selection, as_permanent=as_permanent) self.last_mem_ti = self.curr_ti if is_deep_update: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) def _segment(self, key: torch.Tensor, selection: torch.Tensor, pix_feat: torch.Tensor, ms_features: Iterable[torch.Tensor], update_sensory: bool = True) -> torch.Tensor: """ Produce a segmentation using the given features and the memory The batch dimension is 1 if flip augmentation is not used. 
key/selection: for anisotropic l2: (1/2) * _ * H * W pix_feat: from the key encoder, (1/2) * _ * H * W ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W with strides 16, 8, and 4 respectively update_sensory: whether to update the sensory memory Returns: (num_objects+1)*H*W normalized probability; the first channel is the background """ bs = key.shape[0] if self.flip_aug: assert bs == 2 else: assert bs == 1 if not self.memory.engaged: log.warn('Trying to segment without any memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network) memory_readout = self.object_manager.realize_dict(memory_readout) sensory, _, pred_prob_with_bg = self.network.segment(ms_features, memory_readout, self.memory.get_sensory( self.object_manager.all_obj_ids), chunk_size=self.chunk_size, update_sensory=update_sensory) # remove batch dim if self.flip_aug: # average predictions of the non-flipped and flipped version pred_prob_with_bg = (pred_prob_with_bg[0] + torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2 else: pred_prob_with_bg = pred_prob_with_bg[0] if update_sensory: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) return pred_prob_with_bg def step(self, image: torch.Tensor, mask: Optional[torch.Tensor] = None, objects: Optional[List[int]] = None, *, idx_mask: bool = True, end: bool = False, delete_buffer: bool = True, force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. 
end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1 image, self.pad = pad_divide_by(image, 16) image = image.unsqueeze(0) # add the batch dimension if self.flip_aug: image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0) # whether to update the working memory is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end) # segment when there is no input mask or when the input mask is incomplete need_segment = (mask is None) or (self.object_manager.num_obj > 0 and not self.object_manager.has_all(objects)) update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not end) # encoding the image ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image) key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image) # segmentation from memory if needed if need_segment: pred_prob_with_bg = self._segment(key, selection, pix_feat, ms_feat, update_sensory=update_sensory) # use the input mask if provided if mask is not None: # inform the manager of the new objects, and get a list of temporary id # temporary ids -- indicates the position of objects in the tensor # (starts with 1 due to the background channel) corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects) mask, _ = pad_divide_by(mask, 16) if need_segment: # merge predicted mask with the incomplete input mask pred_prob_no_bg = pred_prob_with_bg[1:] # use the mutual exclusivity of segmentation if idx_mask: pred_prob_no_bg[:, mask > 0] = 0 else: pred_prob_no_bg[:, mask.max(0) > 0.5] = 0 new_masks = [] for mask_id, tmp_id in enumerate(corresponding_tmp_ids): if idx_mask: this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg) else: this_mask = mask[tmp_id] if tmp_id > pred_prob_no_bg.shape[0]: new_masks.append(this_mask.unsqueeze(0)) else: # +1 for padding the background channel pred_prob_no_bg[tmp_id - 1] = this_mask # new_masks are always in the order of tmp_id mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0) elif idx_mask: # simply convert cls to one-hot representation if len(objects) == 0: if delete_buffer: self.image_feature_store.delete(self.curr_ti) log.warn('Trying to insert an empty mask as memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) mask = torch.stack( [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)], dim=0) pred_prob_with_bg = aggregate(mask, dim=0) pred_prob_with_bg = 
torch.softmax(pred_prob_with_bg, dim=0) self.last_mask = pred_prob_with_bg[1:].unsqueeze(0) if self.flip_aug: self.last_mask = torch.cat( [self.last_mask, torch.flip(self.last_mask, dims=[-1])], dim=0) # save as memory if needed if is_mem_frame or force_permanent: self._add_memory(image, pix_feat, self.last_mask, key, shrinkage, selection, force_permanent=force_permanent) if delete_buffer: self.image_feature_store.delete(self.curr_ti)
output_prob = unpad(pred_prob_with_bg, self.pad)
5
2023-10-19 17:49:24+00:00
16k
stanford-oval/WikiChat
benchmark/scripts/user_simulator.py
[ { "identifier": "DialogueTurn", "path": "pipelines/dialog_turn.py", "snippet": "class DialogueTurn:\n def __init__(\n self,\n agent_utterance: str = None,\n user_utterance: str = None,\n pipeline: str = None,\n engine: str = None,\n generate_engine: str = None,\n draft_engine: str = None,\n ):\n self.engine = engine\n self.generate_engine = generate_engine\n self.draft_engine = draft_engine\n self.pipeline = pipeline\n self.wall_time_seconds = (\n 0 # how much time it took to generate this turn, in seconds\n )\n self.agent_utterance = agent_utterance\n self.user_utterance = user_utterance\n\n # retrieve_and_generate pipeline\n self.initial_search_query = None\n self.initial_search_query_time = None\n self.initial_search_results = []\n self.initial_search_result_titles = []\n self.initial_search_bullets = []\n\n # generate_and_correct pipeline\n self.llm_utterance = None\n self.claims = []\n self.verification_retrieval_results = {}\n self.verification_result = {}\n\n # early_combine pipeline\n self.combined_evidences = []\n self.combined_utterance = None\n self.feedback = []\n self.feedback_scores = []\n self.refined_utterance = None\n\n def _summarize_vc_log(self):\n verification_summary = {}\n assert len(self.verification_result) == len(\n self.verification_retrieval_results\n ), \"We need to have retrieved evidence for all claims\"\n for key, value in self.verification_retrieval_results.items():\n claim_idx = int(key)\n v_ret_results = []\n for v in value:\n title, paragraph, score = tuple(v)\n v_ret_results.append(\n {\"title\": title, \"paragraph\": paragraph, \"score\": round(score, 1)}\n )\n verification_summary[self.claims[claim_idx][0]] = OrderedDict(\n {\n \"label\": self.verification_result[claim_idx][\"label\"],\n \"fixed_claim\": self.verification_result[claim_idx][\"fixed_claim\"],\n \"retrieval_results\": v_ret_results,\n }\n )\n return verification_summary\n\n def _summarize_rg_log(self):\n rg_summary = {\n \"initial_search_query\": self.initial_search_query,\n \"initial_search_query_time\": self.initial_search_query_time,\n \"initial_search_bullets\": self.initial_search_bullets,\n \"initial_search_results\": [],\n }\n\n for i in range(len(self.initial_search_results)):\n rg_summary[\"initial_search_results\"].append(\n {\n \"title\": self.initial_search_result_titles[i],\n \"paragraph\": self.initial_search_results[i],\n # 'bullets': self.initial_search_bullets,\n }\n )\n\n return rg_summary\n\n def log(self):\n \"\"\"\n Returns a json object that contains all information inside `self`\n \"\"\"\n # combine fields into a more human-readable field\n verification_summary = self._summarize_vc_log()\n rg_summary = self._summarize_rg_log()\n\n return OrderedDict(\n {\n # retrieve_and_generate pipeline\n \"retrieve_and_generate\": rg_summary,\n # generate_and_correct pipeline\n \"llm_utterance\": self.llm_utterance,\n \"generate_and_correct\": verification_summary,\n # early_combine pipeline\n \"combined_evidences\": self.combined_evidences,\n \"combined_utterance\": self.combined_utterance,\n \"feedback\": self.feedback,\n \"feedback_scores\": self.feedback_scores,\n \"refined_utterance\": self.refined_utterance,\n \"user_utterance\": self.user_utterance,\n \"agent_utterance\": self.agent_utterance,\n \"engine\": self.engine,\n \"generate_engine\": self.generate_engine,\n \"draft_engine\": self.draft_engine,\n \"pipeline\": self.pipeline,\n \"wall_time_seconds\": round(self.wall_time_seconds, 1),\n }\n )\n\n @staticmethod\n def utterance_list_to_dialog_history(utterance_list: 
List[str]):\n \"\"\"\n The resulting dialog history will not have all the fields correctly initialized, since no information about e.g. search queries is available\n \"\"\"\n dialog_history = []\n assert (\n len(utterance_list) % 2 == 1\n ), \"The first turn is always the user, and the turn to be generated is always the agent, so the number of turns should be odd\"\n for i in range(0, len(utterance_list) - 2, 2):\n dialog_history.append(\n DialogueTurn(\n user_utterance=utterance_list[i],\n agent_utterance=utterance_list[i + 1],\n )\n )\n user_utterance = utterance_list[-1]\n\n return dialog_history, user_utterance\n\n @staticmethod\n def dialog_history_to_utterance_list(dialog_history) -> List[str]:\n \"\"\"\n Convert a list of DialogueTurns to a list of strings\n \"\"\"\n utterance_list = []\n for turn in dialog_history:\n utterance_list.append(turn.user_utterance)\n utterance_list.append(turn.agent_utterance)\n return utterance_list" }, { "identifier": "Chatbot", "path": "pipelines/chatbot.py", "snippet": "class Chatbot:\n \"\"\"\n A stateless chatbot. Stateless means that it does not store the history of the dialog in itself, but requires it as an input\n \"\"\"\n\n def __init__(self, args) -> None:\n # Initialize everything, because we can change the pipeline on the fly using system_parameters\n self.claim_splitter = ClaimSplitter(args.claim_prompt_template_file)\n self.evi_num = args.evi_num\n self.colbert_endpoint = args.colbert_endpoint\n self.retrieval_num = args.retrieval_num\n self.refiner = Refiner(prompt=args.refinement_prompt, args=args)\n\n self.temperature = args.temperature\n self.max_tokens = args.max_tokens\n self.top_p = args.top_p\n self.presence_penalty = args.presence_penalty\n self.frequency_penalty = args.frequency_penalty\n self.skip_verification = args.skip_verification\n\n # default parameters, can be overridden:\n self.engine = args.engine\n self.generate_engine = args.generate_engine\n self.draft_engine = args.draft_engine\n self.do_refine=args.do_refine\n self.fuse_claim_splitting = args.fuse_claim_splitting\n\n def generate_next_turn(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n pipeline: str,\n system_parameters: dict = {},\n ):\n \"\"\"\n Generate the next turn of the dialog\n system_parameters: can override some of the default parameters defined in __init__()\n \"\"\"\n # throw error if system_parameters contains keys that are not supported\n for key in system_parameters:\n assert key in [\n \"engine\",\n \"generate_engine\",\n \"draft_engine\",\n \"fuse_claim_splitting\",\n \"do_refine\",\n ], f\"Unsupported system_parameter key: {key}\"\n\n engine = system_parameters.get(\"engine\", self.engine)\n generate_engine = system_parameters.get(\"generate_engine\", self.generate_engine)\n if generate_engine is None:\n # this means that the default `generate_engine` was not provided via commandline, and system_parameters is not override it either.\n # So default to `engine`\n generate_engine = engine\n draft_engine = system_parameters.get(\"draft_engine\", self.draft_engine)\n if draft_engine is None:\n draft_engine = engine\n fuse_claim_splitting = system_parameters.get(\"fuse_claim_splitting\", self.fuse_claim_splitting)\n engine_dict = {\"default\": engine, \"generate\": generate_engine, \"draft\": draft_engine}\n do_refine = system_parameters.get(\"do_refine\", self.do_refine)\n\n start_time = time.time()\n\n if pipeline == \"generate_and_correct\":\n new_dlg_turn = self.generate_and_correct_pipeline(\n object_dlg_history,\n 
new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"retrieve_and_generate\":\n new_dlg_turn = self.retrieve_and_generate_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"generate\":\n reply = self._generate_only(\n \"baseline_chatbot.prompt\",\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n new_dlg_turn.llm_utterance = reply\n new_dlg_turn.agent_utterance = reply\n elif pipeline == \"retrieve_only\":\n new_dlg_turn = self.retrieve_only_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"early_combine\":\n new_dlg_turn = self.early_combine_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n fuse_claim_splitting=fuse_claim_splitting\n )\n else:\n raise ValueError\n\n if do_refine == \"True\" or do_refine == \"true\" or do_refine == True:\n do_refine = True\n else:\n do_refine = False\n\n if do_refine:\n prerefinement_agent_utterance = new_dlg_turn.agent_utterance\n new_dlg_turn.agent_utterance = self.refiner.set_refinement_fields(\n object_dlg_history, new_dlg_turn, engine_dict=engine_dict\n )\n if new_dlg_turn.agent_utterance == prerefinement_agent_utterance:\n logger.info(\"Refinement did NOT change the agent utterance\")\n\n new_dlg_turn.engine = engine\n new_dlg_turn.generate_engine = generate_engine\n new_dlg_turn.draft_engine = draft_engine\n new_dlg_turn.pipeline = pipeline\n\n end_time = time.time()\n new_dlg_turn.wall_time_seconds = end_time - start_time\n\n return new_dlg_turn\n\n def retrieve_only_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n # search based on the history of the dialog so far\n search_prompt_output = llm_generate(\n template_file=\"query.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"new_user_utterance\": new_user_utterance,\n \"force_search\": True,\n },\n engine=engine_dict[\"default\"],\n max_tokens=50,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=[\"\\n\"],\n postprocess=False,\n )\n search_prompt_output = (\n \"Yes. 
\" + search_prompt_output\n ) # because we are forcing a search\n self._handle_search_prompt_output(\n search_prompt_output=search_prompt_output,\n new_dlg_turn=new_dlg_turn,\n num_paragraphs=1,\n summarize_results=False,\n engine_dict=engine_dict,\n )\n\n paragraph = new_dlg_turn.initial_search_results[\n 0\n ] # we only retrieve one paragraph\n title = new_dlg_turn.initial_search_result_titles[0]\n new_dlg_turn.agent_utterance = (\n 'I found an article titled \"' + title + '\": ' + paragraph\n )\n return new_dlg_turn\n\n def retrieve_and_generate_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n reply = self._retrieve_and_generate(\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n new_dlg_turn.agent_utterance = reply\n\n return new_dlg_turn\n\n def generate_and_correct_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: str,\n ):\n \"\"\"\n Verify and correct the last turn of a given dialog using retrieved evidences\n Args:\n - `object_dlg_history` (list): previous dialog turns\n - `new_user_utterance` (str): last user utterance\n Returns:\n - `corrected_reply` (str): corrected LLM response\n - `new_dialog_turn` (DialogTurn)\n \"\"\"\n original_reply = self._generate_only(\n \"generate.prompt\",\n object_dlg_history,\n new_user_utterance,\n engine_dict=engine_dict,\n )\n\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n new_dlg_turn.llm_utterance = original_reply\n\n new_dlg_turn.agent_utterance = self._generate_and_correct_reply(\n object_dlg_history,\n new_user_utterance,\n original_reply,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n\n return new_dlg_turn\n\n def early_combine_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: str,\n fuse_claim_splitting: bool\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n\n # gather evidence from two routs in parallel\n with ThreadPoolExecutor(2) as executor:\n search_summary = executor.submit(\n self._search_and_summarize,\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n supported_claims = executor.submit(\n self._generate_split_and_fact_check,\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n fuse_claim_splitting=fuse_claim_splitting\n )\n search_summary = search_summary.result()\n supported_claims = supported_claims.result()\n\n combined_evi = search_summary + supported_claims\n # logger.info('Combined evidences: %s', new_dlg_turn.combined_evidences)\n new_dlg_turn.combined_evidences = combined_evi\n\n if not combined_evi:\n logger.info(\"Combined evidence is empty\")\n # if new_dlg_turn.initial_search_query is None:\n # new_dlg_turn.combined_utterance = original_reply # no search needed, so return the original chitchat response\n # else:\n # new_dlg_turn.combined_utterance = \"Sorry, I'm not sure.\" # will become more conversational after refinement\n # else:\n new_dlg_turn.combined_utterance = self._reply_using_combined_evidence(\n object_dlg_history,\n new_user_utterance,\n combined_evi,\n engine_dict=engine_dict,\n )\n new_dlg_turn.agent_utterance = new_dlg_turn.combined_utterance\n\n return new_dlg_turn\n\n def _handle_search_prompt_output(\n self,\n search_prompt_output: str,\n new_dlg_turn: DialogueTurn,\n num_paragraphs,\n summarize_results: 
bool,\n engine_dict: dict,\n ):\n \"\"\"\n Updates `new_dlg_turn` with logs\n A sample output is: Yes. You Google \"James E. Webb the administrator of NASA\". The year of the results is \"none\".]\n \"\"\"\n reranking_factor = 3 # we will retrieve num_paragraphs * reranking_factor paragraphs before reranking them\n\n search_prompt_output = search_prompt_output.strip()\n search_pattern = (\n r'Yes\\. You.*\"([^\"]*)\".* The year of the results is \"([^=]*)\"\\.]?'\n )\n search_match = re.match(search_pattern, search_prompt_output)\n\n if search_prompt_output.startswith(\"No\"):\n # sometimes LLM outputs No. with extra explanation afterwards instead of ']', or \"No search needed\". So this more lax condition leads to fewer Exceptions\n logger.info(\"No search needed.\")\n elif search_match:\n search_query = search_match.group(1)\n search_query_time = search_match.group(2)\n y = extract_year(title=\"\", passage=search_query)\n if len(y) > 0:\n logger.info(\"Overriding query year\")\n search_query_time = y[0]\n logger.info(\"search_query = %s\", search_query)\n logger.info(\"search_query_time = %s\", search_query_time)\n\n # retrieve more paragraphs so that we can do date-based reranking (if needed) and skip \"None\" summaries (if any)\n paragraphs, scores, titles = self._colbert_retrieve(\n query=search_query,\n num_paragraphs=num_paragraphs * reranking_factor,\n rerank=search_query_time,\n )\n\n logger.info(\"Colbert titles: %s\", str(titles))\n\n if summarize_results:\n bullets = []\n not_none_paragraphs = []\n not_none_titles = []\n # summarize in batches, until we reach `num_paragraphs` paragraphs that are deemed relevant\n for start_idx in range(\n 0, num_paragraphs * reranking_factor, num_paragraphs\n ):\n b, not_none_paragraph_indices = self._summarize_results(\n search_query,\n paragraphs[start_idx : start_idx + num_paragraphs],\n titles[start_idx : start_idx + num_paragraphs],\n maximum_paragraphs_needed=num_paragraphs\n - len(not_none_paragraphs),\n engine_dict=engine_dict,\n )\n # print(\"not_none_paragraph_indices = \", not_none_paragraph_indices)\n not_none_paragraphs += [\n paragraphs[start_idx + i] for i in not_none_paragraph_indices\n ]\n not_none_titles += [\n titles[start_idx + i] for i in not_none_paragraph_indices\n ]\n bullets = bullets + b\n assert len(not_none_paragraphs) <= num_paragraphs\n if len(not_none_paragraphs) == num_paragraphs:\n break\n titles = not_none_titles\n paragraphs = not_none_paragraphs\n\n else:\n paragraphs = paragraphs[:num_paragraphs]\n titles = titles[:num_paragraphs]\n bullets = None\n\n # log everything\n new_dlg_turn.initial_search_query = search_query\n new_dlg_turn.initial_search_query_time = search_query_time\n new_dlg_turn.initial_search_results = paragraphs\n new_dlg_turn.initial_search_result_titles = titles\n new_dlg_turn.initial_search_bullets = bullets\n else:\n raise ValueError(\n \"Search prompt's output is invalid: %s\" % search_prompt_output\n )\n # logger.error('Search prompt\\'s output is invalid: %s' % search_prompt_output)\n\n def _summarize_results(\n self,\n search_query,\n paragraphs,\n titles,\n maximum_paragraphs_needed,\n engine_dict,\n ):\n \"\"\"\n Summarizes `paragraphs` and returns the indices of at most `maximum_paragraphs_needed` paragraphs that are deemed relevant to the `query`\n \"\"\"\n summaries = llm_generate(\n template_file=\"summarize_and_filter.prompt\",\n prompt_parameter_values=[\n {\"title\": t, \"article\": p, \"query\": search_query}\n for (t, p) in zip(titles, paragraphs)\n ],\n 
engine=engine_dict[\"default\"],\n max_tokens=200,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=None,\n postprocess=False,\n )\n bullets = []\n not_none_paragraph_indices = []\n for paragraph_idx, s in enumerate(summaries):\n if s.startswith(\"Yes. \"):\n # necessary for distilled models\n s = s[5:]\n if s.startswith(\"None\") or s == \"- None\" or s == \"-None\":\n # skip the None paragraphs\n logger.info(\n \"This retrieved paragraphs was deemed unrelated: %s\",\n paragraphs[paragraph_idx],\n )\n continue\n not_none_paragraph_indices.append(paragraph_idx)\n for b in s.split(\"\\n-\"):\n b = b.strip()\n if len(b) == 0:\n continue\n if not b.endswith(\".\"):\n # most likely a partial generation that was cut off because of max_tokens\n continue\n bullets.append(b.strip(\"- \"))\n if len(not_none_paragraph_indices) == maximum_paragraphs_needed:\n break\n\n return bullets, not_none_paragraph_indices\n\n def _retrieve_and_generate(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Retrieves related documents and generates a reply base on them, given the dialog history\n Updates `new_dlg_turn` with logs\n Returns reply\n \"\"\"\n self._search_and_summarize(\n object_dlg_history, new_user_utterance, new_dlg_turn, engine_dict\n )\n\n reply = llm_generate(\n template_file=\"draft.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": new_user_utterance,\n \"evidences\": new_dlg_turn.initial_search_bullets,\n },\n engine=engine_dict[\"default\"],\n max_tokens=self.max_tokens,\n temperature=self.temperature,\n top_p=self.top_p,\n presence_penalty=self.presence_penalty,\n stop_tokens=[\"\\n\"],\n postprocess=True,\n )\n return reply\n\n def _search_and_summarize(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ):\n search_prompt_output = llm_generate(\n template_file=\"query.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"new_user_utterance\": new_user_utterance,\n \"force_search\": False,\n },\n engine=engine_dict[\"default\"],\n max_tokens=50,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=[\"\\n\"],\n postprocess=False,\n )\n self._handle_search_prompt_output(\n search_prompt_output=search_prompt_output,\n new_dlg_turn=new_dlg_turn,\n num_paragraphs=self.retrieval_num,\n summarize_results=True,\n engine_dict=engine_dict,\n )\n return new_dlg_turn.initial_search_bullets\n\n def _generate_split_and_fact_check(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n fuse_claim_splitting: bool\n ):\n original_reply = self._generate_only(\n \"generate.prompt\",\n object_dlg_history,\n new_user_utterance,\n engine_dict=engine_dict,\n )\n if not fuse_claim_splitting:\n new_dlg_turn.llm_utterance = original_reply\n claims_output = None\n else:\n new_dlg_turn.llm_utterance = None\n claims_output = original_reply\n\n\n claims = self.claim_splitter.split_claim(\n dialog_history=object_dlg_history,\n new_user_utterance=new_user_utterance,\n current_agent_utterance=original_reply,\n engine_dict=engine_dict,\n claims_output=claims_output\n )\n\n new_dlg_turn.claims = claims\n if not claims:\n logger.info(\"No claims to check\")\n return []\n\n # retrieve evidence\n ret_output = self._retrieve_evidences(claims)\n\n # verify claims\n ver_output = self._verify_claims(\n claims,\n ret_output,\n 
object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct=False,\n engine_dict=engine_dict,\n )\n\n new_dlg_turn.verification_retrieval_results = ret_output\n new_dlg_turn.verification_result = ver_output\n\n # only keep supported claim\n supported_claims = []\n for label_fix in ver_output:\n verification_label, fixed_claim = (\n label_fix[\"label\"],\n label_fix[\"fixed_claim\"],\n )\n if verification_label == \"SUPPORTS\":\n supported_claims.append(fixed_claim)\n return supported_claims\n\n def _generate_and_correct_reply(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n original_reply: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Verifies and corrects `original_reply` given the dialog history\n Updates `new_dlg_turn` with logs\n Returns corrected reply\n \"\"\"\n # split claims\n # the returned \"claims\" is a list of tuples (claim, year)\n claims = self.claim_splitter.split_claim(\n dialog_history=object_dlg_history,\n new_user_utterance=new_user_utterance,\n current_agent_utterance=original_reply,\n engine_dict=engine_dict,\n )\n claims = ClaimSplitter.remove_claims_from_previous_turns(claims, object_dlg_history)\n if not claims:\n logger.info(\"No claims to check\")\n return original_reply\n new_dlg_turn.claims = claims\n\n # retrieve evidence\n ret_output = self._retrieve_evidences(claims)\n\n # TODO: use the ret_output together with initial search outputs for verification\n # verify claims\n ver_output = self._verify_claims(\n claims,\n ret_output,\n object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct=True,\n engine_dict=engine_dict,\n )\n\n # update dialog turn\n new_dlg_turn.verification_retrieval_results = ret_output\n new_dlg_turn.verification_result = ver_output\n if is_everything_verified(ver_output):\n logger.info(\"All claims passed verification, nothing to correct\")\n return original_reply\n\n # correction\n corrected_reply = original_reply\n fixed_claims = []\n for label_fix in ver_output:\n verification_label, fixed_claim = (\n label_fix[\"label\"],\n label_fix[\"fixed_claim\"],\n )\n if (\n verification_label == \"SUPPORTS\"\n ): # if the claim is already correct, no need to fix\n continue\n fixed_claims.append(fixed_claim)\n assert len(fixed_claims) > 0\n corrected_reply = self._correct(\n original_reply,\n object_dlg_history,\n new_user_utterance,\n fixed_claims, # corrected claim for REFUTE and \"I'm not sure\" for NOT ENOUGH INFO claims.\n engine_dict=engine_dict,\n )\n\n return corrected_reply\n\n def _generate_only(\n self,\n generation_prompt: str,\n dialog_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Generate baseline LLM response\n Args:\n - `generation_prompt` (str): the .prompt file to use for this stage\n - `dialog_history` (list): previous turns\n Returns:\n - `reply`(str): original LLM response\n \"\"\"\n reply = llm_generate(\n template_file=generation_prompt,\n prompt_parameter_values={\n \"dlg\": dialog_history,\n \"new_user_utterance\": new_user_utterance,\n \"engine_name\": engine_dict[\"generate\"] # used to enforce model knowledge cut-off date for models other than GPT-4\n },\n engine=engine_dict[\"generate\"],\n max_tokens=self.max_tokens,\n temperature=self.temperature,\n stop_tokens=[\"\\n\"],\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return reply\n\n def _correct(\n self,\n original_reply,\n 
object_dlg_history,\n last_user_utterance,\n fixed_claims,\n engine_dict: dict,\n ):\n \"\"\"\n Given context + original response + evidence for a claim, fix the original response\n\n Args:\n - `original_reply`(str): LLM's original response\n - `object_dlg_history`(list): list of previous DialogueTurns\n - `last_user_utterance` (str): last user utterance\n - `fixed_claims` (list): list of fixed claims\n Returns:\n - `corrected_reply`(str): corrected LLM response\n \"\"\"\n # correction prompt's context should be in one line\n correction_reply = llm_generate(\n template_file=\"correction_combiner.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": last_user_utterance,\n \"original_reply\": original_reply,\n \"fixed_claims\": fixed_claims,\n },\n engine=engine_dict[\"default\"],\n max_tokens=self.max_tokens,\n temperature=0,\n stop_tokens=[\"\\n\"],\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return correction_reply\n\n def _reply_using_combined_evidence(\n self,\n object_dlg_history,\n last_user_utterance,\n evidences,\n engine_dict: dict,\n ):\n combined_reply = llm_generate(\n template_file=\"draft.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": last_user_utterance,\n \"evidences\": evidences,\n },\n engine=engine_dict[\"draft\"],\n max_tokens=self.max_tokens,\n temperature=0,\n stop_tokens=None,\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return combined_reply\n\n def _colbert_retrieve(\n self,\n query: str,\n num_paragraphs: int,\n rerank=\"none\",\n top_p=1,\n ):\n \"\"\"\n Args:\n `num_paragraphs`: number of paragraphs that will be output\n `rerank` (str): one of 'none', 'recent' or a year like '2005'. 'none' disables reranking. 'recent' retrieves more and returns the most recent ones.\n '2005' boosts the ranking of results that match 2005. 
The date of a result is determined by the year numbers it contains.\n `top_p` (float): chooses from the smallest possible set of results whose cumulative probability exceeds top_p\n Returns:\n `passages` (list): a list of passage texts (excluding the title) with the highest similarities to the `query`\n `passage_scores` (list): a list of similarity scores of each passage in `passsages` with `query`\n `passage_titles` (list): a list of passage titles\n \"\"\"\n\n # print(self.colbert_endpoint, {'query': query, 'evi_num': num_paragraphs})\n response = requests.get(\n self.colbert_endpoint,\n json={\"query\": query, \"evi_num\": num_paragraphs},\n )\n if response.status_code != 200:\n raise Exception(\"ColBERT Search API Error: %s\" % str(response))\n results = response.json()\n passages = []\n passage_titles = []\n for r in results[\"passages\"]:\n r = r.split(\"|\", maxsplit=1)\n passage_titles.append(r[0].strip())\n passages.append(r[1].strip())\n scores = results[\"passage_scores\"]\n probs = results[\"passage_probs\"]\n # print(\"probs = \", probs)\n top_p_cut_off = np.cumsum(probs) > top_p\n if not np.any(top_p_cut_off):\n # even if we include everything, we don't get to top_p\n top_p_cut_off = len(scores)\n else:\n top_p_cut_off = np.argmax(top_p_cut_off) + 1\n # print(\"top_p_cut_off = \", top_p_cut_off)\n passages, scores, passage_titles = (\n passages[:top_p_cut_off],\n scores[:top_p_cut_off],\n passage_titles[:top_p_cut_off],\n )\n\n if rerank == \"none\":\n pass\n else:\n all_passage_dates = []\n for t, p in zip(passage_titles, passages):\n passage_years = extract_year(title=t, passage=p)\n all_passage_dates.append(passage_years)\n if rerank == \"recent\":\n sort_fn = lambda x: max(\n x[3] if len(x[3]) > 0 else [0]\n ) # sort based on the latest year mentioned in the paragraph, demoting paragraphs that don't mention a year\n else:\n # rerank is a year\n try:\n query_year = int(rerank)\n except ValueError as e:\n # raise ValueError('rerank should be none, recent or an integer.')\n logger.error(e)\n return (\n passages[:num_paragraphs],\n scores[:num_paragraphs],\n passage_titles[:num_paragraphs],\n )\n sort_fn = lambda x: x[3].count(\n query_year\n ) # boost the passages that have a matching year with the query, the more they mention the date the more we boost\n\n # logger.info('Search result dates before date-based reranking: %s', str(all_passage_dates))\n passages, scores, passage_titles, all_passage_dates = list(\n zip(\n *sorted(\n zip(passages, scores, passage_titles, all_passage_dates),\n reverse=True,\n key=sort_fn,\n )\n )\n )\n # logger.info('Search result dates after date-based reranking: %s', str(all_passage_dates))\n\n # choose top num_paragraphs paragraphs\n passages, scores, passage_titles = (\n passages[:num_paragraphs],\n scores[:num_paragraphs],\n passage_titles[:num_paragraphs],\n )\n\n return passages, scores, passage_titles\n\n def _retrieve_evidences(self, claims, top_p: float = 1):\n \"\"\"\n Retrieve evidences\n Args:\n - `claims` (list): list of (claim, year)\n - `top_p` (float): chooses from the smallest possible set of results whose cumulative probability exceeds top_p\n Returns:\n - `ret_output` (dict): a dict from claim_id to a list of `evidence`\n - each `evidence` is a list of length 5: [`title of wikipedia page`, `wikipedia text`, `similarity_score`]\n \"\"\"\n ret_output = dict()\n for id, (cl, year) in enumerate(claims):\n # if self.args.reranking_method == \"none\":\n # No re-ranking on evidence. 
Reranking to match the dates increases the risk of confirmation bias.\n passages, passage_scores, passage_titles = self._colbert_retrieve(\n query=cl, num_paragraphs=self.evi_num, top_p=top_p, rerank=\"none\"\n )\n # else:\n # # retrieve more so that we can match the dates\n # passages, passage_scores, passage_titles = self._colbert_retrieve(\n # query=cl,\n # num_paragraphs=self.evi_num,\n # rerank=year,\n # num_paragraphs_for_reranking=self.evi_num * 3,\n # top_p=top_p,\n # )\n evidences = []\n for passage, score, title in zip(passages, passage_scores, passage_titles):\n evidences.append([title, passage, score])\n ret_output[id] = evidences\n\n return ret_output\n\n def _verify_claims(\n self,\n claims,\n ret_output,\n object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct: bool,\n engine_dict: dict,\n ):\n \"\"\"\n Verify claims using retrieval output\n Args:\n - `claims` (list): list of (claim, year) pairs splitted\n - `ret_output` (dict): a dict from claim_id to a list of `evidence`\n - each `evidence` is a list of length 5: [`title of wikipedia page`, `wikipedia text`, `similarity_score`]\n - `object_dlg_history`(str): list of previous DialogueTurns\n - `last_user_utterance` (str): last user utterance\n - `original_reply`(str): original LLM response\n Returns:\n - `ver_output` (list): a list of verification label (\"SUPPORTS\", \"REFUTES\", \"NOT ENOUGH INFO\") and the fixed claims\n \"\"\"\n ver_output = []\n parameter_values_list = []\n\n for claim_id, (cl, year) in enumerate(claims):\n evidences = ret_output[claim_id][: self.evi_num]\n parameter_values_list.append(\n {\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": new_user_utterance,\n \"original_reply\": original_reply,\n \"claim\": cl,\n \"evidence_titles\": [e[0] for e in evidences],\n \"evidence_texts\": [e[1] for e in evidences],\n \"do_correct\": do_correct\n }\n )\n\n # when using gold evidence, we do not split claim so claim is the same with original reply\n if self.skip_verification:\n all_verification_responses = ['is \"SUPPORTS\"'] * len(claims)\n else:\n all_verification_responses = llm_generate(\n template_file=\"verify.prompt\",\n prompt_parameter_values=parameter_values_list,\n engine=engine_dict[\"default\"],\n max_tokens=200,\n temperature=0,\n stop_tokens=None,\n postprocess=False,\n )\n\n for (cl, year), verification_response in zip(\n claims, all_verification_responses\n ):\n # logger.info(\"claim: %s ; verification_response: %s\", cl, verification_response)\n # the following handles cases where smaller models like gpt-35-turbo do not follow the few-shot examples' format\n if (\n 'is \"supports\"' in verification_response.lower()\n or \"no fact-checking is needed for this claim\"\n in verification_response.lower()\n or \"the fact-checking result is not applicable to this response\"\n in verification_response.lower()\n ):\n verification_label = \"SUPPORTS\"\n fixed_claim = cl\n elif (\n 'the fact-checking result is \"not enough info\"'\n in verification_response.lower()\n ):\n verification_label = \"NOT ENOUGH INFO\"\n fixed_claim = \"\"\n else:\n verification_label = \"REFUTES\" # default set to be \"REFUTES\"\n fixed_claim = \"\"\n\n if do_correct and verification_label != \"SUPPORTS\":\n if \"You rewrite your claim:\" in verification_response:\n fixed_claim = verification_response.split(\n \"You rewrite your claim:\"\n )[-1].strip()\n else:\n logger.error(\n \"verification prompt did not fix a %s. 
Output: %s\"\n % (verification_label, verification_response)\n )\n\n ver_output.append({\"label\": verification_label, \"fixed_claim\": fixed_claim})\n\n return ver_output" }, { "identifier": "add_pipeline_arguments", "path": "pipelines/pipeline_arguments.py", "snippet": "def add_pipeline_arguments(parser):\n # determine components of the pipeline\n parser.add_argument(\n \"--pipeline\",\n type=str,\n required=True,\n choices=[\n \"generate_and_correct\",\n \"retrieve_and_generate\",\n \"generate\",\n \"retrieve_only\",\n \"early_combine\",\n \"atlas\",\n ],\n default=\"generate_and_correct\",\n help=\"The type of pipeline used to imrpove GPT-3 response. Only used to know which modules to load.\",\n )\n parser.add_argument(\n \"--claim_prompt_template_file\",\n type=str,\n default=\"split_claims.prompt\",\n help=\"The path to the file containing the claim LLM prompt.\",\n )\n parser.add_argument(\n \"--refinement_prompt\",\n default=\"refine_w_feedback.prompt\",\n help=\"What prompt to use to refine the final response.\",\n )\n parser.add_argument(\n \"--do_refine\", action=\"store_true\", help=\"Whether to refine the final response.\"\n )\n parser.add_argument(\n \"--skip_verification\",\n action=\"store_true\",\n help=\"If True, all claims will be considered correct without fact-checking. Especially useful to speed up debugging of the other parts of the pipeline.\",\n )\n\n parser.add_argument(\n \"--fuse_claim_splitting\",\n action=\"store_true\",\n help=\"If True, The first claim splitting stage of early_combine pipeline will be fused with the generate stage. Only useful for distilled models that have been trained to do this.\",\n )\n\n parser.add_argument(\n \"--colbert_endpoint\",\n type=str,\n default=\"http://127.0.0.1:5000/search\",\n help=\"whether using colbert for retrieval.\",\n )\n parser.add_argument(\n \"--engine\",\n type=str,\n required=True,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--generate_engine\",\n type=str,\n default=None,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use for the 'generate' stage of pipelines. If provided, overrides --engine for that stage.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--draft_engine\",\n type=str,\n default=None,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use for the 'draft' stage of pipelines. 
If provided, overrides --engine for that stage.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--reranking_method\",\n type=str,\n choices=[\"none\", \"date\"],\n default=\"none\",\n help=\"Only used for retrieve_and_generate pipeline\",\n )\n\n # LLM generation hyperparameters\n parser.add_argument(\n \"--max_tokens\",\n type=int,\n default=250,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=0.8,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--top_p\",\n type=float,\n default=0.9,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--frequency_penalty\",\n type=float,\n default=0.0,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--presence_penalty\",\n type=float,\n default=0.0,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n\n parser.add_argument(\n \"--evi_num\",\n type=int,\n default=2,\n help=\"Number of evidences to retrieve per claim.\",\n )\n\n parser.add_argument(\n \"--retrieval_num\",\n type=int,\n default=3,\n help=\"Number of passages to retrieve when searching for information.\",\n )" }, { "identifier": "check_pipeline_arguments", "path": "pipelines/pipeline_arguments.py", "snippet": "def check_pipeline_arguments(args):\n # make sure for ATLAS, both engine and pipeline are set to 'atlas'\n if hasattr(args, \"pipeline\"):\n if (args.engine == \"atlas\" and args.pipeline != \"atlas\") or (\n args.engine != \"atlas\" and args.pipeline == \"atlas\"\n ):\n raise ValueError(\n \"When using ATLAS, both `engine` and `pipeline` input arguments should be set to 'atlas'.\"\n )" }, { "identifier": "make_parent_directories", "path": "pipelines/utils.py", "snippet": "def make_parent_directories(file_name: str):\n \"\"\"\n Creates the parent directories of `file_name` if they don't exist\n \"\"\"\n pathlib.Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True)" }, { "identifier": "llm_generate", "path": "llm/llm_generate.py", "snippet": "def llm_generate(\n template_file: str,\n prompt_parameter_values: Union[dict, List[dict]],\n engine: str,\n max_tokens: int,\n temperature: float,\n stop_tokens,\n top_p: float = 0.9,\n frequency_penalty: float = 0,\n presence_penalty: float = 0,\n postprocess: bool = True,\n filled_prompt=None,\n):\n \"\"\"\n Generates continuations for one or more prompts in parallel\n Inputs:\n prompt_parameter_values: dict or list of dict. If the input is a list, the output will be a list as well\n filled_prompt: gives direct access to the underlying model, without having to load a prompt template from a .prompt file. 
Used for testing.\n \"\"\"\n if not (\n filled_prompt is None\n and prompt_parameter_values is not None\n and template_file is not None\n ) and not (\n filled_prompt is not None\n and prompt_parameter_values is None\n and template_file is None\n ):\n raise ValueError(\n \"Can only use filled_prompt if template_file and prompt_parameter_values are None\"\n )\n\n # Decide which LLM resource to send this request to.\n # Use hash so that each time this function gets called with the same parameters after a backoff, the request gets sent to the same resource\n potential_llm_resources = [\n resource\n for resource in global_variables.all_llm_endpoints\n if engine in resource[\"engine_map\"]\n ]\n llm_resource = potential_llm_resources[\n hash(\n str(\n (\n template_file,\n prompt_parameter_values,\n engine,\n max_tokens,\n temperature,\n stop_tokens,\n top_p,\n frequency_penalty,\n presence_penalty,\n )\n )\n )\n % len(potential_llm_resources)\n ]\n # uniform load balancing instead of hashing\n # llm_resource = potential_llm_resources[random.randrange(len(potential_llm_resources))]\n\n if llm_resource[\"api_type\"] == \"local\":\n prompt_format = llm_resource[\"prompt_format\"]\n else:\n prompt_format = \"none\"\n\n # convert to a single element list so that the rest of the code only has to deal with a list\n input_was_list = True\n if filled_prompt is None:\n assert prompt_parameter_values is not None\n if not isinstance(prompt_parameter_values, list):\n input_was_list = False\n prompt_parameter_values = [prompt_parameter_values]\n filled_prompt, rendered_blocks = _fill_prompt(\n template_file, prompt_parameter_values, engine, prompt_format\n )\n else:\n if not isinstance(filled_prompt, list):\n input_was_list = False\n filled_prompt = [filled_prompt]\n\n assert isinstance(filled_prompt, list)\n\n # Call LLM to generate outputs\n generation_output = _llm_completion_with_backoff_and_cache(\n original_engine_name=engine,\n **_set_llm_resource_fields(\n llm_resource=llm_resource,\n engine=engine,\n prompt=filled_prompt,\n max_tokens=max_tokens,\n temperature=temperature,\n top_p=top_p,\n frequency_penalty=frequency_penalty,\n presence_penalty=presence_penalty,\n stop=stop_tokens,\n )\n )\n outputs = []\n for choice in generation_output[\"choices\"]:\n if choice[\"text\"]:\n outputs.append(choice[\"text\"])\n\n logger.info(\"LLM output: %s\", json.dumps(outputs, indent=2, ensure_ascii=False))\n\n # calculate and record the cost\n cost_prompt, cost_completion = global_variables._model_name_to_cost(engine)\n total_cost = (\n generation_output[\"usage\"][\"prompt_tokens\"] * cost_prompt\n + generation_output[\"usage\"].get(\"completion_tokens\", 0) * cost_completion\n ) / 1000\n global_variables.add_to_total_cost(total_cost)\n\n # postprocess the generation outputs\n outputs = [o.strip() for o in outputs]\n if postprocess:\n outputs = [_postprocess_generations(o) for o in outputs]\n\n # add to prompt logs if needed\n if global_variables.debug_prompts:\n with global_variables.thread_lock:\n for i, o in enumerate(outputs):\n if template_file in global_variables.prompts_to_skip_for_debugging:\n continue\n global_variables.prompt_logs.append(\n {\n \"template_name\": template_file,\n \"instruction\": rendered_blocks[i][\"short_instruction\"]\n if \"short_instruction\" in rendered_blocks[i]\n else rendered_blocks[i][\"instruction\"],\n \"input\": rendered_blocks[i][\"input\"],\n \"output\": o,\n }\n )\n\n if outputs == []:\n outputs = \"\"\n\n # convert back to a single item\n if len(outputs) == 1 and not 
input_was_list:\n outputs = outputs[0]\n return outputs" }, { "identifier": "write_prompt_logs_to_file", "path": "llm/llm_generate.py", "snippet": "def write_prompt_logs_to_file():\n with open(global_variables.prompt_log_file, \"w\") as f:\n f.write(json.dumps(global_variables.prompt_logs, indent=4, ensure_ascii=False))" }, { "identifier": "set_debug_mode", "path": "llm/global_variables.py", "snippet": "def set_debug_mode():\n global debug_prompts\n debug_prompts = True" }, { "identifier": "get_total_cost", "path": "llm/global_variables.py", "snippet": "def get_total_cost():\n global total_cost\n return total_cost" } ]
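The `_colbert_retrieve` snippet above keeps only the smallest prefix of retrieved passages whose cumulative probability exceeds `top_p`. Below is a minimal standalone sketch of that cutoff logic, assuming numpy arrays of per-passage probabilities; the function and variable names are illustrative and not part of the original pipeline.

import numpy as np

def top_p_cutoff(passages, scores, probs, top_p=1.0):
    # Keep the smallest prefix of results whose cumulative probability exceeds top_p.
    mask = np.cumsum(probs) > top_p
    if not np.any(mask):
        # Even including every passage does not reach top_p, so keep them all.
        cutoff = len(scores)
    else:
        # np.argmax returns the first True index; +1 turns it into a prefix length.
        cutoff = int(np.argmax(mask)) + 1
    return passages[:cutoff], scores[:cutoff], probs[:cutoff]

# Example: with top_p=0.9 only the first two passages survive (0.7 + 0.25 > 0.9).
print(top_p_cutoff(["p1", "p2", "p3"], [11.2, 9.8, 3.1], [0.7, 0.25, 0.05], top_p=0.9))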
import argparse import logging import json import random import spacy import sys from typing import List from functools import partial from tqdm.contrib.concurrent import thread_map from tqdm.contrib.logging import logging_redirect_tqdm from pipelines.dialog_turn import DialogueTurn from pipelines.chatbot import Chatbot from pipelines.pipeline_arguments import ( add_pipeline_arguments, check_pipeline_arguments, ) from pipelines.utils import make_parent_directories from llm.llm_generate import ( llm_generate, write_prompt_logs_to_file, ) from llm.global_variables import set_debug_mode, get_total_cost
13156
""" Uses an LLM to talk to our pipelines. Used for evaluation and model distillation. """ sys.path.insert(0, "./") logging.getLogger("openai").setLevel(logging.ERROR) logger = logging.getLogger(__name__) spacy_nlp = spacy.load("en_core_web_sm") user_characteristics = [ "- Ask interesting follow-up questions when needed, and expand on the chatbot's responses using your life experiences.\n- Never volunteer information, and never correct chatbot's mistakes.", # "- You are adversarially stress-testing the chatbot.\n- Never volunteer information, and never correct chatbot's mistakes.", # "- You switch to other topics whenever possible.\n- Keep your inputs short.", # "- Ask interesting follow-up questions when needed.", # "- Ask interesting questions about the recent things that happened about the topic.\n- Never volunteer information, and never correct chatbot's mistakes.", # "- Always disagree with what the chatbot says.", ] def user_simulate_topic( dlg_history, topic, user_character, user_engine, user_temperature ): return llm_generate( template_file="benchmark/prompts/user_with_topic.prompt", prompt_parameter_values={ "dlg": dlg_history, "topic": topic, "user_character": user_character, }, engine=user_engine, max_tokens=60, temperature=user_temperature, stop_tokens=["\n"], top_p=0.5, frequency_penalty=0.0, presence_penalty=0, postprocess=True, ) def user_simulate_passage( dlg_history, title_and_passage, user_character, user_engine, user_temperature ): title, passage = title_and_passage return llm_generate( template_file="benchmark/prompts/user_with_passage.prompt", prompt_parameter_values={ "dlg": dlg_history, "title": title, "passage": passage, "user_character": user_character, }, engine=user_engine, max_tokens=60, temperature=user_temperature, stop_tokens=["\n"], top_p=0.9, frequency_penalty=0.0, presence_penalty=0, postprocess=True, ) def simulate_dialog(dialog_inputs, chatbot, args): """ Simulate one dialog """ dlg_history: List[DialogueTurn] = [] user_character = random.choice(user_characteristics) if args.mode == "topic": user_func = user_simulate_topic elif args.mode == "passage": user_func = user_simulate_passage try: for _ in range(args.num_turns): user_utterance = user_func( dlg_history, dialog_inputs, user_character, args.user_engine, args.user_temperature, ) if user_utterance == "": logger.error("Simulated user utterance is empty.") return None # logger.info('simulate user utterance: %s', user_utterance) new_dlg_turn = chatbot.generate_next_turn( dlg_history, user_utterance, pipeline=args.pipeline ) # logger.info('agent response = %s', new_dlg_turn.agent_utterance) dlg_history.append(new_dlg_turn) return dlg_history except Exception: logger.exception( "Skipping dialog due to exception. dialog_inputs=%s", str(dialog_inputs) ) def repeat_dialog_inputs(dialog_inputs, target_num_dialogs): """ repeats dialog_inputs if we don't have enough of them, truncates if there are too many """ if target_num_dialogs == -1: target_num_dialogs = len(dialog_inputs) full_rounds = target_num_dialogs // len(dialog_inputs) dialog_inputs = ( dialog_inputs * full_rounds + dialog_inputs[: target_num_dialogs % len(dialog_inputs)] ) assert len(dialog_inputs) == target_num_dialogs return dialog_inputs def main(args):
""" Uses an LLM to talk to our pipelines. Used for evaluation and model distillation. """ sys.path.insert(0, "./") logging.getLogger("openai").setLevel(logging.ERROR) logger = logging.getLogger(__name__) spacy_nlp = spacy.load("en_core_web_sm") user_characteristics = [ "- Ask interesting follow-up questions when needed, and expand on the chatbot's responses using your life experiences.\n- Never volunteer information, and never correct chatbot's mistakes.", # "- You are adversarially stress-testing the chatbot.\n- Never volunteer information, and never correct chatbot's mistakes.", # "- You switch to other topics whenever possible.\n- Keep your inputs short.", # "- Ask interesting follow-up questions when needed.", # "- Ask interesting questions about the recent things that happened about the topic.\n- Never volunteer information, and never correct chatbot's mistakes.", # "- Always disagree with what the chatbot says.", ] def user_simulate_topic( dlg_history, topic, user_character, user_engine, user_temperature ): return llm_generate( template_file="benchmark/prompts/user_with_topic.prompt", prompt_parameter_values={ "dlg": dlg_history, "topic": topic, "user_character": user_character, }, engine=user_engine, max_tokens=60, temperature=user_temperature, stop_tokens=["\n"], top_p=0.5, frequency_penalty=0.0, presence_penalty=0, postprocess=True, ) def user_simulate_passage( dlg_history, title_and_passage, user_character, user_engine, user_temperature ): title, passage = title_and_passage return llm_generate( template_file="benchmark/prompts/user_with_passage.prompt", prompt_parameter_values={ "dlg": dlg_history, "title": title, "passage": passage, "user_character": user_character, }, engine=user_engine, max_tokens=60, temperature=user_temperature, stop_tokens=["\n"], top_p=0.9, frequency_penalty=0.0, presence_penalty=0, postprocess=True, ) def simulate_dialog(dialog_inputs, chatbot, args): """ Simulate one dialog """ dlg_history: List[DialogueTurn] = [] user_character = random.choice(user_characteristics) if args.mode == "topic": user_func = user_simulate_topic elif args.mode == "passage": user_func = user_simulate_passage try: for _ in range(args.num_turns): user_utterance = user_func( dlg_history, dialog_inputs, user_character, args.user_engine, args.user_temperature, ) if user_utterance == "": logger.error("Simulated user utterance is empty.") return None # logger.info('simulate user utterance: %s', user_utterance) new_dlg_turn = chatbot.generate_next_turn( dlg_history, user_utterance, pipeline=args.pipeline ) # logger.info('agent response = %s', new_dlg_turn.agent_utterance) dlg_history.append(new_dlg_turn) return dlg_history except Exception: logger.exception( "Skipping dialog due to exception. dialog_inputs=%s", str(dialog_inputs) ) def repeat_dialog_inputs(dialog_inputs, target_num_dialogs): """ repeats dialog_inputs if we don't have enough of them, truncates if there are too many """ if target_num_dialogs == -1: target_num_dialogs = len(dialog_inputs) full_rounds = target_num_dialogs // len(dialog_inputs) dialog_inputs = ( dialog_inputs * full_rounds + dialog_inputs[: target_num_dialogs % len(dialog_inputs)] ) assert len(dialog_inputs) == target_num_dialogs return dialog_inputs def main(args):
chatbot = Chatbot(args)
1
2023-10-19 18:17:25+00:00
16k
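The record above ends here; its `cropped_code` defines `repeat_dialog_inputs`, which pads the list of dialog inputs by repetition when there are too few and truncates it when there are too many (a `target_num_dialogs` of -1 means "use exactly what was provided"). A small usage sketch of that behaviour, restated as a self-contained function with illustrative inputs:

def repeat_dialog_inputs(dialog_inputs, target_num_dialogs):
    # Same logic as in the record's cropped_code: repeat if too few, truncate if too many.
    if target_num_dialogs == -1:
        target_num_dialogs = len(dialog_inputs)
    full_rounds = target_num_dialogs // len(dialog_inputs)
    dialog_inputs = (
        dialog_inputs * full_rounds
        + dialog_inputs[: target_num_dialogs % len(dialog_inputs)]
    )
    assert len(dialog_inputs) == target_num_dialogs
    return dialog_inputs

print(repeat_dialog_inputs(["topic A", "topic B"], 5))     # repeats to 5 inputs
print(repeat_dialog_inputs(["topic A", "topic B", "topic C"], 2))  # truncates to 2 inputs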
jhejna/cpl
research/algs/off_policy_algorithm.py
[ { "identifier": "ReplayBuffer", "path": "research/datasets/replay_buffer/buffer.py", "snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functions starting with \"_\", like \"_help\" are to be used only by the replay buffer internaly. They\n are carefully setup for multiprocesing.\n 2. variables/functions named regularly without a leading \"_\" are to be used by the main thread. This includes\n standard functions like \"add\".\n\n There are a few critical setup options.\n 1. Capacity: determines if the buffer is setup upon creation. If it is set to a known value, then we can add data\n online with `add`, or by pulling more data from disk. If is set to None, the dataset is initialized to the full\n size of the offline dataset.\n 2. path: path to offline data that will be loaded\n 3. _data_generator\n\n Some options are mutually exclusive. For example, it is bad to use a non-distributed layout with\n workers and online data. This will generate a bunch of copy on writes.\n\n Data is expected to be stored in a \"next\" format. This means that data is stored like this:\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n s_3, a_2 , r_2 , d_2 ... End of episode!\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n\n This format is expected from the load(path) funciton.\n\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n sample_fn: Union[str, Callable] = \"sample\",\n sample_kwargs: Optional[Dict] = None,\n epoch_ratio: float = 1.0,\n path: Optional[str] = None,\n capacity: Optional[int] = None,\n exclude_keys: Optional[List[str]] = None,\n include_keys: Optional[Dict] = None,\n stacked_obs: bool = False,\n stacked_action: bool = False,\n distributed: bool = False,\n fetch_every: int = 1000,\n cleanup: bool = True,\n ) -> None:\n # Remove stacking if present.\n self.stacked_obs = stacked_obs\n if self.stacked_obs:\n observation_space = remove_stack_dim(observation_space)\n self.stacked_action = stacked_action\n if self.stacked_action:\n action_space = remove_stack_dim(action_space)\n\n self.observation_space = observation_space\n self.action_space = action_space\n\n # Construct the space for the buffer\n self.exclude_keys = [] if exclude_keys is None else exclude_keys # keys to exclude in the storage buffer\n buffer_space = {\n \"obs\": self.observation_space,\n \"action\": self.action_space,\n \"reward\": 0.0,\n \"done\": False,\n \"discount\": 1.0,\n }\n flattened_buffer_space = utils.flatten_dict(buffer_space)\n if include_keys is not None:\n flattened_buffer_space.update(include_keys)\n print(\"FLATTENED BUFFER SPACE\", flattened_buffer_space)\n for k in self.exclude_keys:\n if k in flattened_buffer_space:\n del flattened_buffer_space[k]\n self.buffer_space = utils.nest_dict(flattened_buffer_space)\n\n self.dummy_action = self.action_space.sample()\n self.capacity = capacity\n\n # Setup the sampler\n if isinstance(sample_fn, str):\n sample_fn = vars(sampling)[sample_fn]\n # Use functools partial to override the default args.\n sample_kwargs = {} if sample_kwargs is None else sample_kwargs\n self.sample_fn = functools.partial(sample_fn, **sample_kwargs)\n # Add sampling parameters\n self.epoch_ratio = epoch_ratio\n\n # Path for preloaded data\n self.path = path\n\n # Setup based on distributed value\n self.distributed = distributed\n if self.distributed:\n 
self.cleanup = cleanup\n self.fetch_every = fetch_every\n if self.capacity is not None:\n self.storage_path = tempfile.mkdtemp(prefix=\"replay_buffer_\")\n print(\"[research] Replay Buffer Storage Path\", self.storage_path)\n self.current_ep = utils.nest_dict({k: list() for k in flattened_buffer_space.keys()})\n self.num_episodes = 0\n else:\n self._alloc(self.capacity) # Alloc immediately\n\n def _alloc(self, capacity):\n # Create the data generator\n self._current_data_generator = self._data_generator()\n\n if capacity is None:\n # Allocte the entire dataset\n data = utils.concatenate(*list(self._current_data_generator), dim=0)\n self._storage = storage.FixedStorage(data)\n else:\n # Construct the buffer space. Remember to exclude any exclude keys\n self._storage = storage.CircularStorage(self.buffer_space, capacity)\n # Fill the storage.\n # if self.path is not None:\n for data in self._current_data_generator:\n self._storage.extend(data)\n if self._storage.size >= self._storage.capacity:\n break\n\n print(\"[ReplayBuffer] Allocated {:.2f} GB\".format(self._storage.bytes / 1024**3))\n\n def _data_generator(self):\n \"\"\"\n Can be overridden in order to load the initial data differently.\n By default assumes the data to be the standard format, and returned as a data dictionary.\n or\n None\n\n This function can be overriden by sub-classes in order to produce data batches.\n It should do the following:\n 1. split data across torch data workers\n 2. randomize the order of data\n 3. yield data of the form dicts\n \"\"\"\n if self.path is None:\n return\n\n # By default get all of the file names that are distributed at the correct index\n worker_info = torch.utils.data.get_worker_info()\n num_workers = 1 if worker_info is None else worker_info.num_workers\n worker_id = 0 if worker_info is None else worker_info.id\n\n ep_filenames = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith(\".npz\")]\n random.shuffle(ep_filenames) # Shuffle all the filenames\n\n if num_workers > 1 and len(ep_filenames) == 1:\n print(\n \"[ReplayBuffer] Warning: using multiple workers but single replay file. 
Reduce memory usage by sharding\"\n \" data with `save` instead of `save_flat`.\"\n )\n elif num_workers > 1 and len(ep_filenames) < num_workers:\n print(\"[ReplayBuffer] Warning: using more workers than dataset files.\")\n\n for ep_filename in ep_filenames:\n ep_idx, _ = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n # Spread loaded data across workers if we have multiple workers and files.\n if ep_idx % num_workers != worker_id and len(ep_filenames) > 1:\n continue # Only yield the files belonging to this worker.\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n yield data\n\n def _fetch_offline(self) -> int:\n \"\"\"\n This simple function fetches a new episode from the offline dataset and adds it to the buffer.\n This is done for each worker.\n \"\"\"\n try:\n data = next(self._current_data_generator)\n except StopIteration:\n self._current_data_generator = self._data_generator()\n data = next(self._current_data_generator)\n self._storage.extend(data)\n # Return the fetched size\n return len(data[\"done\"]) # data must have the done key for storage\n\n def _fetch_online(self) -> int:\n worker_info = torch.utils.data.get_worker_info()\n assert worker_info is not None, \"Must use distributed buffer for online fetching.\"\n\n ep_filenames = sorted([os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)], reverse=True)\n fetched_size = 0\n for ep_filename in ep_filenames:\n ep_idx, ep_len = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n if ep_idx % worker_info.num_workers != worker_info.id:\n continue\n if ep_filename in self._episode_filenames:\n break # We found something we have already loaded\n if fetched_size + ep_len > self._storage.capacity:\n break # do not fetch more than the size of the replay buffer\n\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n self._storage.extend(data)\n self._episode_filenames.add(ep_filename)\n if self.cleanup:\n try:\n os.remove(ep_filename)\n except OSError:\n pass\n\n return fetched_size\n\n def _get_dummy_transition(self, obs):\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n dummy_transition = {\n k: v.sample() if isinstance(v, gym.Space) else v\n for k, v in flattened_buffer_space.items()\n if not k.startswith(\"obs\") and not k.startswith(\"action\")\n }\n dummy_transition = utils.nest_dict(dummy_transition)\n dummy_transition[\"obs\"] = obs\n dummy_transition[\"action\"] = self.dummy_action\n return dummy_transition\n\n def _reset_current_ep(self):\n ep_idx = self.num_episodes\n ep_len = len(self.current_ep[\"done\"])\n self.num_episodes += 1\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n storage.save_data(self.current_ep, os.path.join(self.storage_path, ep_filename))\n\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n ep = {k: list() for k in flattened_buffer_space.keys()}\n self.current_ep = utils.nest_dict(ep)\n\n def add(self, **kwargs):\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # Preprocess here before adding to storage\n if len(kwargs) == 1:\n assert \"obs\" in kwargs\n kwargs = self._get_dummy_transition(kwargs[\"obs\"])\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n else:\n # We have a full transitions\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n if self.stacked_action:\n kwargs[\"action\"] = 
utils.get_from_batch(kwargs[\"action\"], -1)\n\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n\n # This function is overwritten for distributed / local buffers\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.append(self.current_ep, kwargs)\n if kwargs[\"done\"]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.add(kwargs)\n\n def extend(self, **kwargs):\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # TODO: There is a chance that if we add a full sequence we will end up with (B, T, stack, ...)\n # which is not what we want. We could compare the shapes of the observation space to fix it\n # but this code might be unnecesary, as this class shouldn't really be used like that anyways.\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.extend(self.current_ep, kwargs)\n if kwargs[\"done\"][-1]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.extend(kwargs)\n\n def save(self, path):\n os.makedirs(path, exist_ok=True)\n if self.distributed:\n if self.cleanup:\n print(\"[research] Warning, attempting to save a cleaned up replay buffer. There are likely no files\")\n srcs = os.listdir(self.storage_path)\n for src in srcs:\n shutil.move(os.path.join(self.storage_path, src), os.path.join(path, src))\n print(\"Successfully saved\", len(srcs), \"episodes.\")\n else:\n ep_len = self._storage.size\n ep_idx = 0\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n save_path = os.path.join(path, ep_filename)\n self._storage.save(save_path)\n\n def sample(self, *args, **kwargs):\n return self.sample_fn(self._storage, *args, **kwargs)\n\n def __iter__(self):\n assert not hasattr(self, \"_iterated\"), \"__iter__ called twice!\"\n self._iterated = True\n worker_info = torch.utils.data.get_worker_info()\n assert (worker_info is not None) == self.distributed, \"ReplayBuffer.distributed not set correctly!\"\n\n # allocate the buffer with the given capacity\n if self.distributed:\n self._alloc(None if self.capacity is None else self.capacity // worker_info.num_workers)\n self._episode_filenames = set()\n\n self._learning_online = False\n\n samples_since_last_offline_fetch = 0\n samples_since_last_online_fetch = 0\n last_offline_fetch_size = 0\n\n batch_size = self.sample_fn.keywords.get(\"batch_size\", 1)\n stack_size = self.sample_fn.keywords.get(\"stack\", 1)\n seq_size = self.sample_fn.keywords.get(\"seq_length\", 1)\n\n while True:\n if self._storage.size < seq_size * stack_size + 1:\n yield {} # If the buffer is too small for sampling, continue.\n else:\n sample = self.sample_fn(self._storage)\n if batch_size == 1:\n sample = utils.squeeze(sample, 0)\n yield sample\n\n # Fetch new data if we have a circular buffer.\n if isinstance(self._storage, storage.CircularStorage):\n if self.distributed: # Always check for online data\n # We fetch from the online buffer\n samples_since_last_online_fetch += 1\n if samples_since_last_online_fetch >= self.fetch_every:\n fetch_size = self._fetch_online()\n self._learning_online = self._learning_online or (fetch_size > 0)\n samples_since_last_online_fetch = 0\n\n if not self._learning_online and self.path is not None:\n # We fetch from the offline buffer\n samples_since_last_offline_fetch += 1\n data_pts_since_last_offline_fetch = (\n 
samples_since_last_offline_fetch * batch_size * seq_size * stack_size\n )\n if data_pts_since_last_offline_fetch >= last_offline_fetch_size * self.epoch_ratio:\n last_offline_fetch_size = self._fetch_offline()\n samples_since_last_offline_fetch = 0\n\n def __del__(self):\n if not self.distributed:\n return\n if self.cleanup:\n return\n else:\n paths = [os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)]\n for path in paths:\n try:\n os.remove(path)\n except OSError:\n pass\n try:\n os.rmdir(self.storage_path)\n except OSError:\n pass" }, { "identifier": "storage", "path": "research/datasets/replay_buffer/storage.py", "snippet": "def load_data(path: str, exclude_keys: Optional[List[str]]) -> Dict:\ndef save_data(data: Dict, path: str) -> None:\ndef get_bytes(buffer: Union[Dict, np.ndarray]) -> int:\n def capacity(self):\n def size(self):\n def starts(self):\n def ends(self):\n def lengths(self):\n def bytes(self):\n def save(self, path):\n def __getitem__(self, key):\n def __getattr__(self, name):\n def __contains__(self, key):\n def add(self, data):\n def extend(self, data):\n def __init__(self, buffers: Dict) -> None:\n def add(self, data):\n def extend(self, data):\n def __init__(self, initial_capacity: int = 100, dtype=np.int64):\n def _reset(self):\n def append(self, value):\n def pop(self):\n def popleft(self):\n def view(self):\n def __len__(self):\n def first(self):\n def last(self):\n def __str__(self):\n def __init__(self, buffer_space: Union[Dict, gym.spaces.Dict], capacity: Optional[int] = None) -> None:\n def _update_markers(self, new_ends: Iterable = ()):\n def add(self, data):\n def extend(self, data):\nclass Storage(abc.ABC):\nclass FixedStorage(Storage):\nclass NPQueue(object):\nclass CircularStorage(Storage):" }, { "identifier": "EmptyEnv", "path": "research/envs/base.py", "snippet": "class EmptyEnv(gym.Env):\n\n \"\"\"\n An empty holder for defining supervised learning problems\n It works by specifying the ranges and shapes.\n \"\"\"\n\n def __init__(\n self,\n observation_low=None,\n observation_high=None,\n observation_shape=None,\n observation_dtype=np.float32,\n observation_space=None,\n action_low=None,\n action_high=None,\n action_shape=None,\n action_dtype=np.float32,\n action_space=None,\n ):\n if observation_space is not None:\n self.observation_space = observation_space\n else:\n self.observation_space = _get_space(observation_low, observation_high, observation_shape, observation_dtype)\n if action_space is not None:\n self.action_space = action_space\n else:\n self.action_space = _get_space(action_low, action_high, action_shape, action_dtype)\n\n def step(self, action):\n raise NotImplementedError(\"Empty Env does not have step\")\n\n def reset(self, **kwargs):\n raise NotImplementedError(\"Empty Env does not have reset\")" }, { "identifier": "ModuleContainer", "path": "research/networks/base.py", "snippet": "class ModuleContainer(torch.nn.Module):\n CONTAINERS = []\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, **kwargs) -> None:\n super().__init__()\n # save the classes and containers\n base_kwargs = {k: v for k, v in kwargs.items() if not k.endswith(\"_class\") and not k.endswith(\"_kwargs\")}\n\n output_space = observation_space\n for container in self.CONTAINERS:\n module_class = kwargs.get(container + \"_class\", torch.nn.Identity)\n module_class = vars(research.networks)[module_class] if isinstance(module_class, str) else module_class\n if module_class is torch.nn.Identity:\n module_kwargs = dict()\n 
else:\n module_kwargs = base_kwargs.copy()\n module_kwargs.update(kwargs.get(container + \"_kwargs\", dict()))\n # Create the module, and attach it to self\n module = module_class(output_space, action_space, **module_kwargs)\n setattr(self, container, module)\n\n # Set a reset function\n setattr(self, \"reset_\" + container, partial(self._reset, container))\n\n if hasattr(getattr(self, container), \"output_space\"):\n # update the output space\n output_space = getattr(self, container).output_space\n\n # Done creating all sub-modules.\n\n @classmethod\n def create_subset(cls, containers):\n assert all([container in cls.CONTAINERS for container in containers])\n name = \"\".join([container.capitalize() for container in containers]) + \"Subset\"\n return type(name, (ModuleContainer,), {\"CONTAINERS\": containers})\n\n def _reset(self, container: str) -> None:\n module = getattr(self, container)\n with torch.no_grad():\n module.apply(reset)\n\n def compile(self, **kwargs):\n for container in self.CONTAINERS:\n attr = getattr(self, container)\n if type(attr).forward == torch.nn.Module.forward:\n assert hasattr(attr, \"compile\"), (\n \"container \" + container + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, container, torch.compile(attr, **kwargs))\n\n def forward(self, x):\n # Use all of the modules in order\n for container in self.CONTAINERS:\n x = getattr(self, container)(x)\n return x" }, { "identifier": "runners", "path": "research/utils/runners.py", "snippet": "class CloudpickleWrapper:\nclass AsyncState(Enum):\nclass AsyncEnv(gym.Env):\nclass MPRunner(object):\n def __init__(self, fn: Callable):\n def __getstate__(self):\n def __setstate__(self, ob):\n def __call__(self):\ndef alloc_shared_buffer(space: Any):\ndef read_shared_buffer(shared_buffer: Any, space: gym.Space):\ndef write_shared_buffer(shared_buffer: Any, space: gym.Space, value: Any):\n def __init__(\n self, env_fn: Callable, observation_space: Optional[gym.Space] = None, action_space: Optional[gym.Space] = None\n ):\n def step_send(self, action):\n def step_recv(self):\n def step(self, action):\n def reset_send(self):\n def reset_recv(self):\n def reset(self):\n def close(self):\ndef _async_env_worker(env_fn, pipe, parent_pipe, obs_buffer, action_buffer):\n def __init__(\n self,\n env_fn,\n fn: Optional[Callable] = None,\n observation_space: Optional[gym.Space] = None,\n action_space: Optional[gym.Space] = None,\n **kwargs,\n ):\n def start(self, fn: Optional[Callable] = None, **kwargs):\n def started(self):\n def __call__(self, block=False):\n def step(self, *args, **kwargs):\n def reset(self, *args, **kwargs):\n def close(self):\n DEFAULT = \"default\"\n WAITING_RESET = \"reset\"\n WAITING_STEP = \"step\"" }, { "identifier": "utils", "path": "research/utils/utils.py", "snippet": "def to_device(batch: Any, device: torch.device) -> Any:\ndef to_tensor(batch: Any) -> Any:\ndef to_np(batch: Any) -> Any:\ndef remove_float64(batch: Any):\ndef unsqueeze(batch: Any, dim: int) -> Any:\ndef squeeze(batch: Any, dim: int) -> Any:\ndef get_from_batch(batch: Any, start: Union[int, np.ndarray, torch.Tensor], end: Optional[int] = None) -> Any:\ndef set_in_batch(batch: Any, value: Any, start: int, end: Optional[int] = None) -> None:\ndef batch_copy(batch: Any) -> Any:\ndef space_copy(space: gym.Space):\ndef contains_tensors(batch: Any) -> bool:\ndef get_device(batch: Any) -> Optional[torch.device]:\ndef concatenate(*args, dim: int = 0):\ndef append(lst, item):\ndef 
extend(lst1, lst2):\n def __init__(self, name: str = \"\"):\n def forward(self, x: Any) -> Any:\ndef np_dataset_alloc(\n space: gym.Space, capacity: int, begin_pad: Tuple[int] = tuple(), end_pad: Tuple[int] = tuple()\n) -> np.ndarray:\ndef np_bytes_per_instance(space: gym.Space) -> int:\ndef _flatten_dict_helper(flat_dict: Dict, value: Any, prefix: str, separator: str = \".\") -> None:\ndef flatten_dict(d: Dict, separator: str = \".\") -> Dict:\ndef nest_dict(d: Dict, separator: str = \".\") -> Dict:\ndef fetch_from_dict(d: Dict, keys: Union[str, List, Tuple], separator=\".\") -> List[Any]:\ndef create_optim_groups(params, kwargs):\nclass PrintNode(torch.nn.Module):" }, { "identifier": "Algorithm", "path": "research/algs/base.py", "snippet": "class Algorithm(ABC):\n _save_keys: Set[str]\n _compiled: bool\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n network_class: Type[torch.nn.Module],\n dataset_class: Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]],\n network_kwargs: Optional[Dict] = None,\n dataset_kwargs: Optional[Dict] = None,\n validation_dataset_class: Optional[\n Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]]\n ] = None,\n validation_dataset_kwargs: Optional[Dict] = None,\n optim_class: Type[torch.optim.Optimizer] = torch.optim.Adam,\n optim_kwargs: Optional[Dict] = None,\n schedulers_class: Optional[Dict] = None,\n schedulers_kwargs: Optional[Dict[str, Dict]] = None,\n processor_class: Optional[Type[Processor]] = None,\n processor_kwargs: Optional[Dict] = None,\n checkpoint: Optional[str] = None,\n device: Union[str, torch.device] = \"auto\",\n ):\n # Initialize the _save_keys attribute using the superclass.\n # These are used for automatically identifying keys for saving/loading.\n super().__setattr__(\"_save_keys\", set())\n super().__setattr__(\"_module_keys\", set())\n super().__setattr__(\"_compiled\", False)\n\n # Save relevant values\n self.observation_space = observation_space\n self.action_space = action_space\n self.optim = {}\n\n # setup devices\n if device == \"auto\":\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self._device = torch.device(device)\n\n # Setup the data preprocessor first. 
Thus, if we need to reference it in network setup we can.\n # Everything here is saved in self.processor\n self.setup_processor(processor_class, {} if processor_kwargs is None else processor_kwargs)\n\n # Create the network.\n network_kwargs = {} if network_kwargs is None else network_kwargs\n self.setup_network(network_class, network_kwargs)\n\n # Save values for optimizers, which will be lazily initialized later\n self.optim = {}\n self.optim_class = optim_class\n self.optim_kwargs = {\"lr\": 0.0001} if optim_kwargs is None else optim_kwargs\n\n # Save values for schedulers, which will be lazily initialized later\n self.schedulers = {}\n self.schedulers_class = {} if schedulers_class is None else schedulers_class\n self.schedulers_kwargs = {} if schedulers_kwargs is None else schedulers_kwargs\n\n # Save values for datasets, which will be lazily initialized later\n self.dataset_class = dataset_class\n self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs\n self.validation_dataset_class = validation_dataset_class\n self.validation_dataset_kwargs = validation_dataset_kwargs\n\n self._training = False\n\n # Load a check point if we have one -- using non-strict enforcement.\n # NOTE: this only loads the network and will _not_ load the optimizer checkpoint.\n if checkpoint is not None:\n self.load(checkpoint, strict=False)\n\n @property\n def device(self):\n return self._device\n\n @property\n def training(self) -> bool:\n return self._training\n\n def __setattr__(self, name: str, value: Any) -> None:\n # Check to see if the value is a module etc.\n if (hasattr(self, \"_save_keys\") and name in self._save_keys) or (\n hasattr(self, \"_module_keys\") and name in self._module_keys\n ):\n pass\n elif isinstance(value, torch.nn.Parameter):\n self._save_keys.add(name)\n elif isinstance(value, torch.nn.Module):\n self._module_keys.add(name)\n if sum(p.numel() for p in value.parameters()) > 0:\n self._save_keys.add(name) # store if we have a module with more than zero parameters.\n return super().__setattr__(name, value)\n\n @property\n def save_keys(self) -> List[str]:\n return self._save_keys\n\n @property\n def module_keys(self) -> List[str]:\n return self._module_keys\n\n @property\n def compiled(self) -> bool:\n return self._compiled\n\n def to(self, device) -> \"Algorithm\":\n for k in self.save_keys:\n if k == \"processor\" and not self.processor.supports_gpu:\n continue\n else:\n setattr(self, k, getattr(self, k).to(device))\n return self\n\n def compile(self, **kwargs):\n for k in self.save_keys:\n attr = getattr(self, k)\n if isinstance(attr, torch.nn.Module):\n if type(attr).forward == torch.nn.Module.forward:\n # In this case, the forward method hasn't been overriden.\n # Thus we assume there is a compile argument.\n assert hasattr(attr, \"compile\"), (\n \"save key \" + k + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, k, torch.compile(attr, **kwargs))\n # indicate that we have compiled the models.\n self._compiled = True\n\n def train(self) -> None:\n for k in self._module_keys:\n getattr(self, k).train()\n self._training = True\n\n def eval(self) -> None:\n for k in self._module_keys:\n getattr(self, k).eval()\n self._training = False\n\n @property\n def num_params(self):\n _num_params = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n _num_params += sum(p.numel() for p in attr.parameters() if p.requires_grad)\n else:\n assert isinstance(attr, 
torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n if attr.requires_grad:\n _num_params += attr.numel()\n return _num_params\n\n @property\n def nbytes(self):\n # Returns the size of all the parameters in bytes\n _bytes = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n for p in attr.parameters():\n _bytes += p.nelement() * p.element_size()\n if hasattr(attr, \"buffers\"):\n for b in attr.buffers():\n _bytes += b.nelement() * b.element_size()\n return _bytes\n\n def setup_processor(self, processor_class: Optional[Type[Processor]], processor_kwargs: Dict) -> None:\n if processor_class is None:\n processor = Identity(self.observation_space, self.action_space)\n else:\n processor = processor_class(self.observation_space, self.action_space, **processor_kwargs)\n\n if processor.supports_gpu: # move it to device if it supports GPU computation.\n self.processor = processor.to(self.device)\n else:\n self.processor = processor\n\n def setup_network(self, network_class: Type[torch.nn.Module], network_kwargs: Dict) -> None:\n self.network = network_class(\n self.processor.observation_space, self.processor.action_space, **network_kwargs\n ).to(self.device)\n\n def setup_optimizers(self) -> None:\n \"\"\"\n This is only called by the Trainer, and not called when we load the model.\n This is done so that inference jobs don't load the optimizer state.\n \"\"\"\n # Setup Optimizers\n assert len(self.optim) == 0, \"setup_optimizers called twice!\"\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n parameters = attr.parameters()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n parameters = [attr]\n # Constrcut the optimizer\n self.optim[k] = self.optim_class(parameters, **self.optim_kwargs)\n\n def setup_schedulers(self):\n assert len(self.schedulers) == 0, \"setup_schedulers called twice!\"\n for k in self.schedulers_class.keys():\n if self.schedulers_class[k] is not None:\n assert k in self.optim, \"Did not find schedule key in optimizers dict.\"\n self.schedulers[k] = self.schedulers_class[k](self.optim[k], **self.schedulers_kwargs.get(k, dict()))\n\n def setup_datasets(self, env: gym.Env, total_steps: int):\n \"\"\"\n Called after everything else has been setup, right before training starts\n This is _only_ called by the trainer and is not called by default.\n This function is responsible for creating the following attributes:\n self.dataset (required)\n self.validation_dataset\n \"\"\"\n assert not hasattr(self, \"dataset\"), \"setup_datasets called twice!\"\n assert not hasattr(self, \"validation_dataset\"), \"setup_datasets called twice!\"\n # Setup the train dataset\n self.dataset = self.dataset_class(self.observation_space, self.action_space, **self.dataset_kwargs)\n # Setup the validation dataset\n if self.validation_dataset_class is not None:\n self.validation_dataset = self.validation_dataset_class(\n self.observation_space, self.action_space, **self.validation_dataset_kwargs\n )\n elif self.validation_dataset_kwargs is not None:\n validation_dataset_kwargs = copy.deepcopy(self.dataset_kwargs)\n validation_dataset_kwargs.update(self.validation_dataset_kwargs)\n self.validation_dataset = self.dataset_class(\n self.observation_space, self.action_space, **validation_dataset_kwargs\n )\n else:\n self.validation_dataset = None\n\n def save(self, path: str, extension: str, metadata: Optional[Dict] = None) -> None:\n \"\"\"\n Saves a checkpoint of the model 
and the optimizers\n \"\"\"\n save_dict = {}\n if len(self.optim) > 0:\n save_dict[\"optim\"] = {k: v.state_dict() for k, v in self.optim.items()}\n if len(self.schedulers) > 0:\n save_dict[\"schedulers\"] = {k: v.state_dict() for k, v in self.schedulers.items()}\n for k in self._save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"state_dict\"):\n save_dict[k] = attr.state_dict()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n save_dict[k] = attr\n\n # Add the metadata\n save_dict[\"metadata\"] = {} if metadata is None else metadata\n save_path = os.path.join(path, extension)\n if not save_path.endswith(\".pt\"):\n save_path += \".pt\"\n torch.save(save_dict, save_path)\n\n def load(self, checkpoint: str, strict: bool = True) -> Dict:\n \"\"\"\n Loads the model and its associated checkpoints.\n If we haven't created the optimizers and schedulers, do not load those.\n \"\"\"\n print(\"[research] loading checkpoint:\", checkpoint)\n checkpoint = torch.load(checkpoint, map_location=self.device)\n remaining_checkpoint_keys = set(checkpoint.keys())\n\n # First load everything except for the optim\n for k in self.save_keys: # Loop through keys in the Algorithm.\n if k not in checkpoint:\n if strict:\n raise ValueError(\"Checkpoint did not have key \" + str(k))\n else:\n print(\"[research] Warning: Checkpoint did not have key\", k)\n continue\n\n if isinstance(getattr(self, k), torch.nn.Parameter):\n # directly set the data, this is for nn.Parameters\n getattr(self, k).data = checkpoint[k].data\n else:\n # Otherwise, load via state dict\n getattr(self, k).load_state_dict(checkpoint[k], strict=strict)\n remaining_checkpoint_keys.remove(k)\n\n # Now load the optimizer and its associated keys\n for k in self.optim.keys():\n if strict and k not in checkpoint[\"optim\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find optimizer key\")\n elif k not in checkpoint[\"optim\"]:\n print(\"[research] Warning: Checkpoint did not have optimizer key\", k)\n continue\n self.optim[k].load_state_dict(checkpoint[\"optim\"][k])\n if \"optim\" in checkpoint:\n remaining_checkpoint_keys.remove(\"optim\")\n\n # Now load the schedulers\n for k in self.schedulers.keys():\n if strict and k not in checkpoint[\"schedulers\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find scheduler key\")\n elif k not in checkpoint[\"schedulers\"]:\n print(\"[research] Warning: Checkpoint did not have scheduler key\", k)\n continue\n self.schedulers[k].load_state_dict(checkpoint[\"schedulers\"][k])\n if \"schedulers\" in checkpoint:\n remaining_checkpoint_keys.remove(\"schedulers\")\n\n remaining_checkpoint_keys.remove(\"metadata\") # Do not count metadata key, which is always addded.\n if strict and len(remaining_checkpoint_keys) > 0:\n raise ValueError(\"Algorithm did not have keys \", +str(remaining_checkpoint_keys))\n elif len(remaining_checkpoint_keys) > 0:\n print(\"[research] Warning: Checkpoint keys\", remaining_checkpoint_keys, \"were not loaded.\")\n\n return checkpoint[\"metadata\"]\n\n def format_batch(self, batch: Any) -> Any:\n # Convert items to tensor if they are not.\n # Checking first makes sure we do not distrub memory pinning\n if not utils.contains_tensors(batch):\n batch = utils.to_tensor(batch)\n if self.processor.supports_gpu:\n # Move to CUDA first.\n batch = utils.to_device(batch, self.device)\n batch = self.processor(batch)\n else:\n batch = self.processor(batch)\n batch = utils.to_device(batch, self.device)\n return batch\n\n 
@abstractmethod\n def train_step(self, batch: Any, step: int, total_steps: int) -> Dict:\n \"\"\"\n Train the model. Should return a dict of loggable values\n \"\"\"\n return {}\n\n def validation_step(self, batch: Any) -> Dict:\n \"\"\"\n perform a validation step. Should return a dict of loggable values.\n \"\"\"\n raise NotImplementedError\n\n def env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:\n \"\"\"\n Perform any extra training operations. This is done before the train step is called.\n A common use case for this would be stepping the environment etc.\n \"\"\"\n return {}\n\n def validation_extras(self, path: str, step: int) -> Dict:\n \"\"\"\n Perform any extra validation operations.\n A common usecase for this is saving visualizations etc.\n \"\"\"\n return {}\n\n def _predict(self, batch: Any, **kwargs) -> Any:\n \"\"\"\n Internal prediction function, can be overridden\n By default, we call torch.no_grad(). If this behavior isn't desired,\n override the _predict funciton in your algorithm.\n \"\"\"\n with torch.no_grad():\n if len(kwargs) > 0:\n raise ValueError(\"Default predict method does not accept key word args, but they were provided.\")\n pred = self.network(batch)\n return pred\n\n def predict(self, batch: Any, is_batched: bool = False, **kwargs) -> Any:\n is_np = not utils.contains_tensors(batch)\n if not is_batched:\n # Unsqeeuze everything\n batch = utils.unsqueeze(batch, 0)\n batch = self.format_batch(batch)\n pred = self._predict(batch, **kwargs)\n if not is_batched:\n pred = utils.get_from_batch(pred, 0)\n if is_np:\n pred = utils.to_np(pred)\n return pred" } ]
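The `ReplayBuffer` snippet above stores episodes in a "next" format: each row pairs the observation s_t with the action, reward, and done flag that produced it, and every episode begins with a dummy row for s_0. A minimal sketch of laying out one episode this way, using plain dicts and illustrative names rather than the buffer's actual storage code:

import numpy as np

def episode_to_next_format(observations, actions, rewards, dones, dummy_action):
    # Row 0 pairs s_0 with dummy values; row t pairs s_t with a_{t-1}, r_{t-1}, d_{t-1}.
    rows = [{"obs": observations[0], "action": dummy_action, "reward": 0.0, "done": False}]
    for t in range(len(actions)):
        rows.append(
            {"obs": observations[t + 1], "action": actions[t], "reward": rewards[t], "done": dones[t]}
        )
    return rows

# Two-step episode: s_0 -a_0-> s_1 -a_1-> s_2 (terminal).
ep = episode_to_next_format(
    observations=[np.zeros(3), np.ones(3), 2 * np.ones(3)],
    actions=[0, 1],
    rewards=[0.5, 1.0],
    dones=[False, True],
    dummy_action=0,
)
print(len(ep), ep[1]["action"], ep[2]["done"])  # 3 rows; row 1 carries a_0; row 2 carries the terminal flag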
import datetime import functools import os import sys import tempfile import gym import numpy as np import torch from abc import abstractmethod from typing import Any, Dict, Optional, Union from research.datasets import ReplayBuffer from research.datasets.replay_buffer import storage from research.envs.base import EmptyEnv from research.networks.base import ModuleContainer from research.utils import runners, utils from .base import Algorithm from research.utils.config import Config
12280
elif isinstance(self.processor.action_space, gym.spaces.Discrete): logits = dist.logits if isinstance(dist, torch.distributions.Categorical) else dist if sample: action = torch.distributions.Categorical(logits=logits / temperature).sample() else: action = logits.argmax(dim=-1) return action else: raise ValueError("Complex action_space incompatible with default _predict.") def _off_policy_collector_subprocess( env_fn, queue, config_path: str, checkpoint_path: str, storage_path: str, exclude_keys: Optional[Optional[list]] = None, device: Union[str, torch.device] = "auto", random_steps: int = 0, total_steps: int = 0, ): """ This subprocess loads a train environemnt. It then collects episodes with a loaded policy and saves them to disk. Afterwards, we check to see if there is an updated policy that we can use. """ try: env = env_fn() # Load the model config = Config.load(config_path) config = config.parse() model = config.get_model(observation_space=env.observation_space, action_space=env.action_space, device=device) model.eval() # Compute the buffer space buffer_space = { "obs": env.observation_space, "action": env.action_space, "reward": 0.0, "done": False, "discount": 1.0, } exclude_keys = [] if exclude_keys is None else exclude_keys flattened_buffer_space = utils.flatten_dict(buffer_space) for k in exclude_keys: del flattened_buffer_space[k] def make_dummy_transition(obs): return { "obs": obs, "action": env.action_space.sample(), "reward": 0.0, "discount": 1.0, "done": False, } # Metrics: num_ep = 0 env_steps = 0 current_checkpoint = None # Get the evaluation function. while True: # First, look for a checkpoint. checkpoints = os.listdir(checkpoint_path) if len(checkpoints) > 0: # Sort the the checkpoints by path checkpoints = sorted(checkpoints, key=lambda x: int(x[:-3])) checkpoints = [os.path.join(checkpoint_path, checkpoint) for checkpoint in checkpoints] if checkpoints[-1] != current_checkpoint and os.path.getsize(checkpoints[-1]) > 0: try: _ = model.load(checkpoints[-1]) # load the most recent one # Remove all checkpoints that are not equal to the current one. current_checkpoint = checkpoints[-1] for checkpoint in checkpoints[:-1]: # Ignore the last checkpoint, we loaded it. os.remove(checkpoint) except (EOFError, RuntimeError): _ = model.load(current_checkpoint) # Then, collect an episode current_ep = {k: list() for k in flattened_buffer_space.keys()} current_ep = utils.nest_dict(current_ep) obs = env.reset() utils.append(current_ep, make_dummy_transition(obs)) done = False while not done: if env_steps < random_steps: action = env.action_space.sample() else: with torch.no_grad(): action = model._get_train_action(obs, env_steps, total_steps) obs, reward, done, info = env.step(action) env_steps += 1 if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and len(current_ep["done"]) - 1 == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) transition = dict(obs=obs, action=action, reward=reward, done=done, discount=discount) utils.append(current_ep, transition) # The episode has terminated. num_ep += 1 metrics = dict( steps=env_steps, reward=np.sum(current_ep["reward"]), length=len(current_ep["done"]) - 1, num_ep=num_ep ) queue.put(metrics) # Timestamp it and add the ep idx (num ep - 1 so we start at zero.) ts = datetime.datetime.now().strftime("%Y%m%dT%H%M%S") ep_len = len(current_ep["done"]) ep_filename = f"{ts}_{num_ep - 1}_{ep_len}.npz"
class OffPolicyAlgorithm(Algorithm): def __init__( self, *args, offline_steps: int = 0, # Run fully offline by setting to -1 random_steps: int = 1000, async_runner_ep_lag: int = 1, **kwargs, ): super().__init__(*args, **kwargs) self.offline_steps = offline_steps self.random_steps = random_steps self.async_runner_ep_lag = async_runner_ep_lag def setup_datasets(self, env: gym.Env, total_steps: int): super().setup_datasets(env, total_steps) # Assign the correct update function based on what is passed in. if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0: self.env_step = self._empty_step elif isinstance(env, runners.AsyncEnv): self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 self._resetting = True env.reset_send() # Ask the env to start resetting. self.env_step = self._async_env_step elif isinstance(env, runners.MPRunner): assert isinstance(self.dataset, ReplayBuffer), "must use replaybuffer for MP RUnner." assert self.dataset.distributed, "ReplayBuffer must be distributed for use with Fully MPRunner." # Launch the runner subprocess. self._eps_since_last_checkpoint = 0 self._checkpoint_dir = tempfile.mkdtemp(prefix="checkpoints_") assert self.offline_steps <= 0, "MPRunner does not currently support offline to online." env.start( fn=_off_policy_collector_subprocess, checkpoint_path=self._checkpoint_dir, storage_path=self.dataset.storage_path, random_steps=self.random_steps, exclude_keys=self.dataset.exclude_keys, total_steps=total_steps, ) self.env_step = self._runner_env_step elif isinstance(env, gym.Env): # Setup Env Metrics self._current_obs = env.reset() self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 # Note that currently the very first (s, a) pair is thrown away because # we don't add to the dataset here. # This was done for better compatibility for offline to online learning. self.dataset.add(obs=self._current_obs) # add the first observation. self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. 
self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. # As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high) return utils.to_device(utils.to_tensor(action_range), self.device) def _predict( self, batch: Dict, sample: bool = False, noise: float = 0.0, noise_clip: Optional[float] = None, temperature=1.0 ) -> torch.Tensor: with torch.no_grad(): if isinstance(self.network, ModuleContainer) and "encoder" in self.network.CONTAINERS: obs = self.network.encoder(batch["obs"]) else: obs = batch["obs"] # Could be: Logits (discrete), Float (continuous), or torch Dist dist = self.network.actor(obs) if isinstance(self.processor.action_space, gym.spaces.Box): if isinstance(dist, torch.distributions.Independent): # Guassian Distribution action = dist.sample() if sample else dist.base_dist.loc elif isinstance(dist, torch.distributions.MixtureSameFamily): # Mixture of Gaussians. 
if sample: action = dist.sample() else: # Robomimic always samples from the Categorical, but then does the mixture deterministically. loc = dist.component_distribution.base_dist.loc category = dist.mixture_distribution.sample() # Expand to add Mixture Dim, Action Dim es = dist.component_distribution.event_shape mix_sample_r = category.reshape(category.shape + torch.Size([1] * (len(es) + 1))) mix_sample_r = mix_sample_r.repeat(torch.Size([1] * len(category.shape)) + torch.Size([1]) + es) action = torch.gather(loc, len(dist.batch_shape), mix_sample_r) action = action.squeeze(len(dist.batch_shape)) elif torch.is_tensor(dist): action = dist else: raise ValueError("Model output incompatible with default _predict.") if noise > 0.0: eps = noise * torch.randn_like(action) if noise_clip is not None: eps = torch.clamp(eps, -noise_clip, noise_clip) action = action + eps action = action.clamp(*self.action_range) return action elif isinstance(self.processor.action_space, gym.spaces.Discrete): logits = dist.logits if isinstance(dist, torch.distributions.Categorical) else dist if sample: action = torch.distributions.Categorical(logits=logits / temperature).sample() else: action = logits.argmax(dim=-1) return action else: raise ValueError("Complex action_space incompatible with default _predict.") def _off_policy_collector_subprocess( env_fn, queue, config_path: str, checkpoint_path: str, storage_path: str, exclude_keys: Optional[Optional[list]] = None, device: Union[str, torch.device] = "auto", random_steps: int = 0, total_steps: int = 0, ): """ This subprocess loads a train environemnt. It then collects episodes with a loaded policy and saves them to disk. Afterwards, we check to see if there is an updated policy that we can use. """ try: env = env_fn() # Load the model config = Config.load(config_path) config = config.parse() model = config.get_model(observation_space=env.observation_space, action_space=env.action_space, device=device) model.eval() # Compute the buffer space buffer_space = { "obs": env.observation_space, "action": env.action_space, "reward": 0.0, "done": False, "discount": 1.0, } exclude_keys = [] if exclude_keys is None else exclude_keys flattened_buffer_space = utils.flatten_dict(buffer_space) for k in exclude_keys: del flattened_buffer_space[k] def make_dummy_transition(obs): return { "obs": obs, "action": env.action_space.sample(), "reward": 0.0, "discount": 1.0, "done": False, } # Metrics: num_ep = 0 env_steps = 0 current_checkpoint = None # Get the evaluation function. while True: # First, look for a checkpoint. checkpoints = os.listdir(checkpoint_path) if len(checkpoints) > 0: # Sort the the checkpoints by path checkpoints = sorted(checkpoints, key=lambda x: int(x[:-3])) checkpoints = [os.path.join(checkpoint_path, checkpoint) for checkpoint in checkpoints] if checkpoints[-1] != current_checkpoint and os.path.getsize(checkpoints[-1]) > 0: try: _ = model.load(checkpoints[-1]) # load the most recent one # Remove all checkpoints that are not equal to the current one. current_checkpoint = checkpoints[-1] for checkpoint in checkpoints[:-1]: # Ignore the last checkpoint, we loaded it. 
os.remove(checkpoint) except (EOFError, RuntimeError): _ = model.load(current_checkpoint) # Then, collect an episode current_ep = {k: list() for k in flattened_buffer_space.keys()} current_ep = utils.nest_dict(current_ep) obs = env.reset() utils.append(current_ep, make_dummy_transition(obs)) done = False while not done: if env_steps < random_steps: action = env.action_space.sample() else: with torch.no_grad(): action = model._get_train_action(obs, env_steps, total_steps) obs, reward, done, info = env.step(action) env_steps += 1 if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and len(current_ep["done"]) - 1 == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) transition = dict(obs=obs, action=action, reward=reward, done=done, discount=discount) utils.append(current_ep, transition) # The episode has terminated. num_ep += 1 metrics = dict( steps=env_steps, reward=np.sum(current_ep["reward"]), length=len(current_ep["done"]) - 1, num_ep=num_ep ) queue.put(metrics) # Timestamp it and add the ep idx (num ep - 1 so we start at zero.) ts = datetime.datetime.now().strftime("%Y%m%dT%H%M%S") ep_len = len(current_ep["done"]) ep_filename = f"{ts}_{num_ep - 1}_{ep_len}.npz"
storage.save_data(current_ep, os.path.join(storage_path, ep_filename))
1
2023-10-19 17:25:45+00:00
16k
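The off-policy record above hinges on two conventions that are easy to miss in the flattened code: the per-transition discount (taken from info["discount"] when the env provides it, forced to 1.0 on a time-limit truncation so bootstrapping continues, and 1 - done otherwise) and the timestamped .npz filename used to persist each collected episode. Below is a minimal, self-contained sketch of both in Python; the ToyEnv, the placeholder policy, and the np.savez-based storage are assumptions for illustration only, not the project's actual env factory or storage module.

# Minimal sketch of the collector's discount convention and episode persistence.
# ToyEnv and np.savez are illustrative stand-ins; the real code uses the repo's
# own env_fn and storage.save_data.
import datetime
import os
import tempfile

import numpy as np


class ToyEnv:
    """Tiny stand-in for a Gym-style env with a hard time limit."""

    _max_episode_steps = 5

    def reset(self):
        self._t = 0
        return np.zeros(3, dtype=np.float32)

    def step(self, action):
        self._t += 1
        done = self._t >= self._max_episode_steps
        info = {}  # a real env may also report info["discount"] directly
        return np.random.randn(3).astype(np.float32), 1.0, done, info


def compute_discount(info, done, episode_length, max_episode_steps):
    # Same precedence as the collector: an explicit info value wins, a
    # time-limit truncation keeps bootstrapping (discount = 1.0), and a
    # true terminal zeroes the bootstrap (1 - done).
    if "discount" in info:
        return info["discount"]
    if max_episode_steps is not None and episode_length == max_episode_steps:
        return 1.0
    return 1.0 - float(done)


def collect_and_save(env, storage_path, num_ep_so_far=0):
    ep = {"obs": [env.reset()], "action": [], "reward": [], "done": [], "discount": []}
    done, length = False, 0
    while not done:
        action = np.zeros(1, dtype=np.float32)  # placeholder policy
        obs, reward, done, info = env.step(action)
        length += 1
        ep["obs"].append(obs)
        ep["action"].append(action)
        ep["reward"].append(reward)
        ep["done"].append(done)
        ep["discount"].append(compute_discount(info, done, length, env._max_episode_steps))
    # Filename follows the <timestamp>_<episode index>_<episode length> pattern
    # seen in the record's next_line.
    ts = datetime.datetime.now().strftime("%Y%m%dT%H%M%S")
    filename = f"{ts}_{num_ep_so_far}_{len(ep['done'])}.npz"
    np.savez(os.path.join(storage_path, filename), **{k: np.asarray(v) for k, v in ep.items()})
    return filename


if __name__ == "__main__":
    out_dir = tempfile.mkdtemp(prefix="episodes_")
    print(collect_and_save(ToyEnv(), out_dir))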
nbasyl/LLM-FP4
configs/FPQ_config_llama.py
[ { "identifier": "FPPTQSLBatchingQuantLinear_fpq", "path": "quant_layers/fp_linear.py", "snippet": "class FPPTQSLBatchingQuantLinear_fpq(FPPTQSLQuantLinear):\n def __init__(self, \n in_features: int,\n out_features: int,\n bias: bool = True,\n mode = \"raw\",\n w_bit = 8,\n a_bit = 8,\n w_exponent_bit = 4, a_exponent_bit = 4,\n bias_bit = None,\n bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=10, n_H=1, n_V=1, n_a=1):\n super().__init__(in_features, out_features, bias=bias, mode=mode, w_bit=w_bit, a_bit=a_bit, w_exponent_bit= w_exponent_bit, a_exponent_bit=a_exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V, n_a=n_a)\n self.calib_size = None\n self.calib_batch_size = None\n self.calib_need_batching = False\n self.w_maxval = None\n self.w_intervals = None\n self.a_maxval = None\n self.register_buffer('a_bias',None)\n self.a_biases = None ## fix channel-wise biases\n self.a_intervals = None ## now search for tensor scale not the channel-wise biases\n self.register_buffer('a_interval_zero_point',None)\n self.a_intervals_zero_point = None\n self.n_ls = 1\n\n def _initialize_calib_parameters(self):\n \"\"\" \n set parameters for feeding calibration data\n \"\"\"\n self.calib_size = int(self.raw_input.shape[0])\n self.calib_batch_size = int(self.raw_input.shape[0])\n i = 0\n while True:\n numel = (2*(self.raw_input.numel()+self.raw_out.numel())/self.calib_size*self.calib_batch_size) # number of parameters on GPU\n self.parallel_eq_n = int((3*1024*1024*1024/4)//numel)\n if self.parallel_eq_n <= 1:\n self.calib_need_batching = True\n self.calib_batch_size //= 2\n else:\n break\n \n def _initialize_intervals(self):\n # weight intervals \n ## channel wise\n ## specific for QKV\n if self.n_V != 1:\n # print(\"tackling QKV linear\")\n self.n_ls = 3 # number of tensor scale \n print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1)\n self.w_intervals = []\n if self.w_bit == 8:\n for i in range(self.w_bit-3):\n M = i + 2\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n \n else:\n for i in range(self.w_bit-1):\n M = i\n E = self.w_bit - 1 - M\n self.w_intervals.append(2**E - torch.log2(self.w_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n \n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n self.n_a = self.in_features\n self.crb_acts = self.in_features // self.n_a\n x_maxval = x_.view(*x_.shape[:-1],self.n_a,self.crb_acts).abs().amax(list(range(len(x_.shape)-1))+[-1],keepdim=False).unsqueeze(-1)\n tmp_a_maxvals.append(x_maxval)\n\n \n tmp_a_maxvals = torch.cat(tmp_a_maxvals, dim=1)\n self.a_maxval = tmp_a_maxvals.amax(dim=1, keepdim=True)\n self.a_bias = 2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1\n\n\n self.a_interval = (self.a_bias.min())\n self.a_interval_zero_point = torch.round(self.a_interval)\n\n 
self.a_biases = []\n self.a_intervals = []\n self.a_intervals_zero_point = []\n if self.a_bit == 8:\n for i in range(self.a_bit-3):\n M = i + 2\n E = self.a_bit - 1 - M\n cur_a_bias = (2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n self.a_biases.append(cur_a_bias)\n cur_a_interval = (cur_a_bias.min())\n self.a_intervals.append(cur_a_interval.reshape(1,1))\n self.a_intervals_zero_point.append(torch.round(cur_a_bias.min()))\n \n else:\n for i in range(self.a_bit-1):\n M = i\n E = self.a_bit - 1 - M\n cur_a_bias = (2**E - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-M)) - 1)\n self.a_biases.append(cur_a_bias)\n cur_a_interval = (cur_a_bias.min())\n self.a_intervals.append(cur_a_interval.reshape(1,1))\n self.a_intervals_zero_point.append(torch.round(cur_a_bias.min()))\n\n def _initialize_intervals_eval(self):\n self._initialize_calib_parameters()\n # weight intervals \n ## channel wise\n ## specific for QKV\n if self.n_V != 1:\n # print(\"tackling QKV linear\")\n self.n_ls = 3 # number of tensor scale \n print(\"channel-wise weight\")\n self.n_V = self.out_features\n self.crb_rows = self.out_features // self.n_V\n w_maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.w_maxval = w_maxval\n self.w_interval=(2**self.w_exponent_bit - torch.log2(w_maxval) + math.log2(2 - 2 ** (-self.w_mantissa_bit)) - 1) \n # activation intervals\n tmp_a_maxvals = []\n for b_st in range(0,self.calib_size,self.calib_batch_size):\n b_ed = min(self.calib_size, b_st+self.calib_batch_size)\n x_ = self.raw_input[b_st:b_ed].to(self.weight.device)\n self.n_a = self.in_features\n self.crb_acts = self.in_features // self.n_a\n x_maxval = x_.view(*x_.shape[:-1],self.n_a,self.crb_acts).abs().amax(list(range(len(x_.shape)-1))+[-1],keepdim=False).unsqueeze(-1)\n tmp_a_maxvals.append(x_maxval)\n\n tmp_a_maxvals = torch.cat(tmp_a_maxvals, dim=1)\n self.a_maxval = tmp_a_maxvals.amax(dim=1, keepdim=True)\n self.a_bias = 2**self.a_exponent_bit - torch.log2(self.a_maxval) + math.log2(2 - 2 ** (-self.a_mantissa_bit)) - 1\n\n self.a_interval = (self.a_bias.min()).view(1,1)\n self.a_interval_zero_point = torch.round(self.a_interval).view(1,1)\n self.calibrated = True\n\n\n def get_maxval_from_bias(self, rescale_bias, act_or_weight):\n \n \n if act_or_weight == 0:\n \n return (2 - 2 ** (-self.a_mantissa_bit)) * 2 ** (\n 2**self.a_exponent_bit - 1 - rescale_bias\n )\n elif act_or_weight == 1:\n \n return (2 - 2 ** (-self.w_mantissa_bit)) * 2 ** (\n 2**self.w_exponent_bit - 1 - rescale_bias\n )\n\n def get_log_scale(self, x ,act_or_weight):\n \n if act_or_weight == 0:\n \n a_bias = self.a_bias\n a_bias = torch.clamp(torch.round(a_bias), torch.round(self.a_interval), torch.round(self.a_interval) + 2**(self.a_exponent_bit) - 1 ) - self.a_interval_zero_point + self.a_interval\n a_bias = a_bias.float()\n a_maxval = self.get_maxval_from_bias(rescale_bias = a_bias, act_or_weight=0)\n a_minval = -a_maxval\n a = torch.min(torch.max(x, a_minval), a_maxval)\n \n a_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(a)) + a_bias)).detach(), 1.0)\n return a, 2.0 ** (a_log_scales - self.a_mantissa_bit - a_bias)\n \n elif act_or_weight == 1:\n \n w_bias = self.w_interval\n w_bias = w_bias.float()\n w_maxval = self.get_maxval_from_bias(w_bias, 1)\n w_minval = -w_maxval\n w = torch.min(torch.max(x, w_minval), w_maxval)\n w_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(w)) + w_bias)).detach(), 1.0)\n return w, 2.0 ** (w_log_scales - self.w_mantissa_bit - 
w_bias)\n\n def get_w_scale(self, input, bits, mantissa_bit, bias):\n \n M = mantissa_bit\n E = bits - 1 - M\n bias = bias.float()\n maxval = (2 - 2 ** (-M)) * 2 ** (\n 2**E - 1 - bias\n )\n\n minval = -maxval\n input = torch.min(torch.max(input, minval), maxval)\n input_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(input)) + bias)).detach(), 1.0)\n return input, 2.0 ** (input_log_scales - M - bias)\n \n def get_scale(self, input, bits, mantissa_bit, bias, tensor_scale, tensor_scale_zero_point):\n \n M = mantissa_bit\n E = bits - 1 - M\n \n rescale_bias = torch.clamp(torch.round(bias), torch.round(tensor_scale), torch.round(tensor_scale) + 2**E - 1) - tensor_scale_zero_point + tensor_scale\n rescale_bias = rescale_bias.float()\n\n maxval = (2 - 2 ** (-M)) * 2 ** (\n 2**E - 1 - rescale_bias\n )\n\n minval = -maxval\n input = torch.min(torch.max(input, minval), maxval)\n \n input_log_scales = torch.clamp((torch.floor(torch.log2(torch.abs(input)) + rescale_bias)).detach(), 1.0)\n\n return input, 2.0 ** (input_log_scales - M - rescale_bias) \n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None, raw_grad=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) ** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _get_pearson_w(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,n_V,crb_rows\n tensor_raw: b,*,1,n_V,crb_rows\n \"\"\"\n b, parallel_eq_n, n_V = tensor_sim.shape[0],tensor_sim.shape[-3],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-3).contiguous_().view(b,-1,n_V,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-3).view(b,-1,n_V,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,n_V,parallel_eq_n\n similarity = similarity.permute(0,2,1).contiguous_()\n return similarity\n \n def _get_pearson_a(self, tensor_raw, tensor_sim):\n \"\"\"\n Quick implementation of similarity-aware linear quantization\n tensor_sim: b,*,parallel_eq_n,oc\n tensor_raw: b,*,1,oc\n \"\"\"\n b, parallel_eq_n = tensor_sim.shape[0],tensor_sim.shape[-2]\n tensor_sim = tensor_sim.transpose(-1,-2).contiguous_().view(b,-1,parallel_eq_n)\n tensor_raw = tensor_raw.transpose(-1,-2).view(b,-1,1)\n tensor_sim_mean = tensor_sim.mean(dim=[0,1],keepdim=True)\n tensor_raw_mean = tensor_raw.mean(dim=[0,1],keepdim=True)\n similarity = torch.cosine_similarity(tensor_raw-tensor_raw_mean, tensor_sim-tensor_sim_mean, dim=1) # shape: b,parallel_eq_n\n return similarity\n\n def _search_best_w_interval(self, weight_interval_candidates):\n \n # print(f\"weight_interval_candidates shape {weight_interval_candidates.shape}\")\n for man in range(weight_interval_candidates.shape[0]):\n # 
print(f\"CUR w E{self.w_bit - 1 - man}M{man}\")\n tmp_w_interval = self.w_intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n # print(f\"before search E{self.w_bit-1-man}M{man} self.w_intervals[man] {self.w_intervals[man][0][0]}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_out_expanded = torch.cat(torch.chunk(raw_out_expanded.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,1,n_V,crb_rows\n raw_grad = self.raw_grad\n similarities = []\n \n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_w_interval.repeat(p_ed-p_st,1,1,1,1)\n # print(f\"cur_w_interval {cur_w_interval.shape}\")\n cur_w_interval[:,:,:,h:h+1,:] = weight_interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n \n if self.w_bit == 8:\n w, cur_w_scale = self.get_w_scale(w_sim, bits = self.w_bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_w_scale(w_sim, bits = self.w_bit, mantissa_bit= man, bias= cur_w_interval)\n \n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.in_features) # shape: parallel_eq_n*oc,ic\n bias_sim = self.bias.repeat(p_ed-p_st) if self.bias is not None else None\n # quantize input\n x_sim = self.quant_input(x)\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n*oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=p_ed-p_st, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,oc\n out_sim = torch.cat(torch.chunk(out_sim.unsqueeze(-2), chunks=self.n_V, dim=-1), dim=-2) # shape: b,*,parallel_eq_n,n_V,crb_rows\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n,n_V\n if len(similarity.shape) > 3:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-2))) # shape: b, parallel_eq_n, n_V\n else:\n similarity = self._get_pearson_w(raw_out_expanded, out_sim)\n similarity = similarity.sum(dim=0, keepdim=True) # shape: 1, parallel_eq_n, n_V\n similarities.append(similarity)\n # store best weight interval of h into tmp_w_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n, n_V\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n, n_V\n h_best_index = batch_similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_w_interval[:,:,:,h:h+1,:] = torch.gather(weight_interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.w_intervals[man] = tmp_w_interval.squeeze(dim=0)\n\n def _search_best_w_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n if self.w_bit == 8:\n w_mantissa_bits_candidate = [i for i in range(self.w_bit-3)]\n else:\n w_mantissa_bits_candidate = [i for i in range(self.w_bit-1)]\n \n 
batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n # quantize input\n x_sim = self.quant_input(x)\n \n for w_mantissa_bit in w_mantissa_bits_candidate:\n if self.w_bit == 8:\n shift_w_mantissa_bit = w_mantissa_bit + 2\n else:\n shift_w_mantissa_bit = w_mantissa_bit\n \n # print(f\"CUR w E{self.w_bit - 1 - shift_w_mantissa_bit}M{shift_w_mantissa_bit}\")\n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w,cur_w_scale = self.get_w_scale(w_sim, bits = self.w_bit, mantissa_bit= shift_w_mantissa_bit, bias= self.w_intervals[w_mantissa_bit])\n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale)\n w_sim = w_sim.view(-1,self.in_features)\n bias_sim = self.bias if self.bias is not None else None\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n batch_similarities.append(similarities)\n batch_similarities = torch.vstack(batch_similarities)\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n\n if self.w_bit == 8:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - self.w_mantissa_bit).to(self.weight.device) \n \n else:\n self.w_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.w_exponent_bit = torch.tensor(self.w_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.w_interval = self.w_intervals[best_mantissa_bit]\n # print(f\"search result E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n # print(\"finish searching fp format for linear weight\")\n\n def _search_best_a_interval(self, input_interval_candidates):\n \n for man in range(input_interval_candidates.shape[0]):\n \n tmp_a_interval = self.a_intervals[man].unsqueeze(-1) # shape: n_a,1,1\n\n for a in range(tmp_a_interval.shape[0]): # the whole tensor only has one scaling factor\n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out_expanded = self.raw_out[b_st:b_ed].to(self.weight.device).unsqueeze(-2) # shape: b,*,1,oc\n raw_grad = self.raw_grad\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_a_interval = tmp_a_interval.repeat(1,1,p_ed-p_st) # shape: n_a,1,parallel_eq_n\n cur_a_interval[a:a+1,:,:] = input_interval_candidates[man][a:a+1,:,p_st:p_ed]\n # quantize weight and bias \n w_sim, bias_sim = self.quant_weight_bias()\n # quantize input\n x_sim=torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2).unsqueeze(-1)\n cur_a_bias = self.a_biases[man].unsqueeze(-1)\n \n cur_a_interval_zero_point = torch.round(cur_a_interval)\n # print(f\"cur_a_interval_zero_point {cur_a_interval_zero_point.shape}\")\n # print(f\"cur_a_bias {cur_a_bias.shape}\")\n if 
self.a_bit == 8:\n # print(f\"CUR a E{self.a_bit - 1 - man -2}M{man+2}\")\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man+2, bias= cur_a_bias,tensor_scale= cur_a_interval,tensor_scale_zero_point=cur_a_interval_zero_point)\n else:\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= man, bias= cur_a_bias,tensor_scale= cur_a_interval,tensor_scale_zero_point=cur_a_interval_zero_point)\n\n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: b,*,n_a,crb_acts,parallel_eq_n\n # print(f\"unique a values{torch.unique(x_sim[0]).shape[0]}\")\n x_sim = x_sim.permute(*list(range(len(x_sim.shape)-3)),-1,-3,-2).reshape(*x.shape[:-1],p_ed-p_st,x.shape[-1]) # shape: b,*,parallel_eq_n,ic\n # print(f\"x_sim {x_sim.shape}\")\n # calculate similarity and store them\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: b,*,parallel_eq_n,oc\n if self.metric != \"pearson\":\n similarity = self._get_similarity(raw_out_expanded, out_sim, self.metric, raw_grad) # shape: b,*,parallel_eq_n\n if len(similarity.shape) > 2:\n similarity = torch.mean(similarity, dim=list(range(1,len(similarity.shape)-1))) # shape: b, parallel_eq_n\n else:\n similarity = self._get_pearson_a(raw_out_expanded, out_sim)\n similarity = torch.sum(similarity, dim=0, keepdim=True) # shape: 1, parallel_eq_n\n \n similarities.append(similarity)\n # store best input interval and store in tmp_a_interval\n similarities = torch.cat(similarities, dim=1) # shape: 1, eq_n\n batch_similarities.append(similarities)\n batch_similarities = torch.cat(batch_similarities, dim=0).sum(dim=0, keepdim=False) # shape: eq_n\n # print(f\"linear similarity {batch_similarities.sum()}\")\n a_best_index = batch_similarities.argmax(dim=0, keepdim=True).reshape(1,1,-1)\n # a_best_index = batch_similarities.argmax(dim=0, keepdim=True)\n # print(f\"a_best_index {a_best_index.shape}\")\n # print(f\"input_interval_candidates[man] {input_interval_candidates[man].shape}\")\n tmp_a_interval[a:a+1,:,:] = torch.gather(input_interval_candidates[man][a:a+1,:,:],dim=2,index=a_best_index)\n \n self.a_intervals[man] = tmp_a_interval.squeeze(-1)\n self.a_intervals_zero_point[man] = torch.round(self.a_intervals[man])\n\n def _search_best_a_format(self):\n \n batch_similarities = [] # similarities, need to concatenate and calculate sum (equivalent to mean with argmax)\n\n # format candidate\n if self.a_bit == 8:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-3)]\n else:\n a_mantissa_bits_candidate = [i for i in range(self.a_bit-1)]\n # quantize input\n w_sim, bias_sim = self.quant_weight_bias()\n # print(f\"before search linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n for b_st in range(0, self.calib_size, self.calib_batch_size):\n b_ed = min(self.calib_size, b_st + self.calib_batch_size)\n x = self.raw_input[b_st:b_ed].to(self.weight.device)\n raw_out = self.raw_out[b_st:b_ed].to(self.weight.device) # shape: b,*,oc\n raw_grad = self.raw_grad\n similarities = []\n \n for a_mantissa_bit in a_mantissa_bits_candidate:\n if self.a_bit == 8:\n shift_a_mantissa_bit = a_mantissa_bit + 2\n else:\n shift_a_mantissa_bit = a_mantissa_bit\n \n x_sim = torch.cat(torch.chunk(x.unsqueeze(-2), chunks=self.n_a, dim=-1), dim=-2)\n\n cur_a_bias = self.a_biases[a_mantissa_bit]\n cur_a_interval = self.a_intervals[a_mantissa_bit]\n cur_a_interval_zero_point = self.a_intervals_zero_point[a_mantissa_bit]\n cur_a, cur_a_scale = self.get_scale(x_sim, bits = self.a_bit, mantissa_bit= shift_a_mantissa_bit, bias= 
cur_a_bias,tensor_scale= cur_a_interval,tensor_scale_zero_point=cur_a_interval_zero_point)\n \n x_sim=(cur_a/(cur_a_scale)).round_()*(cur_a_scale) # shape: B,*,n_a,crb_acts\n if len(x.shape) == 3:\n x_sim = x_sim.view(x.shape[0],x.shape[1],x.shape[2])\n else:\n x_sim = x_sim.view(x.shape[0],1,x.shape[1])\n out_sim = F.linear(x_sim, w_sim, bias_sim) # shape: B,*,oc \n if len(raw_out.shape) == 2:\n out_sim = out_sim.view(raw_out.shape[0],raw_out.shape[1])\n similarity = self._get_similarity(raw_out, out_sim, self.metric, raw_grad) #B,*,oc\n similarity = torch.mean(similarity)\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n batch_similarities.append(similarities)\n \n batch_similarities = torch.vstack(batch_similarities)\n best_mantissa_bit = batch_similarities.sum(dim=0, keepdim=True).argmax(dim=1).item()\n\n if self.a_bit == 8:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.a_mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device)\n self.a_exponent_bit = torch.tensor(self.a_bit - 1 - best_mantissa_bit).to(self.weight.device) \n\n self.a_interval = self.a_intervals[best_mantissa_bit]\n self.a_interval_zero_point = self.a_intervals_zero_point[best_mantissa_bit]\n self.a_bias = self.a_biases[best_mantissa_bit]\n # print(f\"search result linear activation E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n # print(\"finish searching fp format for linear activation\")\n\n def calibration_step2(self):\n \"\"\"\n Only use cached raw inputs/outs/grads\n \"\"\"\n self._initialize_calib_parameters()\n self._initialize_intervals()\n\n # prepare weight intervals and similarities\n weight_interval_candidates = []\n if self.w_bit == 8:\n for m in range(self.w_bit-3):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n else:\n for m in range(self.w_bit-1):\n weight_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.w_intervals[m].unsqueeze(0)\n weight_interval_candidates.append(weight_interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n weight_interval_candidates = torch.vstack(weight_interval_candidates)\n\n input_interval_candidates = []\n if self.a_bit == 8:\n for m in range(self.a_bit-3): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n \n else:\n for m in range(self.a_bit-1): \n input_interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(1,1,-1) * self.a_intervals[m].unsqueeze(-1)\n input_interval_candidates.append(input_interval_candidate.unsqueeze(0)) # shape: n_a,1,eq_n\n input_interval_candidates = torch.vstack(input_interval_candidates)\n \n \n for e in range(self.search_round):\n # search for best weight interval\n 
self._search_best_w_interval(weight_interval_candidates)\n # search for best input interval\n self._search_best_a_interval(input_interval_candidates)\n # search for best weight format\n self._search_best_w_format()\n # search for best input format\n self._search_best_a_format()\n\n print(f\"final w format E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n # print(f\"final self.w_interval {self.w_interval}\")\n # print(f\"final self.w_interval_zero_point {self.w_interval_zero_point}\")\n print(f\"final a format E{self.a_exponent_bit}M{self.a_mantissa_bit}\")\n # print(f\"final self.a_interval {self.a_interval}\")\n # print(f\"final self.a_interval_zero_point {self.a_interval_zero_point}\")\n self.calibrated = True\n # self._bias_correction_quant_forward(self.raw_input.to(self.weight.device)) # debugging\n del self.raw_input, self.raw_out, self.raw_grad\n return None" }, { "identifier": "FPPTQSLQuantEmbedding_fpq_baseline", "path": "quant_layers/fp_embed.py", "snippet": "class FPPTQSLQuantEmbedding_fpq_baseline(FPPTQSLQuantEmbedding):\n def __init__(self, \n num_embeddings: int,\n embedding_dim: int,\n padding_idx: int,\n mode = \"raw\",\n bit = 8,\n exponent_bit = 4,\n bias_bit = None,\n bias_correction = False,\n metric=\"L2_norm\", search_round=1, eq_alpha=0, eq_beta=1, eq_n=100, parallel_eq_n=1, n_H=1, n_V=1):\n super().__init__(num_embeddings, embedding_dim, padding_idx, mode=mode, bit=bit, exponent_bit= exponent_bit, bias_bit=bias_bit, bias_correction=bias_correction, metric=metric, search_round=search_round, eq_alpha=eq_alpha, eq_beta=eq_beta, eq_n=eq_n, parallel_eq_n=parallel_eq_n, n_H=n_H, n_V=n_V)\n self.maxval = None\n self.intervals = None\n\n def _initialize_intervals_eval(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.calibrated = True\n\n def _initialize_intervals(self):\n\n self.n_V = self.num_embeddings\n self.crb_rows = self.num_embeddings // self.n_V\n maxval = self.weight.view(self.n_V, self.crb_rows,self.n_H,self.crb_cols).abs().amax([1,3],keepdim=True)\n self.maxval = maxval\n self.interval=(2**self.exponent_bit - torch.log2(maxval) + math.log2(2 - 2 ** (-self.mantissa_bit)) - 1)\n self.intervals = []\n if self.bit == 8: ## need to constrain the exponent as too big exponent bits will result in overflow\n # E7M0, E6M1, E5M2, E4M3, E3M4, E2M5, E1M6, start with E5M2 as E7M0 and E6M1 usually performs quite bad and results in overflow\n for i in range(self.bit-3):\n M = i + 2\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n else:\n for i in range(self.bit-1):\n M = i\n E = self.bit - 1 - M\n self.intervals.append(2**E - torch.log2(self.maxval) + math.log2(2 - 2 ** (-M)) - 1)\n\n def _get_similarity(self, tensor_raw, tensor_sim, metric=None):\n \"\"\"\n tensor_raw: *, features\n tensor_sim: *, features\n similarity: *\n It's your job to calculate mean on * dims!\n \"\"\"\n if metric == \"cosine\":\n similarity = F.cosine_similarity(tensor_raw, tensor_sim, dim=-1)\n else:\n if metric == \"L1_norm\":\n similarity = -torch.abs(tensor_raw - tensor_sim)\n elif metric == \"L2_norm\":\n similarity = -(tensor_raw - tensor_sim) ** 2\n elif metric == \"linear_weighted_L2_norm\":\n similarity = -tensor_raw.abs() * (tensor_raw - tensor_sim) 
** 2\n elif metric == \"square_weighted_L2_norm\":\n similarity = -(tensor_raw * (tensor_raw - tensor_sim)) ** 2\n else:\n raise NotImplementedError(f\"metric {metric} not implemented!\")\n similarity = torch.mean(similarity, dim=-1)\n return similarity\n\n def _search_best_interval(self, interval_candidates):\n \n # print(f\"interval_candidates shape {interval_candidates.shape}\")\n for man in range(interval_candidates.shape[0]):\n tmp_interval = self.intervals[man].unsqueeze(0) # shape: 1,n_V,1,n_H,1\n for h in range(self.n_H):\n similarities = []\n for p_st in range(0,self.eq_n,self.parallel_eq_n):\n p_ed = min(self.eq_n, p_st+self.parallel_eq_n)\n cur_w_interval = tmp_interval.repeat(p_ed-p_st,1,1,1,1)\n cur_w_interval[:,:,:,h:h+1,:] = interval_candidates[man][p_st:p_ed,:,:,h:h+1,:]\n # quantize weight and bias \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols).unsqueeze(0) # shape: 1,n_V,crb_rows,n_H,crb_cols\n \n if self.bit >= 8:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man+2, bias= cur_w_interval)\n else:\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= man, bias= cur_w_interval)\n\n w_sim = (w/cur_w_scale).round_().mul_(cur_w_scale) # shape: parallel_eq_n,n_V,crb_rows,n_H,crb_cols\n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim) # shape: parallel_eq_n*oc,ic\n \n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) # shape: B,*,parallel_eq_n,n_V\n if self.n_V == 1:\n similarity = similarity.sum(dim=1, keepdim=True)\n \n similarities.append(similarity)\n # store best weight interval of h into tmp_interval\n similarities = torch.cat(similarities, dim=0) # shape: eq_n, n_V\n h_best_index = similarities.argmax(dim=0).reshape(1,-1,1,1,1) # shape: 1,n_V,1,1,1\n tmp_interval[:,:,:,h:h+1,:] = torch.gather(interval_candidates[man][:,:,:,h:h+1,:],dim=0,index=h_best_index)\n self.intervals[man] = tmp_interval.squeeze(dim=0)\n\n def _search_best_format(self):\n \n # print(f\"before search linear weight E{self.w_exponent_bit}M{self.w_mantissa_bit}\")\n \n # format candidate\n if self.bit >= 8:\n mantissa_bits_candidate = [i for i in range(self.bit-3)]\n else:\n mantissa_bits_candidate = [i for i in range(self.bit-1)]\n \n similarities = []\n for mantissa_bit in mantissa_bits_candidate:\n if self.bit >= 8:\n shift_mantissa_bit = mantissa_bit + 2\n else:\n shift_mantissa_bit = mantissa_bit\n \n w_sim = self.weight.view(self.n_V,self.crb_rows,self.n_H,self.crb_cols)\n w, cur_w_scale = self.get_scale(w_sim, bits = self.bit, mantissa_bit= shift_mantissa_bit, bias= self.intervals[mantissa_bit])\n \n w_sim = (w/cur_w_scale)\n \n w_sim = w_sim.round_().mul_(cur_w_scale)\n\n \n w_sim = w_sim.view(-1,self.num_embeddings,self.embedding_dim)\n\n similarity = self._get_similarity(self.weight.unsqueeze(0), w_sim, self.metric) #B,*,oc\n similarity = torch.mean(similarity) # shape: 1\n similarities.append(similarity)\n similarities = torch.tensor(similarities)\n best_mantissa_bit = similarities.argmax(dim=0).item()\n \n if self.bit >= 8:\n self.mantissa_bit = torch.tensor(best_mantissa_bit + 2).to(self.weight.device)\n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n else:\n self.mantissa_bit = torch.tensor(best_mantissa_bit).to(self.weight.device) \n self.exponent_bit = torch.tensor(self.bit - 1 - best_mantissa_bit).to(self.weight.device) \n \n self.interval = self.intervals[best_mantissa_bit]\n\n def calibration_step2(self):\n\n 
self._initialize_intervals()\n\n # prepare intervals and similarities\n interval_candidates = []\n if self.bit >=8:\n for m in range(self.bit-3): #m 2 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n \n else:\n for m in range(self.bit-1): #m 0 ~ 6\n interval_candidate = torch.tensor([self.eq_alpha + i*(self.eq_beta - self.eq_alpha)/self.eq_n for i in range(self.eq_n + 1)]).to(self.weight.device).view(-1,1,1,1,1) * self.intervals[m].unsqueeze(0)\n interval_candidates.append(interval_candidate.unsqueeze(0)) # shape: num_man_options,eq_n,n_V,1,n_H,1\n interval_candidates = torch.vstack(interval_candidates)\n\n for e in range(self.search_round):\n # search for best weight interval\n self._search_best_interval(interval_candidates)\n # search for best weight format\n self._search_best_format()\n\n print(f\"search format E{self.exponent_bit}M{self.mantissa_bit}\")\n\n self.calibrated = True\n return None" } ]
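The context snippets above repeatedly derive a per-channel "bias" from an observed max value, bias = 2^E - log2(maxval) + log2(2 - 2^-M) - 1, and recover the clipping range from it, maxval = (2 - 2^-M) * 2^(2^E - 1 - bias). A small self-contained numeric sketch of that round trip, plus a toy fake-quantizer in the same spirit as get_scale, is shown below; the E4M3-style format and the example maxval are arbitrary choices for illustration, not values from the repo's calibration.

# Minimal numeric check of the bias <-> maxval relationship used by the FP
# quant layers above. E, M, and the example maxval are illustrative only.
import math


def bias_from_maxval(maxval, exponent_bits, mantissa_bits):
    # bias = 2^E - log2(maxval) + log2(2 - 2^-M) - 1  (as in _initialize_intervals)
    return 2 ** exponent_bits - math.log2(maxval) + math.log2(2 - 2 ** (-mantissa_bits)) - 1


def maxval_from_bias(bias, exponent_bits, mantissa_bits):
    # maxval = (2 - 2^-M) * 2^(2^E - 1 - bias)  (as in get_maxval_from_bias)
    return (2 - 2 ** (-mantissa_bits)) * 2 ** (2 ** exponent_bits - 1 - bias)


def fake_quant(x, bias, exponent_bits, mantissa_bits):
    # Clamp to the representable range, then round onto a per-value
    # power-of-two grid (mirrors get_scale / get_log_scale).
    maxval = maxval_from_bias(bias, exponent_bits, mantissa_bits)
    x = min(max(x, -maxval), maxval)
    log_scale = max(math.floor(math.log2(abs(x)) + bias), 1.0) if x != 0 else 1.0
    scale = 2.0 ** (log_scale - mantissa_bits - bias)
    return round(x / scale) * scale


if __name__ == "__main__":
    E, M = 4, 3       # an E4M3-style format, chosen only as an example
    maxval = 6.0      # pretend calibration saw |x| up to 6.0
    b = bias_from_maxval(maxval, E, M)
    print(f"bias = {b:.4f}")
    print(f"round-trip maxval = {maxval_from_bias(b, E, M):.4f}")  # recovers ~6.0
    print(f"quant(2.3) = {fake_quant(2.3, b, E, M):.4f}")

The two formulas are exact inverses, which is why the search code can move freely between an interval (bias) parameterization and a clipping-range parameterization.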
from quant_layers.fp_linear import FPPTQSLBatchingQuantLinear_fpq
from quant_layers.fp_embed import FPPTQSLQuantEmbedding_fpq_baseline
11186
bit = 8 exp_bit = 4 embed_name_list = ["qembedding"] fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"] matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"] w_bit = {name: bit for name in fc_name_list} a_bit = {name: bit for name in fc_name_list} embed_bit = {name: bit for name in embed_name_list} A_bit = {name: bit for name in matmul_name_list} B_bit = {name: bit for name in matmul_name_list} w_exp_bit = {name: exp_bit for name in fc_name_list} a_exp_bit = {name: exp_bit for name in fc_name_list} embed_exp_bit = {name: exp_bit for name in embed_name_list} A_exp_bit = {name: exp_bit for name in matmul_name_list} B_exp_bit = {name: exp_bit for name in matmul_name_list} ptqsl_embedding_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1 } ptqsl_linear_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1, "n_a": 1, "bias_correction":True # Conventionally I'll not add an actual bias correction in linear } def get_module(module_type, *args, **kwargs): if "embedding" in module_type: kwargs.update(ptqsl_embedding_kwargs)
bit = 8 exp_bit = 4 embed_name_list = ["qembedding"] fc_name_list = [ "qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o","qlinear_gate","qlinear_down","qlinear_up","qlinear_score"] matmul_name_list = [ "qmatmul_qk", "qmatmul_scorev"] w_bit = {name: bit for name in fc_name_list} a_bit = {name: bit for name in fc_name_list} embed_bit = {name: bit for name in embed_name_list} A_bit = {name: bit for name in matmul_name_list} B_bit = {name: bit for name in matmul_name_list} w_exp_bit = {name: exp_bit for name in fc_name_list} a_exp_bit = {name: exp_bit for name in fc_name_list} embed_exp_bit = {name: exp_bit for name in embed_name_list} A_exp_bit = {name: exp_bit for name in matmul_name_list} B_exp_bit = {name: exp_bit for name in matmul_name_list} ptqsl_embedding_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1 } ptqsl_linear_kwargs = { "metric": "L2_norm", "eq_alpha": 0.01, "eq_beta": 1.2, "eq_n": 100, 'search_round': 3, "n_V": 1, "n_H": 1, "n_a": 1, "bias_correction":True # Conventionally I'll not add an actual bias correction in linear } def get_module(module_type, *args, **kwargs): if "embedding" in module_type: kwargs.update(ptqsl_embedding_kwargs)
module= FPPTQSLQuantEmbedding_fpq_baseline(*args,**kwargs,bit= embed_bit[module_type], exponent_bit=embed_exp_bit[module_type], padding_idx=0)
1
2023-10-15 06:05:13+00:00
16k
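The config in this record keys every quantization hyperparameter on the layer's name (w_bit, a_bit, embed_bit, and their exponent-bit counterparts) and lets a single get_module factory merge the shared search kwargs before constructing the quantized layer, as the next_line shows for the embedding branch. Below is a self-contained schematic of that pattern; the Dummy* classes stand in for the repo's FP quant layers, and all names and numbers are illustrative assumptions, not the project's actual API.

# Schematic of the name-keyed bit-width dicts plus a get_module-style factory.
from dataclasses import dataclass


@dataclass
class DummyQuantEmbedding:
    num_embeddings: int
    embedding_dim: int
    bit: int
    exponent_bit: int
    search_round: int = 1


@dataclass
class DummyQuantLinear:
    in_features: int
    out_features: int
    w_bit: int
    a_bit: int
    metric: str = "L2_norm"


bit, exp_bit = 8, 4
embed_names = ["qembedding"]
fc_names = ["qlinear_query", "qlinear_key", "qlinear_value", "qlinear_o"]

embed_bit = {name: bit for name in embed_names}
embed_exp_bit = {name: exp_bit for name in embed_names}
w_bit = {name: bit for name in fc_names}
a_bit = {name: bit for name in fc_names}

shared_embedding_kwargs = {"search_round": 3}
shared_linear_kwargs = {"metric": "L2_norm"}


def get_module(module_type, *args, **kwargs):
    # Pick the shared kwargs and bit widths by the layer's registered name.
    if "embedding" in module_type:
        kwargs.update(shared_embedding_kwargs)
        return DummyQuantEmbedding(
            *args, **kwargs,
            bit=embed_bit[module_type],
            exponent_bit=embed_exp_bit[module_type],
        )
    kwargs.update(shared_linear_kwargs)
    return DummyQuantLinear(
        *args, **kwargs,
        w_bit=w_bit[module_type],
        a_bit=a_bit[module_type],
    )


if __name__ == "__main__":
    print(get_module("qembedding", num_embeddings=32000, embedding_dim=4096))
    print(get_module("qlinear_query", in_features=4096, out_features=4096))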
bcmi/libcom
libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('assets/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not 
allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) 
== torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)\n\n # alphas_cumprod = self.model.alphas_cumprod\n # assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n # self.register_buffer('betas', to_torch(self.model.betas))\n # self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n # self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n betas = torch.linspace(0.00085, 0.012, 1000, dtype=torch.float32).cpu()\n alphas = 1.0 - betas\n alphas_cumprod = torch.cumprod(alphas, dim=0)\n alphas_cumprod_prev = torch.from_numpy(np.append(1., alphas_cumprod[:-1]))\n\n self.register_buffer('betas', to_torch(betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev.to(torch.float32).to(self.model.device)\n)\n\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta, verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n\n print(ddim_sigmas)\n print(ddim_alphas)\n\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n features_adapter=None,\n append_to_context=None,\n cond_tau=0.4,\n style_cond_tau=1.0,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n features_adapter=features_adapter,\n append_to_context=append_to_context,\n cond_tau=cond_tau,\n style_cond_tau=style_cond_tau,\n\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, features_adapter=None,\n append_to_context=None, cond_tau=0.4, style_cond_tau=1.0,):\n device = self.model.betas.device\n b = shape[0]\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n\n time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n def get_timesteps(num_inference_steps, strength, tt):\n # get the original timestep using init_timestep\n print(time_range)\n\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n t_start = max(num_inference_steps - init_timestep, 0)\n print(t_start)\n\n tt = tt[t_start-1:]\n return tt, num_inference_steps - t_start\n time_range, num_inference_steps = get_timesteps(100, 0.3, time_range)\n\n print(time_range)\n\n\n if x_T is None:\n # img = torch.randn(shape, device=device)\n\n ts = torch.full((b,),time_range[0], device=device, dtype=torch.long)\n img = self.model.q_sample(x0, ts)\n\n else:\n img = x_T\n\n\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n\n\n iterator = tqdm(time_range[1:], desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n \n img = img_orig * (1.-mask) + mask * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n 
quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n features_adapter=None if index < int(\n (1 - cond_tau) * total_steps) else features_adapter,\n append_to_context=None if index < int(\n (1 - style_cond_tau) * total_steps) else append_to_context,\n )\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, features_adapter=None,\n append_to_context=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n if append_to_context is not None:\n model_output = self.model.apply_model(x, t, torch.cat([c, append_to_context], dim=1),\n features_adapter=features_adapter)\n else:\n model_output = self.model.apply_model(x, t, c, features_adapter=features_adapter)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n if append_to_context is not None:\n pad_len = append_to_context.size(1)\n new_unconditional_conditioning = torch.cat(\n [unconditional_conditioning, unconditional_conditioning[:, -pad_len:, :]], dim=1)\n new_c = torch.cat([c, append_to_context], dim=1)\n c_in = torch.cat([new_unconditional_conditioning, new_c])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in, features_adapter=features_adapter).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = 
torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec" } ]
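Aside on the `normal_kl` helper quoted at the top of this context list: it implements the standard closed-form KL divergence between two diagonal Gaussians parameterized by mean and log-variance (the same per-element expression that `DiagonalGaussianDistribution.kl` sums over the channel and spatial dimensions). In the snippet's notation, with logvar = log(sigma^2):

\[
\mathrm{KL}\big(\mathcal{N}(\mu_1,\sigma_1^2)\,\|\,\mathcal{N}(\mu_2,\sigma_2^2)\big)
= \tfrac{1}{2}\Big(-1 + \log\sigma_2^2 - \log\sigma_1^2 + \frac{\sigma_1^2}{\sigma_2^2} + \frac{(\mu_1-\mu_2)^2}{\sigma_2^2}\Big)
\]

which matches the returned expression term by term once sigma^2 is written as exp(logvar).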
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.ema import LitEma from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.models.diffusion.ddim import DDIMSampler
12,452
c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out def decode_first_stage_training(self, z, predict_cids=False, force_not_quantize=False): # print('decoding...') # # def print_message(grad): # print('backward decoding') # # z.register_hook(print_message) if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c, **kwargs) return loss def get_time_with_schedule(self, scheduler, bs): if scheduler == 'linear': t = torch.randint(0, self.num_timesteps, (bs,), device=self.device).long() elif scheduler == 'cosine': t = torch.rand((bs, ), device=self.device) t = torch.cos(torch.pi / 2. * t) * self.num_timesteps t = t.long() elif scheduler == 'cubic': t = torch.rand((bs,), device=self.device) t = (1 - t ** 3) * self.num_timesteps t = t.long() else: raise NotImplementedError t = torch.clamp(t, min=0, max=self.num_timesteps-1) return t def forward(self, x,mask, c, *args, **kwargs): if 't' not in kwargs: t = torch.randint(0, self.num_timesteps, (x.shape[0], ), device=self.device).long() else: t = kwargs.pop('t') return self.p_losses(x,mask, c, t, *args, **kwargs) def apply_model(self, x_noisy, mask,t, cond, return_ids=False, **kwargs): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, mask,t, **cond, **kwargs) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=512, channels=3, log_every_t=100, clip_denoised=True, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) # count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3): linear_start = 0.00085 linear_end = 0.012 # if exists(given_betas): # betas = given_betas # else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] # if len(x.shape) == 
3: # x = x[..., None] # x = rearrange(x, 'b h w c -> b c h w') # x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = 
kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3): linear_start = 0.00085 linear_end = 0.012 super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif 
isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) 
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out def decode_first_stage_training(self, z, predict_cids=False, force_not_quantize=False): # print('decoding...') # # def print_message(grad): # print('backward decoding') # # z.register_hook(print_message) if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c, **kwargs) return loss def get_time_with_schedule(self, scheduler, bs): if scheduler == 'linear': t = torch.randint(0, self.num_timesteps, (bs,), device=self.device).long() elif scheduler == 'cosine': t = torch.rand((bs, ), device=self.device) t = torch.cos(torch.pi / 2. 
* t) * self.num_timesteps t = t.long() elif scheduler == 'cubic': t = torch.rand((bs,), device=self.device) t = (1 - t ** 3) * self.num_timesteps t = t.long() else: raise NotImplementedError t = torch.clamp(t, min=0, max=self.num_timesteps-1) return t def forward(self, x,mask, c, *args, **kwargs): if 't' not in kwargs: t = torch.randint(0, self.num_timesteps, (x.shape[0], ), device=self.device).long() else: t = kwargs.pop('t') return self.p_losses(x,mask, c, t, *args, **kwargs) def apply_model(self, x_noisy, mask,t, cond, return_ids=False, **kwargs): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, mask,t, **cond, **kwargs) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
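For context on this gold next line: `_prior_bpd`, whose body ends just above, measures how far the terminal noising distribution q(x_T | x_0) is from the standard-normal prior. A minimal sketch of the conventional follow-up in improved-diffusion-style code, which reduces this KL to bits per dimension; the `mean_flat` helper here mirrors the one imported from ldm.util, and this is an illustration only, not necessarily the continuation stored in this sample:

import numpy as np
import torch

def mean_flat(tensor: torch.Tensor) -> torch.Tensor:
    # average over every non-batch dimension, giving one value per batch element
    return tensor.mean(dim=list(range(1, tensor.dim())))

def prior_bits_per_dim(kl_prior: torch.Tensor) -> torch.Tensor:
    # kl_prior has shape [N, C, ...] in nats; dividing by ln(2) converts to bits
    return mean_flat(kl_prior) / np.log(2.0)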
9
2023-10-19 05:08:12+00:00
16k
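The DDPM/LatentDiffusion code carried in this record's all_code field centres on the closed-form forward process defined by register_schedule, q_sample and get_v. In the usual DDPM notation, with the cumulative product \(\bar\alpha_t = \prod_{s \le t} \alpha_s\) registered as alphas_cumprod:

\[
q(x_t \mid x_0) = \mathcal{N}\!\big(\sqrt{\bar\alpha_t}\,x_0,\ (1-\bar\alpha_t)\,I\big),
\qquad
x_t = \sqrt{\bar\alpha_t}\,x_0 + \sqrt{1-\bar\alpha_t}\,\epsilon,
\]

and the "v" target constructed by get_v (and inverted by predict_start_from_z_and_v) is

\[
v = \sqrt{\bar\alpha_t}\,\epsilon - \sqrt{1-\bar\alpha_t}\,x_0 .
\]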
e4s2023/E4S2023
training/coach.py
[ { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor(var, add_c_dim: bool = False, norm: bool = True, std: bool = False):\ndef tensor2map(var,shown_mask_indices=None):\ndef vis_mask_in_color(mask):\ndef get_colors():\ndef vis_faces(log_hooks1):\ndef vis_faces_no_id(hooks_dict1, fig, gs, i):\ndef aggregate_loss_dict(agg_loss_dict):\ndef labelMap2OneHot(label, num_cls):\ndef remove_module_prefix(state_dict,prefix):\ndef requires_grad(model, flag=True):\ndef accumulate(model1, model2, decay=0.999):\n C, H, W = tensor.size()" }, { "identifier": "CelebAHQDataset", "path": "datasets/dataset.py", "snippet": "class CelebAHQDataset(Dataset):\n \"\"\"\n CelebA-HQ数据集,具体数据来自于 https://github.com/ZPdesu/SEAN\n \"\"\"\n def __init__(self, dataset_root, mode=\"test\",\n img_transform=TO_TENSOR, label_transform=TO_TENSOR,\n load_vis_img=False, fraction=1.0,\n flip_p=-1, # negative means not flipping\n specific_ids: Union[list, tuple] = None,\n paired: bool = False,\n shuffle: bool = False,\n ):\n assert mode in (\"train\", \"test\", \"all\"), \"CelebAHQDataset mode type unsupported!\"\n self.mode = mode\n if mode in (\"all\",):\n self.roots = [osp.join(dataset_root, \"train\"), osp.join(dataset_root, \"test\")]\n else:\n self.roots = [osp.join(dataset_root, self.mode)]\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.load_vis_img = load_vis_img\n self.fraction = fraction\n self.flip_p = flip_p\n self.paired = paired\n\n self.imgs = []\n self.labels = []\n self.labels_vis = []\n for root in self.roots:\n imgs = sorted(make_dataset(osp.join(root, \"images\")))\n imgs = imgs[:int(len(imgs)*self.fraction)]\n\n labels = sorted(make_dataset(osp.join(root, \"labels\")))\n labels = labels[:int(len(labels)*self.fraction)]\n\n labels_vis = sorted(make_dataset(osp.join(root, \"vis\"))) if self.load_vis_img else None\n labels_vis = labels_vis[:int(len(labels_vis)*self.fraction)] if self.load_vis_img else []\n\n self.imgs.extend(imgs)\n self.labels.extend(labels)\n self.labels_vis.extend(labels_vis)\n\n self.imgs, self.labels, self.labels_vis = self._filter_specific_ids(specific_ids)\n\n if self.load_vis_img:\n assert len(self.imgs) == len(self.labels) == len(self.labels_vis)\n else:\n assert len(self.imgs) == len(self.labels)\n\n print(f\"[CelebAHQDataset] files loaded. mode={self.mode}, #imgs={len(self.imgs)}, \"\n f\"#labels={len(self.labels)}, #vis={len(self.labels_vis)}\")\n\n # # 优化 600 个iteration 的style code保存路径\n # self.optim_codes_dir = \"/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/v0_8_stage2_entypeSEAN/optim_Results\"\n \n # image pairs indices\n self.indices = np.arange(len(self.imgs))\n\n # TODO: shuffle the indices\n if shuffle:\n np.random.shuffle(self.indices)\n\n self.pair_indices = self.indices.reshape(-1, 2)\n\n def __len__(self):\n if not self.paired:\n return len(self.indices)\n else:\n return len(self.pair_indices)\n\n def _filter_specific_ids(self, specific_ids: tuple):\n \"\"\" filter the images according to the specific_ids\n \"\"\"\n if specific_ids is None:\n return self.imgs, self.labels, self.labels_vis\n elif self.fraction < 1.0:\n raise ValueError(\"[CelebAHQDataset] specific_ids and fraction cannot be set simultaneously!\")\n\n # parse the tuple into two lists, e.g. 
((\"train\",\"12\"), (\"test\",\"45\")) -> (\"train\",\"train\") and (\"12\",\"45\")\n spec_modes, spec_ids = [], []\n id_order_dict = {}\n for idx, spec_id in enumerate(specific_ids):\n one_mode, one_id = spec_id[0], spec_id[1]\n spec_modes.append(one_mode)\n spec_ids.append(one_id)\n id_order_dict[one_id] = {\n \"mode\": one_mode, \"order\": idx,\n }\n\n # filter and re-order\n ret_imgs = [\"\"] * len(specific_ids)\n ret_labels = [\"\"] * len(specific_ids)\n ret_labels_vis = [\"\"] * len(specific_ids)\n found_cnt = 0\n for k in range(len(spec_ids)): # target specific ids\n one_spec_mode = spec_modes[k]\n one_spec_id = spec_ids[k]\n for idx in range(len(self.imgs)): # full dataset\n one_img = self.imgs[idx]\n one_label = self.labels[idx]\n one_label_vis = self.labels_vis[idx] if self.load_vis_img else None\n if one_spec_mode in one_img and one_spec_id == osp.basename(one_img): # found one\n found_cnt += 1\n one_spec_order = id_order_dict[one_spec_id][\"order\"]\n ret_imgs[one_spec_order] = one_img\n ret_labels[one_spec_order] = one_label\n ret_labels_vis[one_spec_order] = one_label_vis\n break\n\n if found_cnt < len(specific_ids):\n print(f\"[[Warning]][CelebAHQDataset] not enough images found (={found_cnt}) for \"\n f\"specific ids (={len(specific_ids)})!\")\n\n ret_imgs = list(filter(None, ret_imgs))\n ret_labels = list(filter(None, ret_labels))\n ret_labels_vis = list(filter(None, ret_labels_vis))\n return ret_imgs, ret_labels, ret_labels_vis\n\n def load_single_image(self, index):\n \"\"\"把一张图片的 原图, seg mask, 以及mask对应可视化的图都加载进来\n Args:\n index (int): 图片的索引\n Return:\n img: RGB图\n label: seg mask\n label_vis: seg mask的可视化图\n \"\"\"\n img_path = self.imgs[index]\n img = Image.open(img_path).convert('RGB')\n if self.img_transform is not None:\n img = self.img_transform(img)\n\n label = self.labels[index]\n # label = osp.join(\"/apdcephfs/share_1290939/zhianliu/py_projects/our_editing/ui_results\",\"%s_mask.png\"%osp.basename(label)[:-4])\n label = Image.open(label).convert('L')\n if self.label_transform is not None:\n label = self.label_transform(label)\n\n if self.load_vis_img:\n label_vis = self.labels_vis[index]\n label_vis = Image.open(label_vis).convert('RGB')\n label_vis = TO_TENSOR(label_vis)\n else:\n label_vis = -1 # unified interface\n return img, label, label_vis, img_path\n\n def _output_item(self, idx):\n if not self.paired:\n index = self.indices[idx]\n img, label, label_vis, img_path = self.load_single_image(index)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img = TF.hflip(img)\n label = TF.hflip(label)\n return img, label, label_vis, img_path\n else:\n index1 = self.indices[idx * 2]\n index2 = self.indices[idx * 2 + 1]\n img1, label1, label_vis1, img_path1 = self.load_single_image(index1)\n img2, label2, label_vis2, img_path2 = self.load_single_image(index2)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img1 = TF.hflip(img1)\n label1 = TF.hflip(label1)\n if random.random() < self.flip_p:\n img2 = TF.hflip(img2)\n label2 = TF.hflip(label2)\n return {\n \"bag1\": (img1, label1, label_vis1, img_path1),\n \"bag2\": (img2, label2, label_vis2, img_path2)\n }\n\n def __getitem__(self, idx):\n return self._output_item(idx)\n \n # # 1阶段重建的图片\n # img_name = osp.basename(self.imgs[index])[:-4]\n # recon_img = Image.open(osp.join(self.optim_codes_dir,img_name,\"%s_recon.png\"%img_name)).convert('RGB')\n # if self.img_transform is not None:\n # recon_img = self.img_transform(recon_img)\n \n # # 优化后的code\n # optim_code_path = 
osp.join(self.optim_codes_dir,img_name,\"%s_0600.npy\"%img_name)\n # assert osp.exists(optim_code_path), \"%s 文件不存在!\"%optim_code_path\n # optimed_style_code = np.load(optim_code_path)[0]\n \n # return img, recon_img, optimed_style_code, label, label_vis\n \n # pair_indices = self.pair_indices[idx, :]\n\n # img1, label1, label_vis1 = self.load_single_image(pair_indices[0])\n # img2, label2, label_vis2 = self.load_single_image(pair_indices[1])\n\n # return (img1, img2), (label1, label2), (label_vis1, label_vis2)" }, { "identifier": "get_transforms", "path": "datasets/dataset.py", "snippet": "def get_transforms(normalize=True, toTensor=True):\n transform_list = []\n if toTensor:\n transform_list += [transforms.ToTensor()]\n\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)" }, { "identifier": "TO_TENSOR", "path": "datasets/dataset.py", "snippet": "TO_TENSOR = transforms.ToTensor()" }, { "identifier": "NORMALIZE", "path": "datasets/dataset.py", "snippet": "NORMALIZE = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))" }, { "identifier": "MASK_CONVERT_TF", "path": "datasets/dataset.py", "snippet": "MASK_CONVERT_TF = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask(celebAHQ_mask))" }, { "identifier": "FFHQDataset", "path": "datasets/dataset.py", "snippet": "class FFHQDataset(Dataset):\n \"\"\"\n FFHQ数据集,提取 mask 的方式参照了Babershop,用的是BiSegNet提取的\n \"\"\"\n\n def __init__(self, dataset_root,\n img_transform=TO_TENSOR, label_transform=TO_TENSOR,\n fraction=1.0,\n load_raw_label=False,\n flip_p = -1):\n\n self.root = dataset_root\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.fraction=fraction\n self.load_raw_label = load_raw_label\n self.flip_p = flip_p\n \n with open(osp.join(self.root,\"images_1024\",\"ffhq_list.txt\"),\"r\") as f:\n f_lines = f.readlines()\n \n self.imgs = sorted([osp.join(self.root, \"images_1024\", line.replace(\"\\n\",\"\")) for line in f_lines])\n self.imgs = self.imgs[:int(len(self.imgs)*self.fraction)]\n self.labels = [img.replace(\"images_1024\",\"BiSeNet_mask\") for img in self.imgs]\n \n assert len(self.imgs) == len(self.labels)\n \n self.indices = np.arange(len(self.imgs))\n\n def __len__(self):\n return len(self.indices)\n\n def load_single_image(self, index):\n \"\"\"把一张图片的 原图, seg mask, 以及mask对应可视化的图都加载进来\n\n Args:\n index (int): 图片的索引\n Return:\n img: RGB图\n label: seg mask\n label_vis: seg mask的可视化图\n \"\"\"\n img = self.imgs[index]\n img = Image.open(img).convert('RGB')\n if self.img_transform is not None:\n img = self.img_transform(img)\n\n label = self.labels[index]\n label = Image.open(label).convert('L')\n \n if self.load_raw_label:\n original_label = TO_TENSOR(label)\n \n if self.label_transform is not None:\n label = self.label_transform(label)\n\n label_vis = -1 # unified interface\n \n if self.load_raw_label:\n return img, original_label, label, label_vis\n else:\n return img, label, label_vis\n \n def __getitem__(self, idx):\n index = self.indices[idx]\n\n img, label, label_vis = self.load_single_image(index)\n \n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img = TF.hflip(img)\n label = TF.hflip(label)\n \n return img, label, label_vis " }, { "identifier": "FFHQ_MASK_CONVERT_TF", "path": "datasets/dataset.py", "snippet": "FFHQ_MASK_CONVERT_TF = transforms.Lambda(\n lambda mask: __ffhq_masks_to_faceParser_mask(mask))" }, { "identifier": "MASK_CONVERT_TF_DETAILED", "path": 
"datasets/dataset.py", "snippet": "MASK_CONVERT_TF_DETAILED = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask_detailed(celebAHQ_mask))" }, { "identifier": "FFHQ_MASK_CONVERT_TF_DETAILED", "path": "datasets/dataset.py", "snippet": "FFHQ_MASK_CONVERT_TF_DETAILED = transforms.Lambda(\n lambda mask: __ffhq_masks_to_faceParser_mask_detailed(mask))" }, { "identifier": "WNormLoss", "path": "criteria/w_norm.py", "snippet": "class WNormLoss(nn.Module):\n\n\tdef __init__(self, start_from_latent_avg=True):\n\t\tsuper(WNormLoss, self).__init__()\n\t\tself.start_from_latent_avg = start_from_latent_avg\n\n\tdef forward(self, latent, latent_avg=None):\n\t\tif self.start_from_latent_avg:\n\t\t\tlatent = latent - latent_avg\n\t\treturn torch.sum(latent.norm(2, dim=(2, 3))) / (latent.shape[0]*latent.shape[1])" }, { "identifier": "IDLoss", "path": "criteria/id_loss.py", "snippet": "class IDLoss(nn.Module):\n def __init__(self,opts):\n super(IDLoss, self).__init__()\n print('Loading ResNet ArcFace')\n self.opts = opts \n \n self.face_pool_1 = torch.nn.AdaptiveAvgPool2d((256, 256))\n self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')\n self.facenet.load_state_dict(torch.load(opts.ir_se50_path))\n self.face_pool_2 = torch.nn.AdaptiveAvgPool2d((112, 112))\n self.facenet.eval()\n \n self.set_requires_grad(False)\n \n def set_requires_grad(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n \n def extract_feats(self, x):\n x = self.face_pool_1(x) if x.shape[2]!=256 else x # (1) resize to 256 if needed\n x = x[:, :, 35:223, 32:220] # (2) Crop interesting region\n x = self.face_pool_2(x) # (3) resize to 112 to fit pre-trained model\n x_feats = self.facenet(x, multi_scale=self.opts.id_loss_multiscale)\n return x_feats\n\n def forward(self, y_hat, y):\n n_samples = y.shape[0]\n y_feats_ms = self.extract_feats(y) # Otherwise use the feature from there\n y_hat_feats_ms = self.extract_feats(y_hat)\n y_feats_ms = [y_f.detach() for y_f in y_feats_ms] # 各个层的特征\n \n loss_all = 0\n sim_improvement_all = 0\n # 不同尺度\n for y_hat_feats, y_feats in zip(y_hat_feats_ms, y_feats_ms):\n \n loss = 0\n sim_improvement = 0\n count = 0\n # 不同的sample\n for i in range(n_samples):\n sim_target = y_hat_feats[i].dot(y_feats[i])\n sim_views = y_feats[i].dot(y_feats[i])\n \n loss += 1 - sim_target # id loss\n sim_improvement += float(sim_target) - float(sim_views)\n count += 1\n \n loss_all += loss / count\n sim_improvement_all += sim_improvement / count\n \n return loss_all, sim_improvement_all, None" }, { "identifier": "FaceParsingLoss", "path": "criteria/face_parsing/face_parsing_loss.py", "snippet": "class FaceParsingLoss(nn.Module):\n def __init__(self,opts):\n super(FaceParsingLoss, self).__init__()\n print('Loading Face Parsing Net')\n \n self.opts = opts\n self.face_pool = torch.nn.AdaptiveAvgPool2d((512, 512))\n \n self.G = unet()\n self.G.load_state_dict(torch.load(opts.face_parsing_model_path))\n self.G.eval()\n \n self.set_requires_grad(False)\n \n def set_requires_grad(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n \n\n def inference(self, x):\n x = self.face_pool(x) if x.shape[2]!=512 else x # resize to 512 if needed\n labels_predict = self.G(x)\n \n labels_predict_plain = generate_label_plain(labels_predict,imsize=512) # np.array [N,H,W]\n labels_predict_color = generate_label(labels_predict,imsize=512) # torch.Tensor [N,3,H,W]\n \n return labels_predict_plain, labels_predict_color\n \n def extract_feats(self, x):\n 
x = self.face_pool(x) if x.shape[2]!=512 else x # resize to 512 if needed\n x_feats = self.G.extract_feats(x)\n return x_feats\n\n def forward(self, y_hat, y):\n n_samples = y.shape[0]\n y_feats_ms = self.extract_feats(y) # Otherwise use the feature from there\n y_hat_feats_ms = self.extract_feats(y_hat)\n y_feats_ms = [y_f.detach() for y_f in y_feats_ms] # 各个层的特征\n \n loss_all = 0\n sim_improvement_all = 0\n # 不同尺度\n for y_hat_feats, y_feats in zip(y_hat_feats_ms, y_feats_ms):\n loss = 0\n sim_improvement = 0\n count = 0\n # 不同的sample\n for i in range(n_samples):\n sim_target = y_hat_feats[i].dot(y_feats[i])\n sim_views = y_feats[i].dot(y_feats[i])\n \n loss += 1 - sim_target # id loss\n sim_improvement += float(sim_target) - float(sim_views)\n count += 1\n \n loss_all += loss / count\n sim_improvement_all += sim_improvement / count\n \n return loss_all, sim_improvement_all" }, { "identifier": "LPIPS", "path": "criteria/lpips/lpips.py", "snippet": "class LPIPS(nn.Module):\n r\"\"\"Creates a criterion that measures\n Learned Perceptual Image Patch Similarity (LPIPS).\n Arguments:\n net_type (str): the network type to compare the features:\n 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.\n version (str): the version of LPIPS. Default: 0.1.\n \"\"\"\n def __init__(self, net_type: str = 'alex', version: str = '0.1'):\n\n assert version in ['0.1'], 'v0.1 is only supported now'\n\n super(LPIPS, self).__init__()\n\n # pretrained network\n self.net = get_network(net_type)\n\n # linear layers\n self.lin = LinLayers(self.net.n_channels_list)\n self.lin.load_state_dict(get_state_dict(net_type, version))\n\n def forward(self, x: torch.Tensor, y: torch.Tensor):\n feat_x, feat_y = self.net(x), self.net(y)\n\n diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]\n res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]\n\n return torch.sum(torch.cat(res, 0)) / x.shape[0]" }, { "identifier": "AdvDLoss", "path": "criteria/adv_loss.py", "snippet": "class AdvDLoss(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper(AdvDLoss, self).__init__()\n\n\tdef forward(self, real_pred, fake_pred):\n\t\treal_loss = F.softplus(-real_pred)\n\t\tfake_loss = F.softplus(fake_pred)\n\t\treturn real_loss.mean() + fake_loss.mean()" }, { "identifier": "AdvGLoss", "path": "criteria/adv_loss.py", "snippet": "class AdvGLoss(nn.Module):\n\n\tdef __init__(self):\n\t\tsuper(AdvGLoss, self).__init__()\n\n\tdef forward(self, fake_pred):\n\t\tloss = F.softplus(-fake_pred).mean()\n\t\treturn loss" }, { "identifier": "DR1Loss", "path": "criteria/adv_loss.py", "snippet": "class DR1Loss(nn.Module):\n def __init__(self):\n super(DR1Loss, self).__init__()\n\n def forward(self,real_pred, real_img):\n with conv2d_gradfix.no_weight_gradients():\n grad_real, = autograd.grad(\n outputs=real_pred.sum(), inputs=real_img, create_graph=True\n )\n grad_penalty = grad_real.pow(2).reshape(grad_real.shape[0], -1).sum(1).mean()\n\n return grad_penalty" }, { "identifier": "GPathRegularizer", "path": "criteria/adv_loss.py", "snippet": "class GPathRegularizer(nn.Module):\n def __init__(self):\n super(GPathRegularizer, self).__init__()\n \n def forward(self, fake_img, latents, mean_path_length, decay=0.01):\n noise = torch.randn_like(fake_img) / math.sqrt(\n fake_img.shape[2] * fake_img.shape[3]\n )\n grad, = autograd.grad(\n outputs=(fake_img * noise).sum(), inputs=latents, create_graph=True\n )\n path_lengths = torch.sqrt(grad.pow(2).sum(2).mean(1))\n\n path_mean = mean_path_length + decay * (path_lengths.mean() - mean_path_length)\n\n path_penalty = 
(path_lengths - path_mean).pow(2).mean()\n\n return path_penalty, path_mean.detach(), path_lengths" }, { "identifier": "StyleLoss", "path": "criteria/style_loss.py", "snippet": "class StyleLoss(nn.Module):\n def __init__(self, VGG16_ACTIVATIONS_LIST=[21], normalize=False, distance=\"l2\", in_size=256):\n\n super(StyleLoss, self).__init__()\n\n self.vgg16_act = VGG16_Activations(VGG16_ACTIVATIONS_LIST)\n self.vgg16_act.eval()\n\n ## ===== 修改 =====\n self.in_size = in_size\n # self.upsample2d = nn.Upsample(scale_factor=256 / in_size, mode=\"bilinear\", align_corners=True)\n ## ================\n \n self.normalize = normalize\n self.distance = distance\n\n def normalize_img(self, x):\n \"\"\"\n 将x的范围变到 适配 VGG 输入范围 \n \n https://pytorch.org/vision/stable/models.html\n \n x: [bs,3,H,W] 假设范围是 [-1,1]\n \"\"\"\n x = (x + 1) / 2\n \n mean = torch.from_numpy(VGG_MEAN).view(1,3,1,1).to(x.device)\n std = torch.from_numpy(VGG_STD).view(1,3,1,1).to(x.device)\n \n x = (x - mean) / std\n \n return x\n \n def forward(self, x, x_hat, mask_x=None, mask_x_hat=None):\n # x = x.cuda()\n # x_hat = x_hat.cuda()\n # resize images to 256px resolution\n \n N, C, H, W = x.shape\n \n # x = self.upsample2d(x)\n # x_hat = self.upsample2d(x_hat)\n \n x = F.interpolate(x, size=(256,256), mode=\"bilinear\")\n x_hat = F.interpolate(x_hat, size=(256,256), mode=\"bilinear\")\n\n if self.normalize:\n x = self.normalize_img(x)\n x_hat = self.normalize_img(x_hat)\n \n loss = self.cal_style(self.vgg16_act, x, x_hat, mask_x=mask_x, mask_x_hat=mask_x_hat)\n\n return loss\n\n def cal_style(self, model, x, x_hat, mask_x=None, mask_x_hat=None):\n # Get features from the model for x and x_hat\n \n # with torch.no_grad():\n # act_x = self.get_features(model, x)\n # for layer in range(0, len(act_x)):\n # act_x[layer].detach_()\n \n # mask 图片\n if mask_x is not None:\n assert mask_x_hat is not None, \"mask_x_hat 必须存在!\"\n H, W = x.size(2), x.size(3)\n mask_x = F.interpolate(mask_x, size=(H,W),mode=\"bilinear\")\n x = x * mask_x\n \n mask_x_hat = F.interpolate(mask_x_hat, size=(H,W),mode=\"bilinear\")\n x_hat = x_hat * mask_x_hat\n\n act_x = self.get_features(model, x)\n act_x_hat = self.get_features(model, x_hat)\n\n loss = 0.0\n for layer in range(0, len(act_x)):\n # # mask features if present\n # if mask_x is not None:\n # feat_x = self.mask_features(act_x[layer], mask_x)\n # else:\n # feat_x = act_x[layer]\n \n # if mask_x_hat is not None:\n # feat_x_hat = self.mask_features(act_x_hat[layer], mask_x_hat)\n # else:\n # feat_x_hat = act_x_hat[layer]\n \n feat_x = act_x[layer]\n feat_x_hat = act_x_hat[layer]\n\n \"\"\" 可视化 feature maps\n import ipdb; ipdb.set_trace()\n fx = feat_x[0, ...].detach().cpu().numpy()\n fx = (fx - fx.min()) / (fx.max() - fx.min())\n fx = fx * 255.\n fxhat = feat_x_hat[0, ...].detach().cpu().numpy()\n fxhat = (fxhat - fxhat.min()) / (fxhat.max() - fxhat.min())\n fxhat = fxhat * 255\n from PIL import Image\n import numpy as np\n for idx, img in enumerate(fx):\n img = fx[idx, ...]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img.save('plot/feat_x/{}.png'.format(str(idx)))\n img = fxhat[idx, ...]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img.save('plot/feat_x_hat/{}.png'.format(str(idx)))\n import ipdb; ipdb.set_trace()\n \"\"\"\n\n # compute Gram matrix for x and x_hat\n G_x = self.gram_matrix(feat_x)\n G_x_hat = self.gram_matrix(feat_x_hat)\n\n # compute layer wise loss and aggregate\n loss += custom_loss(\n G_x, G_x_hat, mask=None, loss_type=self.distance, include_bkgd=True\n 
)\n\n loss = loss / len(act_x)\n\n return loss\n\n def get_features(self, model, x):\n\n return model(x)\n\n def mask_features(self, x, mask):\n\n mask = prepare_mask(x, mask)\n return x * mask\n\n def gram_matrix(self, x):\n \"\"\"\n :x is an activation tensor\n \"\"\"\n N, C, H, W = x.shape\n x = x.view(N * C, H * W)\n G = torch.mm(x, x.t())\n\n return G.div(N * H * W * C)" }, { "identifier": "Ranger", "path": "training/ranger.py", "snippet": "class Ranger(Optimizer):\n\n\tdef __init__(self, params, lr=1e-3, # lr\n\t\t\t\t alpha=0.5, k=6, N_sma_threshhold=5, # Ranger options\n\t\t\t\t betas=(.95, 0.999), eps=1e-5, weight_decay=0, # Adam options\n\t\t\t\t use_gc=True, gc_conv_only=False\n\t\t\t\t # Gradient centralization on or off, applied to conv layers only or conv + fc layers\n\t\t\t\t ):\n\n\t\t# parameter checks\n\t\tif not 0.0 <= alpha <= 1.0:\n\t\t\traise ValueError(f'Invalid slow update rate: {alpha}')\n\t\tif not 1 <= k:\n\t\t\traise ValueError(f'Invalid lookahead steps: {k}')\n\t\tif not lr > 0:\n\t\t\traise ValueError(f'Invalid Learning Rate: {lr}')\n\t\tif not eps > 0:\n\t\t\traise ValueError(f'Invalid eps: {eps}')\n\n\t\t# parameter comments:\n\t\t# beta1 (momentum) of .95 seems to work better than .90...\n\t\t# N_sma_threshold of 5 seems better in testing than 4.\n\t\t# In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5) to make sure which works best for you.\n\n\t\t# prep defaults and init torch.optim base\n\t\tdefaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold,\n\t\t\t\t\t\teps=eps, weight_decay=weight_decay)\n\t\tsuper().__init__(params, defaults)\n\n\t\t# adjustable threshold\n\t\tself.N_sma_threshhold = N_sma_threshhold\n\n\t\t# look ahead params\n\n\t\tself.alpha = alpha\n\t\tself.k = k\n\n\t\t# radam buffer for state\n\t\tself.radam_buffer = [[None, None, None] for ind in range(10)]\n\n\t\t# gc on or off\n\t\tself.use_gc = use_gc\n\n\t\t# level of gradient centralization\n\t\tself.gc_gradient_threshold = 3 if gc_conv_only else 1\n\n\tdef __setstate__(self, state):\n\t\tsuper(Ranger, self).__setstate__(state)\n\n\tdef step(self, closure=None):\n\t\tloss = None\n\n\t\t# Evaluate averages and grad, update param tensors\n\t\tfor group in self.param_groups:\n\n\t\t\tfor p in group['params']:\n\t\t\t\tif p.grad is None:\n\t\t\t\t\tcontinue\n\t\t\t\tgrad = p.grad.data.float()\n\n\t\t\t\tif grad.is_sparse:\n\t\t\t\t\traise RuntimeError('Ranger optimizer does not support sparse gradients')\n\n\t\t\t\tp_data_fp32 = p.data.float()\n\n\t\t\t\tstate = self.state[p] # get state dict for this param\n\n\t\t\t\tif len(state) == 0: # if first time to run...init dictionary with our desired entries\n\t\t\t\t\t# if self.first_run_check==0:\n\t\t\t\t\t# self.first_run_check=1\n\t\t\t\t\t# print(\"Initializing slow buffer...should not see this at load from saved model!\")\n\t\t\t\t\tstate['step'] = 0\n\t\t\t\t\tstate['exp_avg'] = torch.zeros_like(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = torch.zeros_like(p_data_fp32)\n\n\t\t\t\t\t# look ahead weight storage now in state dict\n\t\t\t\t\tstate['slow_buffer'] = torch.empty_like(p.data)\n\t\t\t\t\tstate['slow_buffer'].copy_(p.data)\n\n\t\t\t\telse:\n\t\t\t\t\tstate['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)\n\t\t\t\t\tstate['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)\n\n\t\t\t\t# begin computations\n\t\t\t\texp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']\n\t\t\t\tbeta1, beta2 = group['betas']\n\n\t\t\t\t# GC operation for Conv layers and FC 
layers\n\t\t\t\tif grad.dim() > self.gc_gradient_threshold:\n\t\t\t\t\tgrad.add_(-grad.mean(dim=tuple(range(1, grad.dim())), keepdim=True))\n\n\t\t\t\tstate['step'] += 1\n\n\t\t\t\t# compute variance mov avg\n\t\t\t\texp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)\n\t\t\t\t# compute mean moving avg\n\t\t\t\texp_avg.mul_(beta1).add_(1 - beta1, grad)\n\n\t\t\t\tbuffered = self.radam_buffer[int(state['step'] % 10)]\n\n\t\t\t\tif state['step'] == buffered[0]:\n\t\t\t\t\tN_sma, step_size = buffered[1], buffered[2]\n\t\t\t\telse:\n\t\t\t\t\tbuffered[0] = state['step']\n\t\t\t\t\tbeta2_t = beta2 ** state['step']\n\t\t\t\t\tN_sma_max = 2 / (1 - beta2) - 1\n\t\t\t\t\tN_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)\n\t\t\t\t\tbuffered[1] = N_sma\n\t\t\t\t\tif N_sma > self.N_sma_threshhold:\n\t\t\t\t\t\tstep_size = math.sqrt(\n\t\t\t\t\t\t\t(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (\n\t\t\t\t\t\t\t\t\t\tN_sma_max - 2)) / (1 - beta1 ** state['step'])\n\t\t\t\t\telse:\n\t\t\t\t\t\tstep_size = 1.0 / (1 - beta1 ** state['step'])\n\t\t\t\t\tbuffered[2] = step_size\n\n\t\t\t\tif group['weight_decay'] != 0:\n\t\t\t\t\tp_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)\n\n\t\t\t\t# apply lr\n\t\t\t\tif N_sma > self.N_sma_threshhold:\n\t\t\t\t\tdenom = exp_avg_sq.sqrt().add_(group['eps'])\n\t\t\t\t\tp_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)\n\t\t\t\telse:\n\t\t\t\t\tp_data_fp32.add_(-step_size * group['lr'], exp_avg)\n\n\t\t\t\tp.data.copy_(p_data_fp32)\n\n\t\t\t\t# integrated look ahead...\n\t\t\t\t# we do it at the param level instead of group level\n\t\t\t\tif state['step'] % group['k'] == 0:\n\t\t\t\t\tslow_p = state['slow_buffer'] # get access to slow param tensor\n\t\t\t\t\tslow_p.add_(self.alpha, p.data - slow_p) # (fast weights - slow weights) * alpha\n\t\t\t\t\tp.data.copy_(slow_p) # copy interpolated weights to RAdam param tensor\n\n\t\treturn loss" }, { "identifier": "Net", "path": "models/networks.py", "snippet": "class LocalMLP(nn.Module):\nclass Net3(nn.Module):\n def __init__(self, dim_component=512, dim_style=512, num_w_layers=18,latent_squeeze_ratio=1):\n def forward(self, x):\n def __init__(self,opts,):\n def forward(self, img,mask, resize=False, randomize_noise=True,return_latents=False):\n def get_style(self, img, mask):\n def get_style_vectors(self, img, mask):\n def cal_style_codes(self,style_vectors):\n def gen_img(self, struc_codes, style_codes, mask, randomize_noise=True, noise=None, return_latents=False):" }, { "identifier": "Generator", "path": "models/stylegan2/model.py", "snippet": "class Generator(nn.Module):\n def __init__(\n self,\n size,\n style_dim,\n n_mlp,\n channel_multiplier=2,\n blur_kernel=[1, 3, 3, 1],\n lr_mlp=0.01,\n split_layer_idx = 7, \n remaining_layer_idx = 18, \n ):\n super().__init__()\n self.split_layer_idx = split_layer_idx\n self.remaining_layer_idx = remaining_layer_idx\n self.size = size\n\n self.style_dim = style_dim\n\n layers = [PixelNorm()]\n\n for i in range(n_mlp):\n layers.append(\n EqualLinear(\n style_dim, style_dim, lr_mul=lr_mlp, activation=\"fused_lrelu\"\n )\n )\n\n self.style = nn.Sequential(*layers)\n\n self.channels = {\n 4: 512,\n 8: 512,\n 16: 512,\n 32: 512,\n 64: 256 * channel_multiplier,\n 128: 128 * channel_multiplier,\n 256: 64 * channel_multiplier,\n 512: 32 * channel_multiplier,\n 1024: 16 * channel_multiplier,\n }\n\n self.input = ConstantInput(self.channels[4])\n self.conv1 = StyledConv(\n self.channels[4], self.channels[4], 3, 
style_dim, blur_kernel=blur_kernel,\n mask_op = True\n )\n self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False,\n mask_op = True\n )\n\n self.log_size = int(math.log(size, 2))\n self.num_layers = (self.log_size - 2) * 2 + 1\n\n self.convs = nn.ModuleList()\n self.upsamples = nn.ModuleList()\n self.to_rgbs = nn.ModuleList()\n self.noises = nn.Module()\n\n in_channel = self.channels[4]\n\n for layer_idx in range(self.num_layers):\n res = (layer_idx + 5) // 2\n shape = [1, 1, 2 ** res, 2 ** res]\n self.noises.register_buffer(\n f\"noise_{layer_idx}\", torch.randn(*shape))\n\n for i in range(3, self.log_size + 1):\n out_channel = self.channels[2 ** i]\n\n self.convs.append(\n StyledConv(\n in_channel,\n out_channel,\n 3,\n style_dim,\n upsample=True,\n blur_kernel=blur_kernel,\n mask_op= False if i > (2+self.remaining_layer_idx//2) else True,\n # mask_op = True\n )\n )\n\n self.convs.append(\n StyledConv(\n out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel,\n mask_op= False if i > (2+self.remaining_layer_idx//2) else True,\n # mask_op = True\n )\n )\n\n self.to_rgbs.append(\n ToRGB(\n out_channel, style_dim, \n mask_op= False if self.remaining_layer_idx != 17 and i >= (2+self.remaining_layer_idx//2) else True, # 这里一定是大于等于\n # mask_op = True\n )\n )\n\n in_channel = out_channel\n\n self.n_latent = self.log_size * 2 - 2\n\n def make_noise(self):\n device = self.input.input.device\n\n noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]\n\n for i in range(3, self.log_size + 1):\n for _ in range(2):\n noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))\n\n return noises\n\n def mean_latent(self, n_latent):\n latent_in = torch.randn(\n n_latent, self.style_dim, device=self.input.input.device\n )\n latent = self.style(latent_in).mean(0, keepdim=True)\n\n return latent\n\n def get_latent(self, input):\n return self.style(input)\n\n def forward(\n self,\n styles,\n structure_feats, # 第7层输出的残差\n mask,\n return_latents=False,\n inject_index=None,\n truncation=1,\n truncation_latent=None,\n input_is_latent=False, # 输入是否是W空间的latetnt code\n noise=None,\n randomize_noise=True,\n use_structure_code=False,\n ):\n if not input_is_latent:\n styles = [self.style(s) for s in styles] # 两个随机的z得到的对应的两组styles\n\n if noise is None:\n if randomize_noise:\n noise = [None] * self.num_layers\n else:\n noise = [\n getattr(self.noises, f\"noise_{i}\") for i in range(self.num_layers)\n ]\n\n if truncation < 1:\n style_t = []\n\n for style in styles:\n style_t.append(\n truncation_latent + truncation * (style - truncation_latent)\n )\n\n styles = style_t\n\n if len(styles) < 2:\n inject_index = self.n_latent\n\n if styles[0].ndim < 4:\n latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n\n else:\n latent = styles[0]\n\n else:\n if inject_index is None: # 选择出两组style交叉交换的位置, TODO 还没改成多个compnent的\n inject_index = random.randint(1, self.n_latent - 1)\n\n latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)\n\n latent = torch.cat([latent, latent2], 1) # 交叉后style\n\n # constant层,其实这里没必要传入latent code,只是为了获取batch size\n out = self.input(latent)\n out = self.conv1(out, latent[:, :, 0], mask, noise=noise[0])\n skip = self.to_rgb1(out, latent[:, :, 1], mask) # 重复使用 latent?\n\n i = 1\n for conv1, conv2, noise1, noise2, to_rgb in zip(\n self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs\n ):\n \n if i<self.remaining_layer_idx: \n out = conv1(out, latent[:, :, i], mask, 
noise=noise1)\n \n if i+2 == self.split_layer_idx:\n if use_structure_code:\n # skip = torch.zeros_like(skip)\n out = structure_feats # 第5层的特征\n \n intermediate_feats = out\n \n out = conv2(out, latent[:, :, i + 1], mask, noise=noise2)\n if self.remaining_layer_idx == 17 or i+2 != self.remaining_layer_idx: \n skip = to_rgb(out, latent[:, :, i + 2], mask, skip)\n else:\n skip = to_rgb(out, latent[:, 0, i + 2], mask, skip) \n else:\n out = conv1(out, latent[:, 0, i], mask, noise=noise1)\n out = conv2(out, latent[:, 0, i + 1], mask, noise=noise2)\n skip = to_rgb(out, latent[:, 0, i + 2], mask, skip)\n\n i += 2\n\n image = skip\n\n if return_latents:\n return image, latent,intermediate_feats\n\n else:\n return image, None,intermediate_feats" }, { "identifier": "Discriminator", "path": "models/stylegan2/model.py", "snippet": "class Discriminator(nn.Module):\n def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):\n super().__init__()\n\n channels = {\n 4: 512,\n 8: 512,\n 16: 512,\n 32: 512,\n 64: 256 * channel_multiplier,\n 128: 128 * channel_multiplier,\n 256: 64 * channel_multiplier,\n 512: 32 * channel_multiplier,\n 1024: 16 * channel_multiplier,\n }\n\n convs = [ConvLayer(3, channels[size], 1)]\n\n log_size = int(math.log(size, 2))\n\n in_channel = channels[size]\n\n for i in range(log_size, 2, -1):\n out_channel = channels[2 ** (i - 1)]\n\n convs.append(ResBlock(in_channel, out_channel, blur_kernel))\n\n in_channel = out_channel\n\n self.convs = nn.Sequential(*convs)\n\n self.stddev_group = 4\n self.stddev_feat = 1\n\n self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)\n self.final_linear = nn.Sequential(\n EqualLinear(channels[4] * 4 * 4, channels[4],\n activation=\"fused_lrelu\"),\n EqualLinear(channels[4], 1),\n )\n\n def forward(self, input):\n out = self.convs(input)\n\n batch, channel, height, width = out.shape\n group = min(batch, self.stddev_group)\n stddev = out.view(\n group, -1, self.stddev_feat, channel // self.stddev_feat, height, width\n )\n stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)\n stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)\n stddev = stddev.repeat(group, 1, height, width)\n out = torch.cat([out, stddev], 1)\n\n out = self.final_conv(out)\n\n out = out.view(batch, -1)\n out = self.final_linear(out)\n\n return out" } ]
from utils import torch_utils
from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE, MASK_CONVERT_TF, FFHQDataset, FFHQ_MASK_CONVERT_TF, MASK_CONVERT_TF_DETAILED, FFHQ_MASK_CONVERT_TF_DETAILED
from criteria.w_norm import WNormLoss
from criteria.id_loss import IDLoss
from criteria.face_parsing.face_parsing_loss import FaceParsingLoss
from criteria.lpips.lpips import LPIPS
from criteria.adv_loss import AdvDLoss, AdvGLoss, DR1Loss, GPathRegularizer
from criteria.style_loss import StyleLoss
from training.ranger import Ranger
from models.networks import Net, Net2, Net3, NetStage2, MultiScaleNet
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torch import nn
from models.stylegan2.model import Generator, Discriminator
from collections import OrderedDict
from models.encoder_with_optim import EncoderPlusOptimNet
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch
import os
import matplotlib
import matplotlib.pyplot as plt
import torch.distributed as dist
import math
11,811
matplotlib.use('Agg')

# torch.autograd.set_detect_anomaly(True)
ACCUM = 0.5 ** (32 / (100 * 1000))  # 0.9977843871238888


class Coach:
    def __init__(self, opts):
        self.opts = opts

        self.global_step = 0

        # distributed training
        if self.opts.dist_train:
            self.num_gpus = torch.cuda.device_count()
            self.rank = int(os.environ["RANK"])
            self.world_size = int(os.environ["WORLD_SIZE"])
            self.local_rank = int(os.environ["LOCAL_RANK"])

            torch.cuda.set_device(self.rank % self.num_gpus)
            dist.init_process_group(
                backend='nccl',
                world_size=self.world_size,
                rank=self.rank,
            )
            self.device = torch.device("cuda", self.local_rank)

        else:
            self.rank = 0  # dummy rank
            self.device = torch.device("cuda", 0)

        self.opts.device = self.device

        # ==== Initialize network ====
        self.net = Net3(self.opts)
        # print(self.device)
        self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net)
        self.net = self.net.to(self.device)

        self.net_ema = Net3(self.opts).to(self.device).eval()
        torch_utils.accumulate(self.net_ema, self.net, 0)

        if self.opts.train_D:
matplotlib.use('Agg')

# torch.autograd.set_detect_anomaly(True)
ACCUM = 0.5 ** (32 / (100 * 1000))  # 0.9977843871238888


class Coach:
    def __init__(self, opts):
        self.opts = opts

        self.global_step = 0

        # distributed training
        if self.opts.dist_train:
            self.num_gpus = torch.cuda.device_count()
            self.rank = int(os.environ["RANK"])
            self.world_size = int(os.environ["WORLD_SIZE"])
            self.local_rank = int(os.environ["LOCAL_RANK"])

            torch.cuda.set_device(self.rank % self.num_gpus)
            dist.init_process_group(
                backend='nccl',
                world_size=self.world_size,
                rank=self.rank,
            )
            self.device = torch.device("cuda", self.local_rank)

        else:
            self.rank = 0  # dummy rank
            self.device = torch.device("cuda", 0)

        self.opts.device = self.device

        # ==== Initialize network ====
        self.net = Net3(self.opts)
        # print(self.device)
        self.net = nn.SyncBatchNorm.convert_sync_batchnorm(self.net)
        self.net = self.net.to(self.device)

        self.net_ema = Net3(self.opts).to(self.device).eval()
        torch_utils.accumulate(self.net_ema, self.net, 0)

        if self.opts.train_D:
self.D = Discriminator(self.opts.out_size).to(self.device).eval()
22
2023-10-15 12:15:01+00:00
16k
sotopia-lab/sotopia
examples/fix_missing_episodes.py
[ { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action" }, { "identifier": "EnvAgentComboStorage", "path": "sotopia/database/env_agent_combo_storage.py", "snippet": "class EnvAgentComboStorage(JsonModel):\n env_id: str = Field(default_factory=lambda: \"\", index=True)\n agent_ids: list[str] = Field(default_factory=lambda: [], index=True)" }, { "identifier": "EpisodeLog", "path": "sotopia/database/logs.py", "snippet": "class EpisodeLog(JsonModel):\n # Note that we did not validate the following constraints:\n # 1. The number of turns in messages and rewards should be the same or off by 1\n # 2. 
The agents in the messages are the same as the agetns\n\n environment: str = Field(index=True)\n agents: list[str] = Field(index=True)\n tag: str | None = Field(index=True)\n models: list[str] | None = Field(index=True)\n messages: list[list[tuple[str, str, str]]] # Messages arranged by turn\n reasoning: str\n rewards: list[\n tuple[float, dict[str, float]] | float\n ] # Rewards arranged by turn\n rewards_prompt: str\n\n @root_validator\n def agent_number_message_number_reward_number_turn_number_match(\n cls, values: Any\n ) -> Any:\n agents, _, reasoning, rewards = (\n values.get(\"agents\"),\n values.get(\"messages\"),\n values.get(\"reasoning\"),\n values.get(\"rewards\"),\n )\n agent_number = len(agents)\n\n assert (\n len(rewards) == agent_number\n ), f\"Number of agents in rewards {len(rewards)} and agents {agent_number} do not match\"\n return values\n\n def render_for_humans(self) -> tuple[list[AgentProfile], list[str]]:\n \"\"\"Generate a human readable version of the episode log.\n\n Returns:\n A tuple of (a list of agent_profiles, a list of str): The agent profiles, and the messages and rewards in each turn.\n \"\"\"\n\n agent_profiles = [\n AgentProfile.get(pk=uuid_str) for uuid_str in self.agents\n ]\n messages_and_rewards = []\n for idx, turn in enumerate(self.messages):\n messages_in_this_turn = []\n if idx == 0:\n assert (\n len(turn) >= 2\n ), \"The first turn should have at least environemnt messages\"\n messages_in_this_turn.append(turn[0][2])\n messages_in_this_turn.append(turn[1][2])\n for sender, receiver, message in turn:\n if receiver == \"Environment\":\n if sender != \"Environment\":\n if \"did nothing\" in message:\n continue\n else:\n if \"said:\" in message:\n messages_in_this_turn.append(\n f\"{sender} {message}\"\n )\n else:\n messages_in_this_turn.append(\n f\"{sender}: {message}\"\n )\n else:\n messages_in_this_turn.append(message)\n messages_and_rewards.append(\"\\n\".join(messages_in_this_turn))\n messages_and_rewards.append(f\"The reasoning is:\\n{self.reasoning}\")\n messages_and_rewards.append(\n f\"The rewards are:\\nAgent 1: {self.rewards[0]}\\nAgent 2: {self.rewards[1]}\"\n )\n return agent_profiles, messages_and_rewards" }, { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "EnvironmentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n 
default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )" }, { "identifier": "ReachGoalLLMEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n 
)\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. {e}\")\n return []" }, { "identifier": "RuleBasedTerminatedEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)" }, { "identifier": "ParallelSotopiaEnv", "path": "sotopia/envs/parallel.py", "snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n 
available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass" }, { "identifier": "LLM_Name", "path": "sotopia/generation_utils/generate.py", "snippet": "class EnvResponse(BaseModel):\nclass EnvResponsePydanticOutputParser(PydanticOutputParser[EnvResponse]):\nclass 
ListOfIntOutputParser(BaseOutputParser[list[int]]):\nclass ListOfStrOutputParser(BaseOutputParser[list[str]]):\nclass StrOutputParser(BaseOutputParser[str]):\nclass ScriptOutputParser(BaseOutputParser[ScriptInteractionReturnType]):\n def __init__(self, pydantic_object: Type[BaseModel] = EnvResponse) -> None:\n def parse(self, text: str) -> EnvResponse:\n def get_format_instructions(self) -> str:\n def __init__(\n self,\n number_of_int: int | None = None,\n range_of_int: tuple[int, int] | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[int]:\n def _type(self) -> str:\n def __init__(\n self,\n number_of_str: int | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[str]:\n def _type(self) -> str:\n def __init__(self) -> None:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> str:\n def _type(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> ScriptInteractionReturnType:\n def _type(self) -> str:\ndef _return_fixed_model_version(\n model_name: Literal[\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-turbo\"]\n) -> str:\ndef obtain_chain(\n model_name: LLM_Name,\n template: str,\n input_variables: list[str],\n temperature: float = 0.7,\n max_retries: int = 6,\n) -> LLMChain:\ndef format_bad_output_for_script(\n ill_formed_output: str,\n format_instructions: str,\n agents: list[str],\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef format_bad_output(\n ill_formed_output: str,\n format_instructions: str,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef generate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> OutputType:\nasync def agenerate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> tuple[OutputType, str]:\ndef generate_episode(\n model_name: LLM_Name,\n participants: str = \"Jack (a greedy person), Rose\",\n topic: str = \"lawsuit\",\n extra_info: str = \"\",\n) -> EnvResponse:\nasync def agenerate_env_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n temperature: float = 0.7,\n) -> tuple[EnvironmentProfile, str]:\nasync def agenerate_relationship_profile(\n model_name: LLM_Name,\n agents_profiles: list[str],\n) -> tuple[RelationshipProfile, str]:\nasync def agenerate_enviroment_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n) -> tuple[EnvironmentProfile, str]:\ndef fill_in_background(\n model_name: LLM_Name,\n partial_background: ScriptBackground,\n) -> ScriptBackground:\ndef generate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\ndef generate_action_speak(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\nasync def agenerate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n temperature: float = 0.7,\n script_like: bool = False,\n) -> tuple[AgentAction, 
str]:\nasync def agenerate_script(\n model_name: LLM_Name,\n background: ScriptBackground,\n temperature: float = 0.7,\n agent_names: list[str] = [],\n agent_name: str = \"\",\n history: str = \"\",\n single_step: bool = False,\n) -> tuple[ScriptInteractionReturnType, str]:\ndef process_history(\n script: ScriptBackground | EnvResponse | dict[str, AgentAction]\n) -> str:\ndef generate_init_profile(\n model_name: LLM_Name, basic_info: dict[str, str]\n) -> str:\ndef convert_narratives(model_name: LLM_Name, narrative: str, text: str) -> str:\ndef generate_goal(model_name: LLM_Name, background: str) -> str:" }, { "identifier": "AgentAction", "path": "sotopia/messages/message_classes.py", "snippet": "class AgentAction(Message):\n action_type: ActionType = Field(\n description=\"whether to speak at this turn or choose to not do anything\"\n )\n argument: str = Field(\n description=\"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\"\n )\n\n def to_natural_language(self) -> str:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"" }, { "identifier": "Observation", "path": "sotopia/messages/message_classes.py", "snippet": "class Observation(Message):\n last_turn: str = Field(description=\"the last turn of the conversation\")\n turn_number: int = Field(description=\"the turn number of the conversation\")\n available_actions: list[ActionType] = Field(\n description=\"the available actions\"\n )\n\n def to_natural_language(self) -> str:\n if self.turn_number == 0:\n return f\"\\n{self.last_turn}\\nConversation Starts:\\n\"\n else:\n return f\"Turn #{self.turn_number-1}: {self.last_turn}\\n\"" }, { "identifier": "BaseSampler", "path": "sotopia/samplers/base_sampler.py", "snippet": "class BaseSampler(Generic[ObsType, ActType]):\n def __init__(\n self,\n env_candidates: Sequence[EnvironmentProfile | str] | None = None,\n agent_candidates: Sequence[AgentProfile | str] | None = None,\n ) -> None:\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:" }, { "identifier": "run_async_server", "path": "sotopia/server.py", "snippet": "@gin.configurable\n@beartype\nasync def run_async_server(\n model_dict: dict[str, LLM_Name],\n sampler: BaseSampler[Observation, AgentAction] = BaseSampler(),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"round-robin\",\n env_agent_combo_list: list[EnvAgentCombo[Observation, AgentAction]] = [],\n omniscient: bool = False,\n script_like: bool = False,\n json_in_script: bool = False,\n tag: str | None = None,\n push_to_db: bool = False,\n using_async: bool = True,\n) -> list[list[tuple[str, str, Message]]]:\n \"\"\"\n Doc incomplete\n\n Args:\n omniscient (bool): Whether the agent knows the goal of the other, default to False\n script_like (bool): Whether we generate the turn in script like manner, default to False\n json_in_script (bool): Whether we requires the script generator to return json (Only valid when 
script_like is True), default to False\n\n Note: env_agent_combo_list is optional. When it defaults to [], sampler is used\n else the sampler is not used. Please pass in BaseSampler or simply not specify it when using this option.\n \"\"\"\n\n assert not (\n push_to_db and tag is None\n ), \"please provide a tag when push to db\"\n\n # Create Environment and agents\n # This step will be moved to outside this function\n\n env_params = {\n \"model_name\": model_dict[\"env\"],\n \"action_order\": action_order,\n \"evaluators\": [\n RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),\n ],\n \"terminal_evaluators\": [\n ReachGoalLLMEvaluator(model_dict[\"env\"]),\n ],\n }\n agents_model_dict = {\n \"agent1\": model_dict[\"agent1\"],\n \"agent2\": model_dict[\"agent2\"],\n }\n\n def get_agent_class(\n model_name: str,\n ) -> Type[BaseAgent[Observation, AgentAction]]:\n if model_name == \"human\":\n return HumanAgent\n elif script_like and not json_in_script:\n return ScriptWritingAgent\n else:\n return LLMAgent\n\n if env_agent_combo_list:\n assert (\n type(sampler) is BaseSampler\n ), \"No sampler should be used when `env_agent_combo_list` is empty\"\n env_agent_combo_iter = iter(env_agent_combo_list)\n else:\n env_agent_combo_iter = sampler.sample(\n agent_classes=[\n get_agent_class(model_name)\n for model_name in agents_model_dict.values()\n ],\n n_agent=len(agents_model_dict),\n env_params=env_params,\n agents_params=[\n {\"model_name\": model_name} if model_name != \"human\" else {}\n for model_name in agents_model_dict.values()\n ],\n )\n episode_futures = [\n arun_one_episode(\n env=env_agent_combo[0],\n agent_list=env_agent_combo[1],\n model_dict=model_dict,\n omniscient=omniscient,\n script_like=script_like,\n json_in_script=json_in_script,\n tag=tag,\n push_to_db=push_to_db,\n )\n for env_agent_combo in env_agent_combo_iter\n ]\n\n batch_results = (\n await tqdm_asyncio.gather(*episode_futures, desc=\"Running one batch\")\n if using_async\n else [await i for i in episode_futures]\n )\n\n return cast(list[list[tuple[str, str, Message]]], batch_results)" }, { "identifier": "parse_gin_flags", "path": "sotopia_conf/gin_utils.py", "snippet": "def parse_gin_flags(\n gin_search_paths: Sequence[str],\n gin_files: Sequence[str],\n gin_bindings: Sequence[str],\n skip_unknown: Union[bool, Sequence[str]] = False,\n finalize_config: bool = True,\n) -> None:\n \"\"\"Parses provided gin files override params.\n Args:\n gin_search_paths: paths that will be searched for gin files.\n gin_files: paths to gin config files to be parsed. Files will be parsed in\n order with conflicting settings being overriden by later files. Paths may\n be relative to paths in `gin_search_paths`.\n gin_bindings: individual gin bindings to be applied after the gin files are\n parsed. Will be applied in order with conflicting settings being overriden\n by later oens.\n skip_unknown: whether to ignore unknown bindings or raise an error (default\n behavior). 
Alternatively, a list of configurable names to skip if unknown.\n finalize_config: whether to finalize the config so that it cannot be\n modified (default behavior).\n \"\"\"\n # Register .gin file search paths with gin\n for gin_file_path in gin_search_paths:\n gin.add_config_file_search_path(gin_file_path)\n\n # Parse config files and bindings passed via flag.\n gin.parse_config_files_and_bindings(\n gin_files,\n gin_bindings,\n skip_unknown=skip_unknown,\n finalize_config=finalize_config,\n )\n logging.info(\"Gin Configuration:\")\n for line in gin.config_str().splitlines():\n logging.info(\"%s\", line)" }, { "identifier": "run", "path": "sotopia_conf/gin_utils.py", "snippet": "def run(main: Any) -> None:\n \"\"\"Wrapper for app.run that rewrites gin args before parsing.\"\"\"\n app.run(\n main,\n flags_parser=lambda args: app.parse_flags_with_usage(\n rewrite_gin_args(args)\n ),\n )" }, { "identifier": "_DEFAULT_GIN_SEARCH_PATHS", "path": "sotopia_conf/server.py", "snippet": "_DEFAULT_GIN_SEARCH_PATHS = [\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n]" } ]
import asyncio import logging import gin from collections import Counter, defaultdict from typing import ( Any, Dict, Generator, List, Literal, Optional, Set, cast, ) from absl import flags from absl.flags import FLAGS from rich.logging import RichHandler from rich.terminal_theme import MONOKAI from tqdm import tqdm from sotopia.agents.llm_agent import LLMAgent from sotopia.database.env_agent_combo_storage import ( EnvAgentComboStorage, ) from sotopia.database.logs import EpisodeLog from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentProfile, ) from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, ) from sotopia.envs.parallel import ParallelSotopiaEnv from sotopia.generation_utils.generate import LLM_Name from sotopia.messages.message_classes import AgentAction, Observation from sotopia.samplers.base_sampler import BaseSampler, EnvAgentCombo from sotopia.server import run_async_server from sotopia_conf.gin_utils import parse_gin_flags, run from sotopia_conf.server import _DEFAULT_GIN_SEARCH_PATHS
12,884
bad_rewards_count += 1 if tuple(curr_ep.models) == ("gpt4", "gpt4", "gpt4"): bad_gpt4_rewards_count += 1 continue # find combo pk by env pk and agent ids curr_combo_pk = find_combo_pk( curr_ep.environment, curr_ep.agents[0], curr_ep.agents[1], all_combos_map, ) if curr_combo_pk: model_pair: tuple[LLM_Name, LLM_Name, LLM_Name] = cast( tuple[LLM_Name, LLM_Name, LLM_Name], tuple(curr_ep.models) ) combo_model_map[curr_combo_pk][model_pair] += 1 valid_count += 1 else: bad_combos.append( (curr_ep.environment, curr_ep.agents[0], curr_ep.agents[1]) ) bad_combo_count += 1 print("-" * 20 + "Episode Parsing Summary" + "-" * 20) print(f"valid episodes: {valid_count}") print(f"invalid episodes (missing episode.models): {invalid_count}") print(f"bad combo: {bad_combo_count}") print(f"bad rewards: {bad_rewards_count}") print(f"bad gpt4 rewards: {bad_gpt4_rewards_count}") return combo_model_map def get_all_model_pairs( combo_model_map: Dict[str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]]] ) -> Set[tuple[LLM_Name, LLM_Name, LLM_Name]]: all_model_pairs = set() for key in combo_model_map: for combo in combo_model_map[key]: all_model_pairs.add(combo) # print all model pairs print("-" * 20 + "All Model Pairs" + "-" * 20) for pair in all_model_pairs: print(pair) print() return all_model_pairs def get_all_missing_model_pairs( combo_model_map: Dict[str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]]], all_model_pairs: Set[tuple[LLM_Name, LLM_Name, LLM_Name]], num_required: int, ) -> Dict[str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]]]: combo_missing_model_map: Dict[ str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]] ] = defaultdict(Counter) missing_count = 0 for key in combo_model_map: for model_pair in all_model_pairs: if combo_model_map[key][model_pair] < num_required: combo_missing_model_map[key][model_pair] += ( num_required - combo_model_map[key][model_pair] ) missing_count += ( num_required - combo_model_map[key][model_pair] ) print("-" * 20 + f"Missing {missing_count} Model Pairs" + "-" * 20) print() return combo_missing_model_map # temporally used for making sure unique (env, agents, models) setting; need to change # according to the Counter in the case needing to run multiple experiments for one setting def get_missing_model_combo_map( combo_missing_model_map: Dict[ str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]] ], all_combos_map: Dict[str, EnvAgentComboStorage], ) -> Dict[tuple[LLM_Name, LLM_Name], List[tuple[str, str, str]]]: missing_model_combo_map = defaultdict(list) for combo_pk in combo_missing_model_map: model_counter = combo_missing_model_map[combo_pk] for model_pair in model_counter: model_pair_key = (model_pair[1], model_pair[2]) combo_model = all_combos_map[combo_pk] missing_model_combo_map[model_pair_key].append( ( combo_model.env_id, combo_model.agent_ids[0], combo_model.agent_ids[1], ) ) print("-" * 20 + "Missing Model to Combo Map" + "-" * 20) for key in missing_model_combo_map: print(f"Model pair: {key}") print(f"Number of missing combos: {len(missing_model_combo_map[key])}") return missing_model_combo_map def yield_env_agent_combo( combo_ids: list[tuple[str, str, str]], model_names: dict[str, LLM_Name] ) -> Generator[EnvAgentCombo[Observation, AgentAction], None, None]: for combo_id in combo_ids: env_id, agent_id1, agent_id2 = combo_id env_profile = EnvironmentProfile.get(env_id) env = ParallelSotopiaEnv( env_profile=env_profile, model_name=model_names["env"], action_order="round-robin", evaluators=[ RuleBasedTerminatedEvaluator( max_turn_number=20, max_stale_turn=2 ), ], 
terminal_evaluators=[ ReachGoalLLMEvaluator(model_names["env"]), ], ) agent_profiles = [
# date and message only FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s" logging.basicConfig( level=15, format=FORMAT, datefmt="[%X]", handlers=[ RichHandler(), ], ) # get all episode logs def get_all_episodes() -> List[EpisodeLog]: episode_pks: List[str] = list(EpisodeLog.all_pks()) all_episodes = [] for pk in tqdm(episode_pks): try: curr_ep = EpisodeLog.get(pk) except: continue all_episodes.append(curr_ep) print(f"all episodes loaded {len(all_episodes)}") return all_episodes # all env-agent combos def get_all_env_agent_combos( start_combo_idx: int, end_combo_idx: int ) -> Dict[str, EnvAgentComboStorage]: experiment_env_pks = list(EnvironmentProfile.all_pks()) all_combos_map: Dict[str, EnvAgentComboStorage] = {} for env_pk in experiment_env_pks: env_agent_combo_storage_list = list( EnvAgentComboStorage.find( EnvAgentComboStorage.env_id == env_pk ).all() )[start_combo_idx:end_combo_idx] for combo in env_agent_combo_storage_list: all_combos_map[cast(str, combo.pk)] = cast( EnvAgentComboStorage, combo ) print(f"all combos loaded {len(all_combos_map)}") return all_combos_map def find_combo_pk( env_pk: str, agent_id1: str, agent_id2: str, all_combos_map: Dict[str, EnvAgentComboStorage], ) -> str | None: for combo_key in all_combos_map: combo = all_combos_map[combo_key] curr_tuple = (combo.env_id, combo.agent_ids[0], combo.agent_ids[1]) if curr_tuple == (env_pk, agent_id1, agent_id2): return combo_key return None def get_combo_model_map( all_episodes: List[EpisodeLog], all_combos_map: Dict[str, EnvAgentComboStorage], ) -> Dict[str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]]]: combo_model_map: Dict[ str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]] ] = defaultdict(Counter) bad_combos = [] valid_count = 0 invalid_count = 0 bad_rewards_count = 0 bad_gpt4_rewards_count = 0 bad_combo_count = 0 # iterate through episodes for i in tqdm(range(len(all_episodes))): curr_ep = all_episodes[i] bad_rewards = False # check if episode is valid if not curr_ep.models: invalid_count += 1 continue # check if rewards are valid for idx, model in enumerate(curr_ep.models[1:]): if not isinstance(curr_ep.rewards[idx], tuple): bad_rewards = True break if bad_rewards: bad_rewards_count += 1 if tuple(curr_ep.models) == ("gpt4", "gpt4", "gpt4"): bad_gpt4_rewards_count += 1 continue # find combo pk by env pk and agent ids curr_combo_pk = find_combo_pk( curr_ep.environment, curr_ep.agents[0], curr_ep.agents[1], all_combos_map, ) if curr_combo_pk: model_pair: tuple[LLM_Name, LLM_Name, LLM_Name] = cast( tuple[LLM_Name, LLM_Name, LLM_Name], tuple(curr_ep.models) ) combo_model_map[curr_combo_pk][model_pair] += 1 valid_count += 1 else: bad_combos.append( (curr_ep.environment, curr_ep.agents[0], curr_ep.agents[1]) ) bad_combo_count += 1 print("-" * 20 + "Episode Parsing Summary" + "-" * 20) print(f"valid episodes: {valid_count}") print(f"invalid episodes (missing episode.models): {invalid_count}") print(f"bad combo: {bad_combo_count}") print(f"bad rewards: {bad_rewards_count}") print(f"bad gpt4 rewards: {bad_gpt4_rewards_count}") return combo_model_map def get_all_model_pairs( combo_model_map: Dict[str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]]] ) -> Set[tuple[LLM_Name, LLM_Name, LLM_Name]]: all_model_pairs = set() for key in combo_model_map: for combo in combo_model_map[key]: all_model_pairs.add(combo) # print all model pairs print("-" * 20 + "All Model Pairs" + "-" * 20) for pair in all_model_pairs: print(pair) print() return all_model_pairs def get_all_missing_model_pairs( combo_model_map: Dict[str, 
Counter[tuple[LLM_Name, LLM_Name, LLM_Name]]], all_model_pairs: Set[tuple[LLM_Name, LLM_Name, LLM_Name]], num_required: int, ) -> Dict[str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]]]: combo_missing_model_map: Dict[ str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]] ] = defaultdict(Counter) missing_count = 0 for key in combo_model_map: for model_pair in all_model_pairs: if combo_model_map[key][model_pair] < num_required: combo_missing_model_map[key][model_pair] += ( num_required - combo_model_map[key][model_pair] ) missing_count += ( num_required - combo_model_map[key][model_pair] ) print("-" * 20 + f"Missing {missing_count} Model Pairs" + "-" * 20) print() return combo_missing_model_map # temporally used for making sure unique (env, agents, models) setting; need to change # according to the Counter in the case needing to run multiple experiments for one setting def get_missing_model_combo_map( combo_missing_model_map: Dict[ str, Counter[tuple[LLM_Name, LLM_Name, LLM_Name]] ], all_combos_map: Dict[str, EnvAgentComboStorage], ) -> Dict[tuple[LLM_Name, LLM_Name], List[tuple[str, str, str]]]: missing_model_combo_map = defaultdict(list) for combo_pk in combo_missing_model_map: model_counter = combo_missing_model_map[combo_pk] for model_pair in model_counter: model_pair_key = (model_pair[1], model_pair[2]) combo_model = all_combos_map[combo_pk] missing_model_combo_map[model_pair_key].append( ( combo_model.env_id, combo_model.agent_ids[0], combo_model.agent_ids[1], ) ) print("-" * 20 + "Missing Model to Combo Map" + "-" * 20) for key in missing_model_combo_map: print(f"Model pair: {key}") print(f"Number of missing combos: {len(missing_model_combo_map[key])}") return missing_model_combo_map def yield_env_agent_combo( combo_ids: list[tuple[str, str, str]], model_names: dict[str, LLM_Name] ) -> Generator[EnvAgentCombo[Observation, AgentAction], None, None]: for combo_id in combo_ids: env_id, agent_id1, agent_id2 = combo_id env_profile = EnvironmentProfile.get(env_id) env = ParallelSotopiaEnv( env_profile=env_profile, model_name=model_names["env"], action_order="round-robin", evaluators=[ RuleBasedTerminatedEvaluator( max_turn_number=20, max_stale_turn=2 ), ], terminal_evaluators=[ ReachGoalLLMEvaluator(model_names["env"]), ], ) agent_profiles = [
AgentProfile.get(id) for id in (agent_id1, agent_id2)
3
2023-10-23 19:47:26+00:00
16k
uukuguy/multi_loras
multi_loras/slora/router/manager.py
[ { "identifier": "SamplingParams", "path": "multi_loras/slora/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n top_p: float = 1.0,\n top_k: int = -1, # -1 is for all \n ignore_eos: bool = False,\n max_new_tokens: int = 16,\n stop_sequences: Optional[Union[str, List[str]]] = None # 停止句子条件\n ) -> None:\n self.do_sample = do_sample\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.ignore_eos = ignore_eos\n self.max_new_tokens = max_new_tokens\n self.stop_sequences = stop_sequences\n if self.do_sample == False:\n self.temperature = 1.0\n self.top_p = 1.0\n self.top_k = 1\n if self.temperature >= 0.0 and self.temperature < _SAMPLING_EPS: # temperature is too slow, change to greedy search\n self.temperature = 1.0\n self.top_k = 1\n return\n \n def verify(self):\n if self.presence_penalty < 0.0:\n raise ValueError(f\"presence_penalty must >= 0.0, got {self.presence_penalty}\")\n if self.frequency_penalty < 0.0:\n raise ValueError(f\"frequency_penalty must >= 0.0, got {self.frequency_penalty}\")\n if self.temperature <= 0.0:\n raise ValueError(f\"temperature must > 0.0, got {self.temperature}\")\n if self.top_p <= 0.0 or self.top_p > 1.0:\n raise ValueError(f\"top_p must in (0.0, 1.0], got {self.top_p}\")\n if self.top_k < -1 or self.top_k == 0:\n raise ValueError(f\"top_k must be -1 (disable), or at least 1, got {self.top_k}.\")\n if self.max_new_tokens < 1:\n raise ValueError(f\"max_new_tokens must be at least 1 , got {self.max_new_tokens}.\")\n return\n\n def stop_sentences_to_token_ids(self, tokenizer):\n if self.stop_sequences is None:\n self.stop_sequences = []\n else:\n if isinstance(self.stop_sequences, str):\n self.stop_sequences = [self.stop_sequences]\n new_stop_sequences = []\n for stop_str in self.stop_sequences:\n stop_str_ids = tokenizer.encode(stop_str)\n if stop_str_ids is not None and len(stop_str_ids) >= 1: # remove bos_token_id\n stop_str_ids = stop_str_ids[1:]\n if len(stop_str_ids) > 0:\n new_stop_sequences.append(stop_str_ids)\n self.stop_sequences = new_stop_sequences\n return\n \n def to_dict(self):\n ret = {}\n ret[\"do_sample\"] = self.do_sample\n ret[\"presence_penalty\"] = self.presence_penalty\n ret[\"frequency_penalty\"] = self.frequency_penalty\n ret[\"temperature\"] = self.temperature\n ret[\"top_p\"] = self.top_p\n ret[\"top_k\"] = self.top_k\n # if self.ignore_eos is not None:\n # ret[\"ignore_eos\"] = self.ignore_eos\n # if self.max_tokens is not None:\n # ret[\"max_tokens\"] = self.max_tokens\n return ret" }, { "identifier": "Req", "path": "multi_loras/slora/io_struct.py", "snippet": "class Req:\n def __init__(self, adapter_dir, request_id, prompt_ids, sample_params: SamplingParams):\n self.adapter_dir = adapter_dir\n self.request_id = request_id\n self.prompt_ids = prompt_ids\n self.input_len = len(prompt_ids)\n self.max_output_len = sample_params.max_new_tokens\n self.sample_params = sample_params\n self.output_ids = []\n self.output_metadata_list = []\n self.has_generate_finished = False\n self.aborted = False\n\n def to_rpc_obj(self):\n return {\"adapter_dir\": self.adapter_dir,\n \"request_id\": self.request_id,\n \"input_id\": self.prompt_ids,\n \"output_len\": self.max_output_len,\n \"sampling_param\": self.sample_params.to_dict() }\n\n def to_req_detokenization_state(self):\n out = 
ReqDetokenizationState(self.request_id, self.prompt_ids, self.max_output_len, self.sample_params.ignore_eos)\n if self.output_metadata_list:\n out.gen_metadata.update(self.output_metadata_list[-1])\n return out\n \n def stop_sequences_matched(self):\n for stop_token_ids in self.sample_params.stop_sequences:\n stop_len = len(stop_token_ids)\n if stop_len > 0:\n if len(self.output_ids) >= stop_len:\n if all(self.output_ids[-(stop_len - i)] == stop_token_ids[i] for i in range(stop_len)):\n return True\n return False\n\n def __repr__(self):\n return (f\"request_id(n={self.request_id}, \"\n f\"adapter_dir={self.adapter_dir}, \"\n f\"prompt_ids={self.prompt_ids}, \")" }, { "identifier": "Batch", "path": "multi_loras/slora/io_struct.py", "snippet": "class Batch:\n def __init__(self, batch_id, reqs: List[Req]):\n self.batch_id = batch_id\n self.reqs = reqs\n self.id_to_reqs = {req.request_id: req for req in reqs}\n\n self.adapter_dirs = set()\n for req in reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def input_tokens(self):\n batch_input_tokens = 0\n for req in self.reqs:\n batch_input_tokens += req.input_len\n return batch_input_tokens\n\n def calcu_max_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + req.max_output_len\n return tokens\n \n def calcu_used_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + len(req.output_ids)\n return tokens\n\n def mark_finished_req(self, eos_id):\n has_new_finish = False\n for req in self.reqs:\n if req.stop_sequences_matched():\n req.has_generate_finished = True\n has_new_finish = True\n if req.output_ids[-1] == eos_id and req.sample_params.ignore_eos == False:\n req.has_generate_finished = True\n has_new_finish = True\n if len(req.output_ids) >= req.max_output_len or req.aborted:\n req.has_generate_finished = True\n has_new_finish = True\n return has_new_finish\n\n def filter_finished(self):\n unfinished_req = []\n for req in self.reqs:\n if not req.has_generate_finished:\n unfinished_req.append(req)\n self.reqs = unfinished_req\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n\n self.adapter_dirs = set()\n for req in self.reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def is_clear(self):\n return len(self.reqs) == 0\n\n def merge(self, mini_batch):\n for _req in mini_batch.reqs:\n self.reqs.append(_req)\n self.adapter_dirs.add(_req.adapter_dir)\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n return\n\n def __repr__(self):\n return (f\"batch_id={self.batch_id}, \"\n # f\"reqs={self.reqs}, \"\n f\"req_ids={self.id_to_reqs.keys()}\")" }, { "identifier": "BatchAbortReq", "path": "multi_loras/slora/io_struct.py", "snippet": "class BatchAbortReq:\n def __init__(self, req_ids):\n self.reqs: List[str] = req_ids" }, { "identifier": "BatchTokenIdOut", "path": "multi_loras/slora/io_struct.py", "snippet": "class BatchTokenIdOut:\n def __init__(self):\n self.reqs_infs: List[Tuple[str, int, Dict, bool, bool]] = [] # [req_id, new_token_id, gen_metadata, finished_state, abort_state]" }, { "identifier": "AbortReq", "path": "multi_loras/slora/io_struct.py", "snippet": "class AbortReq:\n def __init__(self, req_id):\n self.req_id = req_id" }, { "identifier": "InputParams", "path": "multi_loras/slora/router/input_params.py", "snippet": "class InputParams:\n\n def __init__(\n self,\n max_req_total_len,\n # kv cache manager parameters\n max_total_token_num,\n pool_size_lora,\n batch_max_tokens,\n running_max_req_size,\n # mem_ratio,\n # adapter_ratio,\n # heuristic\n swap,\n 
prefetch,\n prefetch_size,\n scheduler,\n profile,\n batch_num_adapters,\n enable_abort,\n # kernel,\n # # debug\n dummy,\n no_lora_compute,\n no_lora_swap,\n # no_lora_copy,\n no_kernel,\n no_mem_pool,\n bmm,\n ) -> None:\n self.max_req_total_len = max_req_total_len\n self.max_total_token_num = max_total_token_num\n self.pool_size_lora = pool_size_lora\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n # self.mem_ratio = mem_ratio\n # self.adapter_ratio = adapter_ratio\n\n self.swap = swap\n self.prefetch = prefetch\n self.prefetch_size = prefetch_size\n self.scheduler = scheduler\n self.profile = profile\n self.batch_num_adapters = batch_num_adapters\n self.enable_abort = enable_abort\n # self.kernel = kernel\n\n self.dummy = dummy\n self.no_lora_compute = no_lora_compute\n self.no_lora_swap = no_lora_swap\n # self.no_lora_copy = no_lora_copy\n self.no_kernel = no_kernel\n self.no_mem_pool = no_mem_pool\n self.bmm = bmm\n return" }, { "identifier": "start_model_process", "path": "multi_loras/slora/router/model_infer/model_rpc.py", "snippet": "async def start_model_process(port, world_size):\n # 单卡时不使用 rpc\n if world_size == 1:\n return ModelRpcClient(ModelRpcServer(), world_size)\n \n import multiprocessing\n proc = multiprocessing.Process(target=_init_env, args=(port,))\n proc.start()\n await asyncio.sleep(2)\n repeat_count = 0\n while repeat_count < 20:\n try:\n con = rpyc.connect(\"localhost\", port, config={\"allow_pickle\": True})\n break\n except BaseException:\n await asyncio.sleep(1)\n repeat_count += 1\n if repeat_count == 20:\n raise Exception(\"init rpc env error!\")\n\n assert proc.is_alive()\n return ModelRpcClient(con.root, world_size, rpc_server_process=proc)" }, { "identifier": "ModelRpcClient", "path": "multi_loras/slora/router/model_infer/model_rpc.py", "snippet": "class ModelRpcClient:\n def __init__(self, model_rpc, world_size, rpc_server_process=None):\n self.model: ModelRpcServer = model_rpc\n self.world_size = world_size\n self.rpc_server_process = rpc_server_process\n self.use_rpc = self.world_size != 1\n if self.use_rpc:\n def async_wrap(f):\n f = rpyc.async_(f)\n async def _func(*args, **kwargs):\n ans = f(*args, **kwargs)\n await asyncio.to_thread(ans.wait)\n # raise if exception\n return ans.value\n return _func\n self._init_model = async_wrap(self.model.init_model)\n self._load_adapters = rpyc.async_(self.model.load_adapters)\n self._offload_adapters = rpyc.async_(self.model.offload_adapters)\n self._unmerge_adapter = rpyc.async_(self.model.unmerge_adapter)\n self._merge_adapter = rpyc.async_(self.model.merge_adapter)\n self._add_batch = async_wrap(self.model.add_batch)\n self._prefill_batch = async_wrap(self.model.prefill_batch)\n self._decode_batch = async_wrap(self.model.decode_batch)\n self._filter_batch = async_wrap(self.model.filter_batch)\n self._merge_batch = async_wrap(self.model.merge_batch)\n self._remove_batch = async_wrap(self.model.remove_batch)\n self._profile_prefill = async_wrap(self.model.profile_prefill)\n else:\n self._init_model = self.model.exposed_init_model\n self._load_adapters = self.model.exposed_load_adapters\n self._offload_adapters = self.model.exposed_offload_adapters\n self._merge_adapter = self.model.exposed_merge_adapter\n self._unmerge_adapter = self.model.exposed_unmerge_adapter\n self._add_batch = self.model.exposed_add_batch\n self._prefill_batch = self.model.exposed_prefill_batch\n self._decode_batch = self.model.exposed_decode_batch\n self._filter_batch = 
self.model.exposed_filter_batch\n self._merge_batch = self.model.exposed_merge_batch\n self._remove_batch = self.model.exposed_remove_batch\n self._profile_prefill = self.model.exposed_profile_prefill\n return\n\n async def init_model(self, rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t prefetch_stream):\n ans : rpyc.AsyncResult = self._init_model(rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t\t\t\t prefetch_stream)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n\n async def load_adapters(self, reqs, prefetch=False):\n self._load_adapters(reqs, prefetch=prefetch)\n\n\n async def offload_adapters(self, reserved_reqs=None, prefetch=False):\n self._offload_adapters(reserved_reqs, prefetch=prefetch)\n \n async def unmerge_adapter(self):\n self._unmerge_adapter()\n \n async def merge_adapter(self):\n self._merge_adapter()\n\n\n async def init_batch(self, batch_id, reqs):\n ans = self._add_batch(batch_id, reqs, \"fp16\")\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def prefill_batch(self, batch_id):\n ans = self._prefill_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def decode_batch(self, batch_id):\n ans = self._decode_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def filter_batch(self, batch_id, req_id_list):\n ans = self._filter_batch(batch_id, req_id_list)\n if self.use_rpc:\n await ans\n return\n else:\n return \n\n async def merge_batch(self, batch_id1, batch_id2):\n ans = self._merge_batch(batch_id1, batch_id2)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def remove_batch(self, batch_id):\n ans = self._remove_batch(batch_id)\n if self.use_rpc:\n await ans\n return\n else:\n return\n \n async def profile_prefill(self):\n ans = self._profile_prefill()\n if self.use_rpc:\n return await ans\n else:\n return ans" }, { "identifier": "ReqQueue", "path": "multi_loras/slora/router/req_queue.py", "snippet": "class ReqQueue:\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n self.max_total_tokens = max_total_tokens\n assert batch_max_tokens is not None\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n self.waiting_req_list: List[Req] = []\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in 
self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "Stats", "path": "multi_loras/slora/router/stats.py", "snippet": "class Stats:\n\n def __init__(self, log_status, log_stats_interval) -> None:\n self.log_stats = log_status\n self.log_stats_interval = log_stats_interval\n self.last_log_time = time.time()\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n return\n \n def count_prompt_tokens(self, run_batch):\n if self.log_stats:\n tokens = run_batch.input_tokens()\n self.prompt_tokens += tokens\n self.all_tokens += tokens\n return\n \n def count_output_tokens(self, run_batch):\n if self.log_stats:\n tokens = len(run_batch.reqs)\n self.output_tokens += tokens\n self.all_tokens += tokens\n return\n\n def print_stats(self):\n if not self.log_stats:\n return\n\n now = time.time()\n if now - self.last_log_time > self.log_stats_interval:\n print(f\"Avg tokens(prompt+generate) throughput: {self.all_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg prompt tokens throughput: {self.prompt_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg generate tokens throughput: {self.output_tokens/(now-self.last_log_time):8.3f} tokens/s\")\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n self.last_log_time = now\n return" }, { "identifier": "AlphaModel", "path": "multi_loras/slora/router/profiler.py", "snippet": "class AlphaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n print(self.base_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n\n def get_latency(self, batch_size, seq_len):\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 
2 in self.base_prefill:\n return self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n return (self.base_prefill[batch_size - 1][seq_len] + self.base_prefill[batch_size + 1][seq_len]) / 2\n else:\n return np.Inf" }, { "identifier": "BetaModel", "path": "multi_loras/slora/router/profiler.py", "snippet": "class BetaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n self.adapter_prefill = profiling_results[1]\n print(self.adapter_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n \n def get_latency(self, rank_size, batch_size, seq_len):\n if rank_size == 0: return 0\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.adapter_prefill[rank_size][batch_size][seq_len] - self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.adapter_prefill[rank_size][2][seq_len] - self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n a = self.adapter_prefill[rank_size][batch_size - 1][seq_len] - self.base_prefill[batch_size - 1][seq_len]\n b = self.adapter_prefill[rank_size][batch_size + 1][seq_len] - self.base_prefill[batch_size + 1][seq_len]\n return (a + b) / 2\n else:\n return np.Inf" }, { "identifier": "PETSReqQueue", "path": "multi_loras/slora/router/pets_req_queue.py", "snippet": "class PETSReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.alpha = None\n self.beta = None # will be set automatically in the profiling function in router.manager\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n def intra_task_batching(self, lora_ranks):\n ## Preprocessing: gather the queries with the same adapter.\n clustered_queries_by_adapter = {}\n for query in self.waiting_req_list:\n adapter_dir = query.adapter_dir\n if adapter_dir in clustered_queries_by_adapter:\n clustered_queries_by_adapter[adapter_dir].append(query)\n else:\n clustered_queries_by_adapter[adapter_dir] = [query]\n\n ## DP\n mini_batches = []\n for adapter_dir, queries in clustered_queries_by_adapter.items():\n state_1st_stage = []\n split_idx_list = []\n\n ### Sort queries according to the sequence length in ascending order.\n queries = sorted(queries, key=lambda x: x.input_len)\n queries.insert(0, None) # Sentinel.\n\n ### Initialize.\n state_1st_stage.append(0)\n split_idx_list.append(0)\n for j in range(1, len(queries)):\n min_cost = np.Inf # INF\n split_idx = 0\n for k in range(1, j+1):\n lora_rank = lora_ranks[adapter_dir]\n 
tmp = state_1st_stage[k-1] + self.beta.get_latency(lora_rank, j-k+1, queries[j].input_len)\n if tmp < min_cost:\n min_cost = tmp\n split_idx = k-1\n split_idx_list.append(split_idx)\n state_1st_stage.append(min_cost)\n \n ### Split queries into mini-batches according to split_idx_list.\n \n end_idx = len(queries) - 1\n\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n mini_batch = []\n max_len = queries[end_idx].input_len\n for j in range(start_idx, end_idx + 1):\n mini_batch.append(queries[j]) \n mini_batches.append((mini_batch, max_len))\n end_idx = split_idx_list[end_idx] \n \n return mini_batches\n \n # Inter-task batching.\n def inter_task_batching(self, mini_batches):\n ## Sort mini_batches according to the max sequence length.\n mini_batches = sorted(mini_batches, key=lambda x: x[1])\n mini_batches.insert(0, None) # Sentinel.\n\n tmp = 0\n mini_batch_sum = [0]\n for mini_batch in mini_batches[1:]:\n tmp += len(mini_batch[0])\n mini_batch_sum.append(tmp)\n\n ## DP.\n state_2nd_stage = []\n split_idx_list = []\n state_2nd_stage.append(0)\n split_idx_list.append(0)\n\n for i in range(1, len(mini_batches)):\n min_cost = np.Inf # INF\n split_idx = 0\n for j in range(1, i+1):\n total_samples = mini_batch_sum[i] - mini_batch_sum[j-1]\n tmp = state_2nd_stage[j-1] + self.alpha.get_latency(total_samples, mini_batches[i][1])\n if tmp < min_cost:\n min_cost = tmp\n split_idx = j - 1\n split_idx_list.append(split_idx)\n state_2nd_stage.append(min_cost)\n\n ## Split mini_batches into final scheduled_batches.\n ### Split mini_batches into macro_batches.\n\n end_idx = len(mini_batches) - 1\n macro_batches = []\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n macro_batch = []\n max_len = mini_batches[end_idx][1]\n for j in range(start_idx, end_idx + 1):\n macro_batch.append(mini_batches[j]) \n macro_batches.append((macro_batch, max_len))\n end_idx = split_idx_list[end_idx] \n\n total_samples = 0\n for macro_batch in macro_batches:\n for mini_batch in macro_batch[0]:\n total_samples += len(mini_batch[0])\n # print(total_samples)\n\n return macro_batches\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n reqs = self.waiting_req_list\n # when waiting_reqs > 20\n if len(self.waiting_req_list) > 10:\n mini_batches = self.intra_task_batching(lora_ranks)\n macro_batches = self.inter_task_batching(mini_batches)\n \n macro_batch = macro_batches[-1][0]\n reqs = [req for minibatch in macro_batch for req in minibatch[0]]\n \n \n 
self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in reqs:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "PEFTReqQueue", "path": "multi_loras/slora/router/peft_req_queue.py", "snippet": "class PEFTReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n if len(self.waiting_req_list) > 0 and current_batch is None:\n adapter_dir = self.waiting_req_list[0].adapter_dir\n if current_batch is not None:\n adapter_dir = current_batch.reqs[0].adapter_dir\n # heuristics:\n # TODO: think more\n max_other_waited_reqs = 30\n other_waited_reqs = 0\n for req in 
self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n other_waited_reqs += 1\n if other_waited_reqs > max_other_waited_reqs:\n return None\n continue\n if req.adapter_dir == adapter_dir:\n break\n\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n continue\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "ClusterReqQueue", "path": "multi_loras/slora/router/cluster_req_queue.py", "snippet": "class ClusterReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size, batch_num_adapters) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.batch_num_adapters = batch_num_adapters\n\n def _generate_new_batch_prioritizing_existing_adapters(self, current_batch:Batch, lora_ranks: dict[str, int]):\n filtered_waiting_req_list = list(filter(lambda req: req.adapter_dir in current_batch.adapter_dirs, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n can_run_list = []\n new_batch_total_tokens = 0\n for idx, req in enumerate(filtered_waiting_req_list):\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n\n # If filtered waiting list was not enough to max-out the current running batch, we resolve to FIFO\n for req in self.waiting_req_list:\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n\n return can_run_list\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n for req in self.waiting_req_list:\n if req.aborted:\n 
aborted_count += 1\n continue\n\n if current_batch is not None and len(current_batch.adapter_dirs) >= self.batch_num_adapters:\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n rest_of_batch = self._generate_new_batch_prioritizing_existing_adapters(current_batch, lora_ranks)\n can_run_list += rest_of_batch\n if len(can_run_list) != 0:\n return Batch(uuid.uuid4().hex, can_run_list)\n else:\n return None\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None" }, { "identifier": "AbortReqQueue", "path": "multi_loras/slora/router/abort_req_queue.py", "snippet": "class AbortReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.abort_req_list: List[str] = []\n self.req_time_stamp = []\n self.init_bs = 1\n self.apprx_req_rate = 1\n self.apprx_bs = self.init_bs\n self.last_req_num = 0\n self.last_time = time.time()\n \n def append(self, req):\n self.waiting_req_list.insert(0, req)\n self.req_time_stamp.insert(0, time.time())\n assert len(self.waiting_req_list) == len(self.req_time_stamp)\n return\n\n def reset_abort_list(self):\n self.abort_req_list = []\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n self.apprx_req_rate = int(0.7 * (len(self.waiting_req_list) - self.last_req_num) + 0.3 * self.apprx_req_rate)\n for i, req in enumerate(self.waiting_req_list):\n if attainment_func(time.time() - self.req_time_stamp[i] + 0.5) == 0:\n req.aborted = True\n aborted_count += 1\n abort_list.append(req)\n self.abort_req_list.append(req.request_id)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in abort_list]\n \n if self.apprx_req_rate >= self.apprx_bs:\n print(\"apprx bs\", self.apprx_bs, \"req rate\", self.apprx_req_rate)\n # choose from the latest requests\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n elif self.apprx_req_rate < self.apprx_bs:\n # choose from the earliest requests\n for req in reversed(self.waiting_req_list):\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n \n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if 
self.waiting_req_list[i] not in can_run_list and self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n self.last_req_num = len(self.waiting_req_list)\n self.apprx_bs = max(int(0.7 * len(new_batch.reqs) + 0.3 * self.apprx_bs), self.init_bs)\n return new_batch\n else:\n return None" }, { "identifier": "get_lora_config", "path": "multi_loras/slora/models/peft/lora_adapter.py", "snippet": "def get_lora_config(lora_dir, dummy):\n if dummy:\n return get_lora_config_json(lora_dir), lora_dir\n else:\n lora_dir = re.sub(r'-(\\d+)$', '', lora_dir)\n return hf_load_config(lora_dir)" } ]
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq, BatchTokenIdOut, AbortReq from .input_params import InputParams from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from .stats import Stats from .profiler import AlphaModel, BetaModel from .pets_req_queue import PETSReqQueue from .peft_req_queue import PEFTReqQueue from .cluster_req_queue import ClusterReqQueue from .abort_req_queue import AbortReqQueue from ..models.peft.lora_adapter import get_lora_config
11,481
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None:
self.req_queue = ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens,
15
2023-10-16 02:39:47+00:00
16k
MobileLLM/AutoDroid
droidbot/input_manager.py
[ { "identifier": "EventLog", "path": "droidbot/input_event.py", "snippet": "class EventLog(object):\n \"\"\"\n save an event to local file system\n \"\"\"\n\n def __init__(self, device, app, event, profiling_method=None, tag=None):\n self.device = device\n self.app = app\n self.event = event\n if tag is None:\n from datetime import datetime\n tag = datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n self.tag = tag\n\n self.from_state = None\n self.to_state = None\n self.event_str = None\n\n self.profiling_method = profiling_method\n self.trace_remote_file = \"/data/local/tmp/event.trace\"\n self.is_profiling = False\n self.profiling_pid = -1\n self.sampling = None\n # sampling feature was added in Android 5.0 (API level 21)\n if profiling_method is not None and \\\n str(profiling_method) != \"full\" and \\\n self.device.get_sdk_version() >= 21:\n self.sampling = int(profiling_method)\n\n def to_dict(self):\n return {\n \"tag\": self.tag,\n \"event\": self.event.to_dict(),\n \"start_state\": self.from_state.state_str,\n \"stop_state\": self.to_state.state_str,\n \"event_str\": self.event_str\n }\n\n def save2dir(self, output_dir=None):\n # Save event\n if output_dir is None:\n if self.device.output_dir is None:\n return\n else:\n output_dir = os.path.join(self.device.output_dir, \"events\")\n try:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n event_json_file_path = \"%s/event_%s.json\" % (output_dir, self.tag)\n event_json_file = open(event_json_file_path, \"w\")\n json.dump(self.to_dict(), event_json_file, indent=2)\n event_json_file.close()\n except Exception as e:\n self.device.logger.warning(\"Saving event to dir failed.\")\n self.device.logger.warning(e)\n\n def save_views(self, output_dir=None):\n # Save views\n views = self.event.get_views()\n if views:\n for view_dict in views:\n self.from_state.save_view_img(view_dict=view_dict, output_dir=output_dir)\n\n def is_start_event(self):\n if isinstance(self.event, IntentEvent):\n intent_cmd = self.event.intent\n if \"start\" in intent_cmd and self.app.get_package_name() in intent_cmd:\n return True\n return False\n\n def start(self):\n \"\"\"\n start sending event\n \"\"\"\n self.from_state = self.device.get_current_state()\n self.start_profiling()\n self.event_str = self.event.get_event_str(self.from_state)\n print(\"Action: %s\" % self.event_str)\n self.device.send_event(self.event)\n\n def start_profiling(self):\n \"\"\"\n start profiling the current event\n @return:\n \"\"\"\n if self.profiling_method is None:\n return\n if self.is_profiling:\n return\n pid = self.device.get_app_pid(self.app)\n if pid is None:\n if self.is_start_event():\n start_intent = self.app.get_start_with_profiling_intent(self.trace_remote_file, self.sampling)\n self.event.intent = start_intent.get_cmd()\n self.is_profiling = True\n return\n if self.sampling is not None:\n self.device.adb.shell(\n [\"am\", \"profile\", \"start\", \"--sampling\", str(self.sampling), str(pid), self.trace_remote_file])\n else:\n self.device.adb.shell([\"am\", \"profile\", \"start\", str(pid), self.trace_remote_file])\n self.is_profiling = True\n self.profiling_pid = pid\n\n def stop(self):\n \"\"\"\n finish sending event\n \"\"\"\n self.stop_profiling()\n self.to_state = self.device.get_current_state()\n self.save2dir()\n self.save_views()\n\n def stop_profiling(self, output_dir=None):\n if self.profiling_method is None:\n return\n if not self.is_profiling:\n return\n try:\n if self.profiling_pid == -1:\n pid = self.device.get_app_pid(self.app)\n if pid is None:\n 
return\n self.profiling_pid = pid\n\n self.device.adb.shell([\"am\", \"profile\", \"stop\", str(self.profiling_pid)])\n if self.sampling is None:\n time.sleep(3) # guess this time can vary between machines\n\n if output_dir is None:\n if self.device.output_dir is None:\n return\n else:\n output_dir = os.path.join(self.device.output_dir, \"events\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n event_trace_local_path = \"%s/event_trace_%s.trace\" % (output_dir, self.tag)\n self.device.pull_file(self.trace_remote_file, event_trace_local_path)\n\n except Exception as e:\n self.device.logger.warning(\"profiling event failed\")\n self.device.logger.warning(e)" }, { "identifier": "UtgBasedInputPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgBasedInputPolicy(InputPolicy):\n \"\"\"\n state-based input policy\n \"\"\"\n\n def __init__(self, device, app, random_input):\n super(UtgBasedInputPolicy, self).__init__(device, app)\n self.random_input = random_input\n self.script = None\n self.master = None\n self.script_events = []\n self.last_event = None\n self.last_state = None\n self.current_state = None\n self.utg = UTG(device=device, app=app, random_input=random_input)\n self.script_event_idx = 0\n if self.device.humanoid is not None:\n self.humanoid_view_trees = []\n self.humanoid_events = []\n\n def generate_event(self, input_manager):\n \"\"\"\n generate an event\n @return:\n \"\"\"\n\n # Get current device state\n self.current_state = self.device.get_current_state()\n if self.current_state is None:\n import time\n time.sleep(5)\n return KeyEvent(name=\"BACK\")\n\n self.__update_utg()\n\n # update last view trees for humanoid\n if self.device.humanoid is not None:\n self.humanoid_view_trees = self.humanoid_view_trees + [self.current_state.view_tree]\n if len(self.humanoid_view_trees) > 4:\n self.humanoid_view_trees = self.humanoid_view_trees[1:]\n\n event = None\n\n # if the previous operation is not finished, continue\n if len(self.script_events) > self.script_event_idx:\n event = self.script_events[self.script_event_idx].get_transformed_event(self)\n self.script_event_idx += 1\n\n # First try matching a state defined in the script\n if event is None and self.script is not None:\n operation = self.script.get_operation_based_on_state(self.current_state)\n if operation is not None:\n self.script_events = operation.events\n # restart script\n event = self.script_events[0].get_transformed_event(self)\n self.script_event_idx = 1\n\n if event is None:\n old_state, event = self.generate_event_based_on_utg(input_manager)\n import time\n time.sleep(3)\n # update last events for humanoid\n if self.device.humanoid is not None:\n self.humanoid_events = self.humanoid_events + [event]\n if len(self.humanoid_events) > 3:\n self.humanoid_events = self.humanoid_events[1:]\n\n self.last_state = self.current_state if old_state is None else old_state\n self.last_event = event\n return event\n\n def __update_utg(self):\n self.utg.add_transition(self.last_event, self.last_state, self.current_state)\n\n @abstractmethod\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on UTG\n :return: InputEvent\n \"\"\"\n pass" }, { "identifier": "UtgNaiveSearchPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgNaiveSearchPolicy(UtgBasedInputPolicy):\n \"\"\"\n depth-first strategy to explore UFG (old)\n \"\"\"\n\n def __init__(self, device, app, random_input, search_method):\n super(UtgNaiveSearchPolicy, self).__init__(device, app, 
random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.explored_views = set()\n self.state_transitions = set()\n self.search_method = search_method\n\n self.last_event_flag = \"\"\n self.last_event_str = None\n self.last_state = None\n\n self.preferred_buttons = [\"yes\", \"ok\", \"activate\", \"detail\", \"more\", \"access\",\n \"allow\", \"check\", \"agree\", \"try\", \"go\", \"next\"]\n\n def generate_event_based_on_utg(self):\n \"\"\"\n generate an event based on current device state\n note: ensure these fields are properly maintained in each transaction:\n last_event_flag, last_touched_view, last_state, exploited_views, state_transitions\n @return: InputEvent\n \"\"\"\n self.save_state_transition(self.last_event_str, self.last_state, self.current_state)\n\n if self.device.is_foreground(self.app):\n # the app is in foreground, clear last_event_flag\n self.last_event_flag = EVENT_FLAG_STARTED\n else:\n number_of_starts = self.last_event_flag.count(EVENT_FLAG_START_APP)\n # If we have tried too many times but the app is still not started, stop DroidBot\n if number_of_starts > MAX_NUM_RESTARTS:\n raise InputInterruptedException(\"The app cannot be started.\")\n\n # if app is not started, try start it\n if self.last_event_flag.endswith(EVENT_FLAG_START_APP):\n # It seems the app stuck at some state, and cannot be started\n # just pass to let viewclient deal with this case\n self.logger.info(\"The app had been restarted %d times.\", number_of_starts)\n self.logger.info(\"Trying to restart app...\")\n pass\n else:\n start_app_intent = self.app.get_start_intent()\n\n self.last_event_flag += EVENT_FLAG_START_APP\n self.last_event_str = EVENT_FLAG_START_APP\n return IntentEvent(start_app_intent)\n\n # select a view to click\n view_to_touch = self.select_a_view(self.current_state)\n\n # if no view can be selected, restart the app\n if view_to_touch is None:\n stop_app_intent = self.app.get_stop_intent()\n self.last_event_flag += EVENT_FLAG_STOP_APP\n self.last_event_str = EVENT_FLAG_STOP_APP\n return IntentEvent(stop_app_intent)\n\n view_to_touch_str = view_to_touch['view_str']\n if view_to_touch_str.startswith('BACK'):\n result = KeyEvent('BACK')\n else:\n result = TouchEvent(view=view_to_touch)\n\n self.last_event_flag += EVENT_FLAG_TOUCH\n self.last_event_str = view_to_touch_str\n self.save_explored_view(self.current_state, self.last_event_str)\n return result\n\n def select_a_view(self, state):\n \"\"\"\n select a view in the view list of given state, let droidbot touch it\n @param state: DeviceState\n @return:\n \"\"\"\n views = []\n for view in state.views:\n if view['enabled'] and len(view['children']) == 0:\n views.append(view)\n\n if self.random_input:\n random.shuffle(views)\n\n # add a \"BACK\" view, consider go back first/last according to search policy\n mock_view_back = {'view_str': 'BACK_%s' % state.foreground_activity,\n 'text': 'BACK_%s' % state.foreground_activity}\n if self.search_method == POLICY_NAIVE_DFS:\n views.append(mock_view_back)\n elif self.search_method == POLICY_NAIVE_BFS:\n views.insert(0, mock_view_back)\n\n # first try to find a preferable view\n for view in views:\n view_text = view['text'] if view['text'] is not None else ''\n view_text = view_text.lower().strip()\n if view_text in self.preferred_buttons \\\n and (state.foreground_activity, view['view_str']) not in self.explored_views:\n self.logger.info(\"selected an preferred view: %s\" % view['view_str'])\n return view\n\n # try to find a un-clicked view\n for view in views:\n if 
(state.foreground_activity, view['view_str']) not in self.explored_views:\n self.logger.info(\"selected an un-clicked view: %s\" % view['view_str'])\n return view\n\n # if all enabled views have been clicked, try jump to another activity by clicking one of state transitions\n if self.random_input:\n random.shuffle(views)\n transition_views = {transition[0] for transition in self.state_transitions}\n for view in views:\n if view['view_str'] in transition_views:\n self.logger.info(\"selected a transition view: %s\" % view['view_str'])\n return view\n\n # no window transition found, just return a random view\n # view = views[0]\n # self.logger.info(\"selected a random view: %s\" % view['view_str'])\n # return view\n\n # DroidBot stuck on current state, return None\n self.logger.info(\"no view could be selected in state: %s\" % state.tag)\n return None\n\n def save_state_transition(self, event_str, old_state, new_state):\n \"\"\"\n save the state transition\n @param event_str: str, representing the event cause the transition\n @param old_state: DeviceState\n @param new_state: DeviceState\n @return:\n \"\"\"\n if event_str is None or old_state is None or new_state is None:\n return\n if new_state.is_different_from(old_state):\n self.state_transitions.add((event_str, old_state.tag, new_state.tag))\n\n def save_explored_view(self, state, view_str):\n \"\"\"\n save the explored view\n @param state: DeviceState, where the view located\n @param view_str: str, representing a view\n @return:\n \"\"\"\n if not state:\n return\n state_activity = state.foreground_activity\n self.explored_views.add((state_activity, view_str))" }, { "identifier": "UtgGreedySearchPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgGreedySearchPolicy(UtgBasedInputPolicy):\n \"\"\"\n DFS/BFS (according to search_method) strategy to explore UFG (new)\n \"\"\"\n\n def __init__(self, device, app, random_input, search_method):\n super(UtgGreedySearchPolicy, self).__init__(device, app, random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.search_method = search_method\n\n self.preferred_buttons = [\"yes\", \"ok\", \"activate\", \"detail\", \"more\", \"access\",\n \"allow\", \"check\", \"agree\", \"try\", \"go\", \"next\"]\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n self.__num_restarts = 0\n self.__num_steps_outside = 0\n self.__event_trace = \"\"\n self.__missed_states = set()\n self.__random_explore = False\n\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n current_state = self.current_state\n self.logger.info(\"Current state: %s\" % current_state.state_str)\n if current_state.state_str in self.__missed_states:\n self.__missed_states.remove(current_state.state_str)\n\n if current_state.get_app_activity_depth(self.app) < 0:\n # If the app is not in the activity stack\n start_app_intent = self.app.get_start_intent()\n\n # It seems the app stucks at some state, has been\n # 1) force stopped (START, STOP)\n # just start the app again by increasing self.__num_restarts\n # 2) started at least once and cannot be started (START)\n # pass to let viewclient deal with this case\n # 3) nothing\n # a normal start. 
clear self.__num_restarts.\n\n if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \\\n or self.__event_trace.endswith(EVENT_FLAG_START_APP):\n self.__num_restarts += 1\n self.logger.info(\"The app had been restarted %d times.\", self.__num_restarts)\n else:\n self.__num_restarts = 0\n\n # pass (START) through\n if not self.__event_trace.endswith(EVENT_FLAG_START_APP):\n if self.__num_restarts > MAX_NUM_RESTARTS:\n # If the app had been restarted too many times, enter random mode\n msg = \"The app had been restarted too many times. Entering random mode.\"\n self.logger.info(msg)\n self.__random_explore = True\n else:\n # Start the app\n self.__event_trace += EVENT_FLAG_START_APP\n self.logger.info(\"Trying to start the app...\")\n return IntentEvent(intent=start_app_intent)\n\n elif current_state.get_app_activity_depth(self.app) > 0:\n # If the app is in activity stack but is not in foreground\n self.__num_steps_outside += 1\n\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:\n # If the app has not been in foreground for too long, try to go back\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:\n stop_app_intent = self.app.get_stop_intent()\n go_back_event = IntentEvent(stop_app_intent)\n else:\n go_back_event = KeyEvent(name=\"BACK\")\n self.__event_trace += EVENT_FLAG_NAVIGATE\n self.logger.info(\"Going back to the app...\")\n return go_back_event\n else:\n # If the app is in foreground\n self.__num_steps_outside = 0\n\n # Get all possible input events\n possible_events = current_state.get_possible_input()\n\n if self.random_input:\n random.shuffle(possible_events)\n\n if self.search_method == POLICY_GREEDY_DFS:\n possible_events.append(KeyEvent(name=\"BACK\"))\n elif self.search_method == POLICY_GREEDY_BFS:\n possible_events.insert(0, KeyEvent(name=\"BACK\"))\n\n # get humanoid result, use the result to sort possible events\n # including back events\n if self.device.humanoid is not None:\n possible_events = self.__sort_inputs_by_humanoid(possible_events)\n\n # If there is an unexplored event, try the event first\n for input_event in possible_events:\n if not self.utg.is_event_explored(event=input_event, state=current_state):\n self.logger.info(\"Trying an unexplored event.\")\n self.__event_trace += EVENT_FLAG_EXPLORE\n return input_event\n\n target_state = self.__get_nav_target(current_state)\n if target_state:\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=target_state)\n if navigation_steps and len(navigation_steps) > 0:\n self.logger.info(\"Navigating to %s, %d steps left.\" % (target_state.state_str, len(navigation_steps)))\n self.__event_trace += EVENT_FLAG_NAVIGATE\n return navigation_steps[0][1]\n\n if self.__random_explore:\n self.logger.info(\"Trying random event.\")\n random.shuffle(possible_events)\n return possible_events[0]\n\n # If couldn't find a exploration target, stop the app\n stop_app_intent = self.app.get_stop_intent()\n self.logger.info(\"Cannot find an exploration target. 
Trying to restart app...\")\n self.__event_trace += EVENT_FLAG_STOP_APP\n return IntentEvent(intent=stop_app_intent)\n\n def __sort_inputs_by_humanoid(self, possible_events):\n if sys.version.startswith(\"3\"):\n from xmlrpc.client import ServerProxy\n else:\n from xmlrpclib import ServerProxy\n proxy = ServerProxy(\"http://%s/\" % self.device.humanoid)\n request_json = {\n \"history_view_trees\": self.humanoid_view_trees,\n \"history_events\": [x.__dict__ for x in self.humanoid_events],\n \"possible_events\": [x.__dict__ for x in possible_events],\n \"screen_res\": [self.device.display_info[\"width\"],\n self.device.display_info[\"height\"]]\n }\n result = json.loads(proxy.predict(json.dumps(request_json)))\n new_idx = result[\"indices\"]\n text = result[\"text\"]\n new_events = []\n\n # get rid of infinite recursive by randomizing first event\n if not self.utg.is_state_reached(self.current_state):\n new_first = random.randint(0, len(new_idx) - 1)\n new_idx[0], new_idx[new_first] = new_idx[new_first], new_idx[0]\n\n for idx in new_idx:\n if isinstance(possible_events[idx], SetTextEvent):\n possible_events[idx].text = text\n new_events.append(possible_events[idx])\n return new_events\n\n def __get_nav_target(self, current_state):\n # If last event is a navigation event\n if self.__nav_target and self.__event_trace.endswith(EVENT_FLAG_NAVIGATE):\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)\n if navigation_steps and 0 < len(navigation_steps) <= self.__nav_num_steps:\n # If last navigation was successful, use current nav target\n self.__nav_num_steps = len(navigation_steps)\n return self.__nav_target\n else:\n # If last navigation was failed, add nav target to missing states\n self.__missed_states.add(self.__nav_target.state_str)\n\n reachable_states = self.utg.get_reachable_states(current_state)\n if self.random_input:\n random.shuffle(reachable_states)\n\n for state in reachable_states:\n # Only consider foreground states\n if state.get_app_activity_depth(self.app) != 0:\n continue\n # Do not consider missed states\n if state.state_str in self.__missed_states:\n continue\n # Do not consider explored states\n if self.utg.is_state_explored(state):\n continue\n self.__nav_target = state\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)\n if len(navigation_steps) > 0:\n self.__nav_num_steps = len(navigation_steps)\n return state\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n return None" }, { "identifier": "UtgReplayPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgReplayPolicy(InputPolicy):\n \"\"\"\n Replay DroidBot output generated by UTG policy\n \"\"\"\n\n def __init__(self, device, app, replay_output):\n super(UtgReplayPolicy, self).__init__(device, app)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.replay_output = replay_output\n\n import os\n event_dir = os.path.join(replay_output, \"events\")\n self.event_paths = sorted([os.path.join(event_dir, x) for x in\n next(os.walk(event_dir))[2]\n if x.endswith(\".json\")])\n # skip HOME and start app intent\n self.device = device\n self.app = app\n self.event_idx = 2\n self.num_replay_tries = 0\n self.utg = UTG(device=device, app=app, random_input=None)\n self.last_event = None\n self.last_state = None\n self.current_state = None\n\n def generate_event(self):\n \"\"\"\n generate an event based on replay_output\n @return: InputEvent\n \"\"\"\n import time\n while self.event_idx < 
len(self.event_paths) and \\\n self.num_replay_tries < MAX_REPLY_TRIES:\n self.num_replay_tries += 1\n current_state = self.device.get_current_state()\n if current_state is None:\n time.sleep(5)\n self.num_replay_tries = 0\n return KeyEvent(name=\"BACK\")\n\n curr_event_idx = self.event_idx\n self.__update_utg()\n while curr_event_idx < len(self.event_paths):\n event_path = self.event_paths[curr_event_idx]\n with open(event_path, \"r\") as f:\n curr_event_idx += 1\n\n try:\n event_dict = json.load(f)\n except Exception as e:\n self.logger.info(\"Loading %s failed\" % event_path)\n continue\n\n if event_dict[\"start_state\"] != current_state.state_str:\n continue\n if not self.device.is_foreground(self.app):\n # if current app is in background, bring it to foreground\n component = self.app.get_package_name()\n if self.app.get_main_activity():\n component += \"/%s\" % self.app.get_main_activity()\n return IntentEvent(Intent(suffix=component))\n \n self.logger.info(\"Replaying %s\" % event_path)\n self.event_idx = curr_event_idx\n self.num_replay_tries = 0\n # return InputEvent.from_dict(event_dict[\"event\"])\n event = InputEvent.from_dict(event_dict[\"event\"])\n self.last_state = self.current_state\n self.last_event = event\n return event \n\n time.sleep(5)\n\n # raise InputInterruptedException(\"No more record can be replayed.\")\n def __update_utg(self):\n self.utg.add_transition(self.last_event, self.last_state, self.current_state)" }, { "identifier": "ManualPolicy", "path": "droidbot/input_policy.py", "snippet": "class ManualPolicy(UtgBasedInputPolicy):\n \"\"\"\n manually explore UFG\n \"\"\"\n\n def __init__(self, device, app):\n super(ManualPolicy, self).__init__(device, app, False)\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.__first_event = True\n\n def generate_event_based_on_utg(self):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n if self.__first_event:\n self.__first_event = False\n self.logger.info(\"Trying to start the app...\")\n start_app_intent = self.app.get_start_intent()\n return IntentEvent(intent=start_app_intent)\n else:\n return ManualEvent()" }, { "identifier": "TaskPolicy", "path": "droidbot/input_policy.py", "snippet": "class TaskPolicy(UtgBasedInputPolicy):\n\n def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):\n super(TaskPolicy, self).__init__(device, app, random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.task = task\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n self.__num_restarts = 0\n self.__num_steps_outside = 0\n self.__event_trace = \"\"\n self.__missed_states = set()\n self.__random_explore = random_input\n self.__action_history = []\n self.__thought_history = []\n self.use_memory = use_memory\n # if use_memory:\n # self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)\n if self.use_memory:\n self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()\n if not self.similar_ele_function:\n self.use_memory = False\n print('=============\\nWarning: Did not find the memory of this app, the app memory is disabled\\n=============')\n else:\n print(f'============\\nFound element: {self.similar_ele_statement}\\nPath: {self.similar_ele_path}\\nFunction: {self.similar_ele_function}\\n============')\n self.state_ele_memory = {} # memorize some important states that contain elements of insight\n\n def get_most_similar_element(self):\n from 
InstructorEmbedding import INSTRUCTOR\n from sklearn.metrics.pairwise import cosine_similarity\n import numpy as np\n model = INSTRUCTOR('hkunlp/instructor-xl')\n task_embedding = model.encode('task: ' + self.task).reshape(1, -1)\n\n with open('memory/node_filtered_elements.json') as file:\n ele_statements = json.load(file)\n with open('memory/element_description.json') as file:\n ele_functions = json.load(file)\n with open('memory/embedded_elements_desc.json') as file:\n embeddings = json.load(file)\n app_name = self.device.output_dir.split('/')[-1]\n if app_name not in embeddings.keys():\n return None, None, None\n app_embeddings = embeddings[app_name]\n\n # similarities = {}\n max_similarity, similar_ele_idx = -9999, -9999\n for state_str, elements in app_embeddings.items():\n # if the target element is in the first ui, no onclick is needed\n # if ele_statements[app_name][state_str]['path'] == []:\n # continue\n # similarities[state_str] = []\n for idx, ele in enumerate(elements):\n if ele:\n npele = np.array(ele).reshape(1, -1)\n similarity = cosine_similarity(task_embedding, npele)[0][0]\n else:\n similarity = -9999\n # similarities[state_str].append(similarity)\n if similarity > max_similarity:\n max_similarity = similarity\n similar_ele_idx = idx\n similar_state_str = state_str\n\n similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]\n similar_ele_path = ele_statements[app_name][similar_state_str]['path']\n similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]\n del model\n return similar_ele_path, similar_ele_desc, similar_ele\n \n def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):\n prefix_scroll_event = []\n if old_state is None:\n old_state = self.current_state \n for _ in range(MAX_SCROLL_NUM): # first scroll up to the top\n self.device.send_event(ScrollEvent(view=scroller, direction=\"UP\"))\n scrolled_state = self.device.get_current_state()\n self.utg.add_transition(ScrollEvent(view=scroller, direction=\"UP\"), old_state, scrolled_state)\n old_state = scrolled_state\n state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()\n scrolled_new_views = [] # judge whether there is a new view after scrolling\n for scrolled_view in scrolled_views:\n if scrolled_view not in all_views_for_mark:\n scrolled_new_views.append(scrolled_view)\n all_views_for_mark.append(scrolled_view)\n if len(scrolled_new_views) == 0:\n break\n\n prefix_scroll_event.append(ScrollEvent(view=scroller, direction=\"UP\"))\n return prefix_scroll_event\n\n\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n current_state = self.current_state\n self.logger.info(\"Current state: %s\" % current_state.state_str)\n if current_state.state_str in self.__missed_states:\n self.__missed_states.remove(current_state.state_str)\n\n if current_state.get_app_activity_depth(self.app) < 0:\n # If the app is not in the activity stack\n start_app_intent = self.app.get_start_intent()\n\n # It seems the app stucks at some state, has been\n # 1) force stopped (START, STOP)\n # just start the app again by increasing self.__num_restarts\n # 2) started at least once and cannot be started (START)\n # pass to let viewclient deal with this case\n # 3) nothing\n # a normal start. 
clear self.__num_restarts.\n\n if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \\\n or self.__event_trace.endswith(EVENT_FLAG_START_APP):\n self.__num_restarts += 1\n self.logger.info(\"The app had been restarted %d times.\", self.__num_restarts)\n else:\n self.__num_restarts = 0\n\n # pass (START) through\n if not self.__event_trace.endswith(EVENT_FLAG_START_APP):\n if self.__num_restarts > MAX_NUM_RESTARTS:\n # If the app had been restarted too many times, enter random mode\n msg = \"The app had been restarted too many times. Entering random mode.\"\n self.logger.info(msg)\n self.__random_explore = True\n else:\n # Start the app\n self.__event_trace += EVENT_FLAG_START_APP\n self.logger.info(\"Trying to start the app...\")\n # self.__action_history = [f'- start the app {self.app.app_name}']\n self.__action_history = [f'- launchApp {self.app.app_name}']\n self.__thought_history = [f'launch the app {self.app.app_name} to finish the task {self.task}']\n return None, IntentEvent(intent=start_app_intent)\n\n elif current_state.get_app_activity_depth(self.app) > 0:\n # If the app is in activity stack but is not in foreground\n self.__num_steps_outside += 1\n\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:\n # If the app has not been in foreground for too long, try to go back\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:\n stop_app_intent = self.app.get_stop_intent()\n go_back_event = IntentEvent(stop_app_intent)\n else:\n go_back_event = KeyEvent(name=\"BACK\")\n self.__event_trace += EVENT_FLAG_NAVIGATE\n self.logger.info(\"Going back to the app...\")\n self.__action_history.append('- go back')\n self.__thought_history.append('the app has not been in foreground for too long, try to go back')\n return None, go_back_event\n else:\n # If the app is in foreground\n self.__num_steps_outside = 0\n \n \n scrollable_views = current_state.get_scrollable_views()#self._get_scrollable_views(current_state)\n \n if len(scrollable_views) > 0:\n '''\n if there is at least one scroller in the screen, we scroll each scroller many times until all the screens after scrolling have been recorded, you do not need to read\n '''\n # print(scrollable_views)\n\n actions_dict = {}\n whole_state_views, whole_state_actions, whole_state_strs = [], [], []\n\n # state_strs = [current_state.state_str]\n state_prompt, current_candidate_actions, current_views, _ = current_state.get_described_actions()\n all_views_for_mark = copy.deepcopy(current_views) # just for judging whether the screen has been scrolled up to the top\n\n for scrollerid in range(len(scrollable_views)):\n scroller = scrollable_views[scrollerid]\n # prefix_scroll_event = []\n actions_dict[scrollerid] = []\n\n prefix_scroll_event = self._scroll_to_top(scroller, all_views_for_mark)\n \n # after scrolling to the top, update the current_state\n top_state = self.device.get_current_state()\n state_prompt, top_candidate_actions, top_views, _ = top_state.get_described_actions()\n all_views_without_id, all_actions = top_views, top_candidate_actions\n\n too_few_item_time = 0\n\n for _ in range(MAX_SCROLL_NUM): # then scroll down to the bottom\n whole_state_strs.append(top_state.state_str) # record the states from the top to the bottom\n self.device.send_event(ScrollEvent(view=scroller, direction=\"DOWN\"))\n scrolled_state = self.device.get_current_state()\n state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()\n \n scrolled_new_views = []\n for scrolled_view_id in 
range(len(scrolled_views)):\n scrolled_view = scrolled_views[scrolled_view_id]\n if scrolled_view not in all_views_without_id:\n scrolled_new_views.append(scrolled_view)\n all_views_without_id.append(scrolled_view)\n all_actions.append(prefix_scroll_event + [ScrollEvent(view=scroller, direction=\"DOWN\"), scrolled_candidate_actions[scrolled_view_id]])\n # print('found new views:', scrolled_new_views)\n if len(scrolled_new_views) == 0:\n break\n \n prefix_scroll_event.append(ScrollEvent(view=scroller, direction=\"DOWN\"))\n\n if len(scrolled_new_views) < 2:\n too_few_item_time += 1\n if too_few_item_time >= 2:\n break\n\n self.utg.add_transition(ScrollEvent(view=scroller, direction=\"DOWN\"), top_state, scrolled_state)\n top_state = scrolled_state\n \n # filter out the views that have been added to the whole_state by scrolling other scrollers\n for all_view_id in range(len(all_views_without_id)):\n view = all_views_without_id[all_view_id]\n if view not in whole_state_views:\n whole_state_views.append(view)\n whole_state_actions.append(all_actions[all_view_id])\n \n all_views_for_mark = []\n _ = self._scroll_to_top(scroller, all_views_for_mark, top_state)\n # print(whole_state_views)\n action, candidate_actions, target_view, thought = self._get_action_from_views_actions(\n views=whole_state_views, candidate_actions=whole_state_actions, state_strs=whole_state_strs, action_history=self.__action_history, thought_history=self.__thought_history)\n\n if isinstance(action, list): # the screen has to be scrolled first\n last_state = None\n for eventid in range(len(action) - 1):\n self.device.send_event(action[eventid])\n last_state = self.device.get_current_state()\n # self.__action_history.append(current_state.get_action_desc(action[eventid]))\n self.__action_history.append(current_state.get_action_descv2(action[-1], target_view))\n self.__thought_history.append(thought)\n return last_state, action[-1]\n '''\n end for dealing with scrollers\n '''\n else:\n action, candidate_actions, target_view, thought = self._get_action_from_views_actions(\n current_state=current_state, action_history=self.__action_history, thought_history=self.__thought_history, state_strs=current_state.state_str)\n \n if action == FINISHED:\n return None, FINISHED\n if action is not None:\n self.__action_history.append(current_state.get_action_descv2(action, target_view))\n self.__thought_history.append(thought)\n return None, action\n\n if self.__random_explore:\n self.logger.info(\"Trying random event.\")\n action = random.choice(candidate_actions)\n self.__action_history.append(current_state.get_action_descv2(action, target_view))\n self.__thought_history.append('random trying')\n return None, action\n\n # If couldn't find a exploration target, stop the app\n stop_app_intent = self.app.get_stop_intent()\n self.logger.info(\"Cannot find an exploration target. 
Trying to restart app...\")\n self.__action_history.append('- stop the app')\n self.__thought_history.append(\"couldn't find a exploration target, stop the app\")\n self.__event_trace += EVENT_FLAG_STOP_APP\n return None, IntentEvent(intent=stop_app_intent)\n \n def _save2yaml(self, file_name, state_prompt, idx, state_str, inputs='null'):\n if not os.path.exists(file_name):\n tmp_data = {\n 'task_name': self.task,\n 'step_num': 0,\n 'records': []\n }\n with open(file_name, 'w', encoding='utf-8') as f:\n yaml.dump(tmp_data, f)\n\n with open(file_name, 'r', encoding='utf-8') as f:\n old_yaml_data = yaml.safe_load(f)\n \n new_records = old_yaml_data['records']\n new_records.append(\n {'State': state_prompt,\n 'Choice': idx,\n 'Input': inputs,\n 'state_str': state_str}\n )\n # import pdb;pdb.set_trace()\n data = {\n 'task_name': self.task,\n 'step_num': len(list(old_yaml_data['records'])),\n 'records': new_records\n }\n with open(file_name, 'w', encoding='utf-8') as f:\n yaml.dump(data, f)\n def _make_prompt_lmql(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):\n if self.use_memory:\n # if isinstance(state_str, list):\n # if len(state_str) == 1:\n # state_str = state_str[0]\n # else:\n # state_str = self.memory.hash_state(state_prompt)\n # new_state_prompt = self.f(action_history, state_prompt, state_str)\n # if new_state_prompt !z= None and new_state_prompt != 'no_description':\n # state_prompt = new_state_prompt\n if len(action_history) <= len(self.similar_ele_path):\n current_ui_id = len(action_history) - 1\n new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)\n if new_state_prompt != state_prompt: # current state contains an element of insight\n self.state_ele_memory[state_str] = new_state_prompt\n state_prompt = new_state_prompt\n # elif state_str in self.state_ele_memory.keys():\n # state_prompt = self.state_ele_memory[state_str]\n\n if use_thoughts:\n history_with_thought = []\n for idx in range(len(action_history)):\n history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])\n else:\n history_with_thought = action_history\n\n\n return '\\n'.join(history_with_thought),state_prompt\n def _make_prompt(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):\n if self.use_memory:\n # if isinstance(state_str, list):\n # if len(state_str) == 1:\n # state_str = state_str[0]\n # else:\n # state_str = self.memory.hash_state(state_prompt)\n # new_state_prompt = self.f(action_history, state_prompt, state_str)\n # if new_state_prompt !z= None and new_state_prompt != 'no_description':\n # state_prompt = new_state_prompt\n if len(action_history) <= len(self.similar_ele_path):\n current_ui_id = len(action_history) - 1\n new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)\n if new_state_prompt != state_prompt: # current state contains an element of insight\n self.state_ele_memory[state_str] = new_state_prompt\n state_prompt = new_state_prompt\n # elif state_str in self.state_ele_memory.keys():\n # state_prompt = self.state_ele_memory[state_str]\n\n if use_thoughts:\n history_with_thought = []\n for idx in range(len(action_history)):\n history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])\n else:\n history_with_thought = action_history\n\n introduction = '''You are a 
smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.'''\n task_prompt = 'Task: ' + self.task\n history_prompt = 'Previous UI actions: \\n' + '\\n'.join(history_with_thought)\n full_state_prompt = 'Current UI state: \\n' + state_prompt\n request_prompt = '''Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>'''\n prompt = introduction + '\\n' + task_prompt + '\\n' + history_prompt + '\\n' + full_state_prompt + '\\n' + request_prompt\n return prompt\n \n def _extract_input_text(self, string, start='Text: ', end=' Thought'):\n start_index = string.find(start) + len(start) # Find the location of 'start'\n if start_index == -1:\n start_index = 0\n end_index = string.find(end) # Find the location of 'end'\n substring = string[start_index:end_index] if end_index != -1 else string[start_index:]\n return substring\n \n def _extract_input_textv2(self, string):\n if string[:11] == 'InputText: ':\n return string[11:]\n else:\n return string\n \n def _get_text_view_description(self, view):\n content_description = safe_dict_get(view, 'content_description', default='')\n view_text = safe_dict_get(view, 'text', default='')\n\n view_desc = f\"<input class='&'>#</input>\"#.replace('&', view_class)#.replace('#', text)\n if view_text:\n view_desc = view_desc.replace('#', view_text)\n else:\n view_desc = view_desc.replace('#', '')\n if content_description:\n view_desc = view_desc.replace('&', content_description)\n else:\n view_desc = view_desc.replace(\" class='&'\", \"\")\n return view_desc\n\n def _get_action_from_views_actions(self, action_history, thought_history, views=None, candidate_actions=None, state_strs=None, current_state=None):\n '''\n get action choice from LLM based on a list of views and corresponding actions\n '''\n if current_state:\n state_prompt, candidate_actions, _, _ = current_state.get_described_actions()\n state_str = current_state.state_str\n if USE_LMQL:\n history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,\n thought_history=thought_history) \n else:\n prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)\n else:\n views_with_id = []\n for id in range(len(views)):\n views_with_id.append(tools.insert_id_into_view(views[id], id))\n state_prompt = '\\n'.join(views_with_id)\n state_str = tools.hash_string(state_prompt)\n if USE_LMQL:\n history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,\n thought_history=thought_history) \n else:\n prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)\n\n # ids = [str(idx) for idx, i in enumerate(candidate_actions)]\n ids = str([i for i in range(len(candidate_actions))])\n 
\n if USE_LMQL:\n idx, action_type, input_text=prompt_llm_with_history(task=self.task, history=history, ui_desc=state_prompt, ids=ids)\n else:\n print('********************************** prompt: **********************************')\n print(prompt)\n print('********************************** end of prompt **********************************')\n response = tools.query_gpt(prompt)\n \n print(f'response: {response}')\n idx, action_type, input_text = tools.extract_action(response)\n\n file_name = self.device.output_dir +'/'+ self.task.replace('\"', '_').replace(\"'\", '_') + '.yaml' #str(str(time.time()).replace('.', ''))\n idx = int(idx)\n selected_action = candidate_actions[idx]\n \n selected_view_description = tools.get_item_properties_from_id(ui_state_desc=state_prompt, view_id=idx)\n thought = ''# tools.get_thought(response)\n\n if isinstance(selected_action, SetTextEvent):\n if input_text != \"N/A\" and input_text != None:\n selected_action.text = input_text.replace('\"', '').replace(' ', '-')\n if len(selected_action.text) > 30: # heuristically disable long text input\n selected_action.text = ''\n else:\n selected_action.text = ''\n self._save2yaml(file_name, state_prompt, idx, state_strs, inputs=selected_action.text)\n else:\n self._save2yaml(file_name, state_prompt, idx, state_strs, inputs='null')\n return selected_action, candidate_actions, selected_view_description, thought\n\n def _insert_predictions_into_state_prompt(self, state_prompt, current_state_item_descriptions):\n state_prompt_list = state_prompt.split('>\\n')\n item_list = []\n for view_desc in state_prompt_list:\n if view_desc[0] == ' ':\n view_desc = view_desc[1:]\n if view_desc[-1] != '>':\n view_desc = view_desc + '>'\n view_desc_without_id = tools.get_view_without_id(view_desc)\n if view_desc_without_id in current_state_item_descriptions.keys():\n prediction = 'title=' + current_state_item_descriptions[view_desc_without_id]\n view_desc_list = view_desc.split(' ', 2)\n if len(view_desc_list) > 2: # for example, <button id=3 class='More options' checked=False></button>\n inserted_view = view_desc_list[0] + ' ' + view_desc_list[1] + ' ' + prediction + ' ' + view_desc_list[2]\n else: # for example, <p id=4>June</p>\n latter_part = view_desc_list[1].split('>', 1)\n inserted_view = view_desc_list[0] + ' ' + latter_part[0] + ' ' + prediction + '>' + latter_part[1]\n if inserted_view[-1] != '>':\n inserted_view += '>'\n item_list.append(inserted_view)\n else:\n item_list.append(view_desc)\n return '\\n'.join(item_list)\n\n def _get_item_prediction(self, action_history, state_prompt, state_str):\n '''\n find the most match history_state in memory_graph based on action_history. 
\n match the current items in device_state with the history items in history_state, \n return the predicted screen after touching the item\n if can not find the device_state not in action_history, return None, can decide whether to explore\n '''\n def parse_history_views(history):\n parsed_views = []\n for history_action in history:\n history_action_list = history_action.split(': ', 1)\n if 'launchApp' in history_action:\n return []\n latter_part = history_action_list[1]\n if ' InputText:' in latter_part:\n target_view = latter_part.split(' InputText:', 1)[0]\n elif ' Reason:' in latter_part:\n target_view = latter_part.split(' Reason:', 1)[0]\n else:\n target_view = latter_part\n parsed_views.append(target_view)\n return parsed_views\n \n action_history = parse_history_views(action_history[1:]) # ignore the first action, which is launching the app\n \n # search the current state str in memory based on history actions\n current_state_str = self.memory.get_first_state_str()\n next_state_str = None\n for actionid in range(0, len(action_history)):\n actioned_view = action_history[actionid] #action_history[actionid].rsplit('.', 1)[0]\n next_state_str = self.memory.get_successor_by_node_edge(current_state_str, actioned_view)\n current_state_str = next_state_str\n # the past actions have lead to a state that does not exist in the memory\n if next_state_str == None:\n break\n if next_state_str == None:\n current_state_str = state_str\n # now, current_state_str is the current device state string, we should add all its successors' information into the items on this device state\n current_state_item_descriptions = self.memory.get_predictions_of_items(current_state_str)\n # import pdb;pdb.set_trace()\n if current_state_item_descriptions is None:\n return 'no_description' # there is no description of the current state, either it is the leaf node or it was not explored\n # import pdb;pdb.set_trace()\n return self._insert_predictions_into_state_prompt(state_prompt, current_state_item_descriptions)" }, { "identifier": "POLICY_NAIVE_DFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_NAIVE_DFS = \"dfs_naive\"" }, { "identifier": "POLICY_GREEDY_DFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_GREEDY_DFS = \"dfs_greedy\"" }, { "identifier": "POLICY_NAIVE_BFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_NAIVE_BFS = \"bfs_naive\"" }, { "identifier": "POLICY_GREEDY_BFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_GREEDY_BFS = \"bfs_greedy\"" }, { "identifier": "POLICY_REPLAY", "path": "droidbot/input_policy.py", "snippet": "POLICY_REPLAY = \"replay\"" }, { "identifier": "POLICY_MEMORY_GUIDED", "path": "droidbot/input_policy.py", "snippet": "POLICY_MEMORY_GUIDED = \"memory_guided\" # implemented in input_policy2" }, { "identifier": "POLICY_MANUAL", "path": "droidbot/input_policy.py", "snippet": "POLICY_MANUAL = \"manual\"" }, { "identifier": "POLICY_MONKEY", "path": "droidbot/input_policy.py", "snippet": "POLICY_MONKEY = \"monkey\"" }, { "identifier": "POLICY_NONE", "path": "droidbot/input_policy.py", "snippet": "POLICY_NONE = \"none\"" }, { "identifier": "POLICY_TASK", "path": "droidbot/input_policy.py", "snippet": "POLICY_TASK = \"task\"" } ]
import json import logging import subprocess import time from .input_event import EventLog from .input_policy import UtgBasedInputPolicy, UtgNaiveSearchPolicy, UtgGreedySearchPolicy, \ UtgReplayPolicy, \ ManualPolicy, TaskPolicy, \ POLICY_NAIVE_DFS, POLICY_GREEDY_DFS, \ POLICY_NAIVE_BFS, POLICY_GREEDY_BFS, \ POLICY_REPLAY, POLICY_MEMORY_GUIDED, \ POLICY_MANUAL, POLICY_MONKEY, POLICY_NONE, POLICY_TASK from .input_script import DroidBotScript from .input_policy2 import MemoryGuidedPolicy
13,374
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master):
DEFAULT_POLICY = POLICY_GREEDY_DFS DEFAULT_EVENT_INTERVAL = 1 DEFAULT_EVENT_COUNT = 100000000 DEFAULT_TIMEOUT = -1 class UnknownInputException(Exception): pass class InputManager(object): """ This class manages all events to send during app running """ def __init__(self, device, app, task, policy_name, random_input, event_count, event_interval, script_path=None, profiling_method=None, master=None, replay_output=None): """ manage input event sent to the target device :param device: instance of Device :param app: instance of App :param policy_name: policy of generating events, string :return: """ self.logger = logging.getLogger('InputEventManager') self.enabled = True self.device = device self.app = app self.task = task self.policy_name = policy_name self.random_input = random_input self.events = [] self.policy = None self.script = None self.event_count = event_count self.event_interval = event_interval self.replay_output = replay_output self.monkey = None if script_path is not None: f = open(script_path, 'r') script_dict = json.load(f) self.script = DroidBotScript(script_dict) self.policy = self.get_input_policy(device, app, master) self.profiling_method = profiling_method def get_input_policy(self, device, app, master):
if self.policy_name == POLICY_NONE:
15
2023-10-23 03:32:58+00:00
16k
f0uriest/interpax
tests/test_interpolate.py
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n : int\n Number of desired interpolation points.\n sx : ndarray or None\n Shift in x to evaluate at. If original data is f(x), interpolates to f(x + sx)\n dx : float\n Spacing of source points\n\n Returns\n -------\n fi : ndarray, shape(n, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft(f, axis=0)\n nx = c.shape[0]\n if sx is not None:\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n c = (c[None].T * sx).T\n c = jnp.moveaxis(c, 0, -1)\n pad = ((n - nx) // 2, n - nx - (n - nx) // 2)\n if nx % 2 != 0:\n pad = pad[::-1]\n c = jnp.fft.ifftshift(_pad_along_axis(jnp.fft.fftshift(c, axes=0), pad, axis=0))\n return jnp.fft.fft(c, axis=0).real" }, { "identifier": "fft_interp2d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=(\"n1\", \"n2\"))\ndef fft_interp2d(\n f: jax.Array,\n n1: int,\n n2: int,\n sx: jax.Array = None,\n sy: jax.Array = None,\n dx: float = 1.0,\n dy: float = 1.0,\n):\n \"\"\"Interpolation of a 2d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ny, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n1, n2 : int\n Number of desired interpolation points in x and y directions\n sx, sy : ndarray or None\n Shift in x and y to evaluate at. If original data is f(x,y), interpolates to\n f(x + sx, y + sy). Both must be provided or None\n dx, dy : float\n Spacing of source points in x and y\n\n Returns\n -------\n fi : ndarray, shape(n1, n2, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft2(f, axes=(0, 1))\n nx, ny = c.shape[:2]\n if (sx is not None) and (sy is not None):\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n sy = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(ny)[:, None] * sy / dy)\n c = (c[None].T * sx[None, :, :] * sy[:, None, :]).T\n c = jnp.moveaxis(c, 0, -1)\n padx = ((n1 - nx) // 2, n1 - nx - (n1 - nx) // 2)\n pady = ((n2 - ny) // 2, n2 - ny - (n2 - ny) // 2)\n if nx % 2 != 0:\n padx = padx[::-1]\n if ny % 2 != 0:\n pady = pady[::-1]\n\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=0), padx, axis=0), axes=0\n )\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=1), pady, axis=1), axes=1\n )\n\n return jnp.fft.fft2(c, axes=(0, 1)).real" }, { "identifier": "Interpolator1D", "path": "interpax/_spline.py", "snippet": "class Interpolator1D(eqx.Module):\n \"\"\"Convenience class for representing a 1D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. 
If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n ):\n x, f = map(jnp.asarray, (x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n\n self.derivs = {\"fx\": fx}\n\n def __call__(self, xq: jax.Array, dx: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n Query points where interpolation is desired\n dx : int >= 0\n Derivative to take.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp1d(\n xq,\n self.x,\n self.f,\n self.method,\n dx,\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator2D", "path": "interpax/_spline.py", "snippet": "class Interpolator2D(eqx.Module):\n \"\"\"Convenience class for representing a 2D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, f = map(jnp.asarray, (x, y, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.y = y\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n\n self.derivs = {\"fx\": fx, \"fy\": fy, \"fxy\": fxy}\n\n def __call__(self, xq: jax.Array, yq: jax.Array, dx: int = 0, dy: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq : ndarray, shape(Nq,)\n x, y query points where interpolation is desired\n dx, dy : int >= 0\n Derivative to take in x, y directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp2d(\n xq,\n yq,\n self.x,\n self.y,\n self.f,\n self.method,\n (dx, dy),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator3D", "path": "interpax/_spline.py", "snippet": "class Interpolator3D(eqx.Module):\n \"\"\"Convenience class for representing a 3D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y, z directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n z: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, z, f = map(jnp.asarray, (x, y, z, f))\n axis = kwargs.get(\"axis\", 0)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n self.x = x\n self.y = y\n self.z = z\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n\n self.derivs = {\n \"fx\": fx,\n \"fy\": fy,\n \"fz\": fz,\n \"fxy\": fxy,\n \"fxz\": fxz,\n \"fyz\": fyz,\n \"fxyz\": fxyz,\n }\n\n def __call__(\n self,\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n dx: int = 0,\n dy: int = 0,\n dz: int = 0,\n ):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq, zq : ndarray, shape(Nq,)\n x, y, z query points where interpolation is desired\n dx, dy, dz : int >= 0\n Derivative to take in x, y, z directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp3d(\n xq,\n yq,\n zq,\n self.x,\n self.y,\n self.z,\n self.f,\n self.method,\n (dx, dy, dz),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "interp1d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp1d(\n xq: jax.Array,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 1d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n query points where interpolation is desired\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: 
linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n derivative : int >= 0\n derivative order to calculate\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, f data, recommend using Interpolator1D\n which caches the calculation of the derivatives and spline coefficients.\n\n \"\"\"\n xq, x, f = map(jnp.asarray, (xq, x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n outshape = xq.shape + f.shape[1:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq = jnp.atleast_1d(xq)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n lowx, highx = _parse_extrap(extrap, 1)\n\n if period is not None:\n xq, x, f, fx = _make_periodic(xq, x, period, axis, f, fx)\n lowx = highx = True\n\n if method == \"nearest\":\n\n def derivative0():\n i = jnp.argmin(jnp.abs(xq[:, np.newaxis] - x[np.newaxis]), axis=1)\n return f[i]\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1])\n\n elif method == \"linear\":\n\n def derivative0():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n delta = xq - x[i - 1]\n fq = jnp.where(\n (dx == 0),\n jnp.take(f, i, axis).T,\n jnp.take(f, i - 1, axis).T + (delta * dxi * df.T),\n ).T\n return fq\n\n def derivative1():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n return (df.T * dxi).T\n\n def derivative2():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1, derivative2])\n\n elif method in (CUBIC_METHODS + (\"monotonic\", \"monotonic-0\")):\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n assert fx.shape == f.shape\n\n dx = x[i] - x[i - 1]\n delta = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n t = delta * dxi\n\n f0 = jnp.take(f, i - 1, axis)\n f1 = jnp.take(f, i, axis)\n fx0 = (jnp.take(fx, i - 1, axis).T * dx).T\n fx1 = (jnp.take(fx, i, 
axis).T * dx).T\n\n F = jnp.stack([f0, f1, fx0, fx1], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_CUBIC, F).T\n ttx = _get_t_der(t, derivative, dxi)\n fq = jnp.einsum(\"ji...,ij->i...\", coef, ttx)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n return fq.reshape(outshape)" }, { "identifier": "interp2d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp2d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 2d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0 or array-like, shape(2,)\n derivative order to calculate in x, y. Use a single value for the same in both\n directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in both directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, f data, recommend using\n Interpolator2D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, x, y, f = map(jnp.asarray, (xq, yq, x, y, f))\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n xq, yq = jnp.broadcast_arrays(xq, yq)\n outshape = xq.shape + f.shape[2:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq = map(jnp.atleast_1d, (xq, yq))\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n periodx, periody = _parse_ndarg(period, 2)\n derivative_x, derivative_y = _parse_ndarg(derivative, 2)\n lowx, highx, lowy, highy = _parse_extrap(extrap, 2)\n\n if periodx is not None:\n xq, x, f, fx, fy, fxy = _make_periodic(xq, x, periodx, 0, f, fx, fy, fxy)\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fxy = _make_periodic(yq, y, periody, 1, f, fx, fy, fxy)\n lowy = highy = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 4 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n neighbors_x = jnp.array(\n [[x[i], x[i - 1], x[i], x[i - 1]], [y[j], y[j], y[j - 1], y[j - 1]]]\n )\n neighbors_f = jnp.array(\n [f[i, j].T, f[i - 1, j].T, f[i, j - 1].T, f[i - 1, j - 1].T]\n )\n xyq = jnp.array([xq, yq])\n dist = jnp.linalg.norm(neighbors_x - xyq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[2:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0), derivative0, derivative1\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n f00 = f[i - 1, j - 1]\n f01 = f[i - 1, j]\n f10 = f[i, j - 1]\n f11 = f[i, j]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n F = jnp.array([[f00, f01], [f10, f11]])\n fq = (dxi * dyi * jnp.einsum(\"ijk...,ik,jk->k...\", F, tx, ty).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = 
approx_df(y, fx, method, 1, **kwargs)\n assert fx.shape == fy.shape == fxy.shape == f.shape\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fxy\"] = fxy\n fsq = OrderedDict()\n for ff in fs.keys():\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_BICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, *coef.shape[1:]), order=\"F\"), 2, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n fq = jnp.einsum(\"ijk...,ij,ik->i...\", coef, ttx, tty)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n\n return fq.reshape(outshape)" }, { "identifier": "interp3d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp3d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 3d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n zq : ndarray, shape(Nq,)\n z query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0, array-like, shape(3,)\n derivative order to calculate in x,y,z directions. Use a single value for the\n same in all directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions for\n [[xlow, xhigh],[ylow,yhigh],[zlow,zhigh]]\n period : float > 0, None, array-like, shape(3,)\n periodicity of the function in x, y, z directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in all directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, z, f data, recommend using\n Interpolator3D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, zq, x, y, z, f = map(jnp.asarray, (xq, yq, zq, x, y, z, f))\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n xq, yq, zq = jnp.broadcast_arrays(xq, yq, zq)\n outshape = xq.shape + f.shape[3:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq, zq = map(jnp.atleast_1d, (xq, yq, zq))\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n periodx, periody, periodz = _parse_ndarg(period, 3)\n derivative_x, derivative_y, derivative_z = _parse_ndarg(derivative, 3)\n lowx, highx, lowy, highy, lowz, highz = _parse_extrap(extrap, 3)\n\n if periodx is not None:\n xq, x, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n xq, x, periodx, 0, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n yq, y, periody, 1, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowy = highy = True\n if periodz is not None:\n zq, z, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n zq, z, periodz, 2, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowz = highz = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 8 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n neighbors_x = jnp.array(\n [\n [x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1]],\n [y[j], y[j], y[j - 1], y[j - 1], y[j], y[j], y[j - 1], y[j - 1]],\n [z[k], z[k], z[k], z[k], z[k - 1], z[k - 1], z[k - 1], z[k - 1]],\n ]\n )\n neighbors_f = jnp.array(\n [\n f[i, j, k].T,\n f[i - 1, j, k].T,\n f[i, j - 1, k].T,\n f[i - 1, j - 1, k].T,\n f[i, j, k - 1].T,\n f[i - 1, j, k - 1].T,\n f[i, j - 1, k - 1].T,\n f[i - 1, j - 1, k - 1].T,\n ]\n )\n xyzq = jnp.array([xq, yq, zq])\n dist = jnp.linalg.norm(neighbors_x - xyzq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[3:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0) & (derivative_z == 0),\n derivative0,\n derivative1,\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k 
= jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n f000 = f[i - 1, j - 1, k - 1]\n f001 = f[i - 1, j - 1, k]\n f010 = f[i - 1, j, k - 1]\n f100 = f[i, j - 1, k - 1]\n f110 = f[i, j, k - 1]\n f011 = f[i - 1, j, k]\n f101 = f[i, j - 1, k]\n f111 = f[i, j, k]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n z0 = z[k - 1]\n z1 = z[k]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n dz = z1 - z0\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n dz0 = lambda: jnp.array([z1 - zq, zq - z0])\n dz1 = lambda: jnp.array([-jnp.ones_like(zq), jnp.ones_like(zq)])\n dz2 = lambda: jnp.zeros((2, zq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n tz = jax.lax.switch(derivative_z, [dz0, dz1, dz2])\n\n F = jnp.array([[[f000, f001], [f010, f011]], [[f100, f101], [f110, f111]]])\n fq = (dxi * dyi * dzi * jnp.einsum(\"lijk...,lk,ik,jk->k...\", F, tx, ty, tz).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n assert (\n fx.shape\n == fy.shape\n == fz.shape\n == fxy.shape\n == fxz.shape\n == fyz.shape\n == fxyz.shape\n == f.shape\n )\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n dz = z[k] - z[k - 1]\n deltaz = zq - z[k - 1]\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n tz = deltaz * dzi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fz\"] = fz\n fs[\"fxy\"] = fxy\n fs[\"fxz\"] = fxz\n fs[\"fyz\"] = fyz\n fs[\"fxyz\"] = fxyz\n fsq = OrderedDict()\n for ff in fs.keys():\n for kk in [0, 1]:\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj) + str(kk)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj, k - 1 + kk]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n if \"z\" in ff:\n fsq[s] = (dz * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_TRICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, 4, *coef.shape[1:]), order=\"F\"), 3, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n ttz = _get_t_der(tz, derivative_z, dzi)\n fq = jnp.einsum(\"lijk...,li,lj,lk->l...\", coef, ttx, tty, ttz)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n fq = _extrap(zq, fq, z, lowz, highz)\n\n return fq.reshape(outshape)" } ]
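A minimal usage sketch (an illustrative aside, not part of the dataset record): based on the docstrings in the context list above, this is roughly how interp1d, Interpolator1D, and fft_interp1d would be called; the sample data below is hypothetical.

import jax.numpy as jnp
from interpax import Interpolator1D, fft_interp1d, interp1d

# Hypothetical knots: sample sin(x) on a coarse grid covering one period.
x = jnp.linspace(0, 2 * jnp.pi, 32, endpoint=False)
f = jnp.sin(x)
xq = jnp.linspace(0, 2 * jnp.pi, 200)

# One-off interpolation with a C1 cubic spline, treating f as 2*pi-periodic.
fq = interp1d(xq, x, f, method="cubic", period=2 * jnp.pi)

# For repeated queries on the same knots, the class caches the spline derivatives.
interp = Interpolator1D(x, f, method="cubic", period=2 * jnp.pi)
fq2 = interp(xq)        # interpolated values
dfq = interp(xq, dx=1)  # first derivative at the query points

# Fourier interpolation of the same periodic samples onto a finer grid.
f_fine = fft_interp1d(f, 2 * f.size)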
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
13,170
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp)
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp)
interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs)
5
2023-10-18 13:12:20+00:00
16k
apple/ml-nvas3d
demo/generate_demo_video.py
[ { "identifier": "convolve_moving_receiver", "path": "nvas3d/utils/dynamic_utils.py", "snippet": "def convolve_moving_receiver(\n source_audio: np.ndarray,\n rirs: np.ndarray,\n interp_index: T.List[int],\n interp_weight: T.List[float]\n) -> np.ndarray:\n \"\"\"\n Apply convolution between an audio signal and moving impulse responses (IRs).\n\n Args:\n - source_audio: Source audio of shape (audio_len,)\n - rirs: RIRs of shape (num_positions, num_channels, ir_length)\n - interp_index: Indices representing the start positions for interpolation of shape (audio_len,).\n - interp_weight: Weight values for linear interpolation of shape (audio_len,).\n\n Returns:\n - Convolved audio signal of shape (num_channels, audio_len)\n \"\"\"\n\n num_channels = rirs.shape[1]\n audio_len = source_audio.shape[0]\n\n # Perform convolution for each position and channel\n convolved_audios = oaconvolve(source_audio[None, None, :], rirs, axes=-1)[..., :audio_len]\n\n # NumPy fancy indexing and broadcasting for interpolation\n start_audio = convolved_audios[interp_index, np.arange(num_channels)[:, None], np.arange(audio_len)]\n end_audio = convolved_audios[interp_index + 1, np.arange(num_channels)[:, None], np.arange(audio_len)]\n interp_weight = interp_weight[None, :]\n\n # Apply linear interpolation\n moving_audio = (1 - interp_weight) * start_audio + interp_weight * end_audio\n\n return moving_audio" }, { "identifier": "setup_dynamic_interp", "path": "nvas3d/utils/dynamic_utils.py", "snippet": "def setup_dynamic_interp(\n receiver_position: np.ndarray,\n total_samples: int,\n) -> T.Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Setup moving path with a constant speed for a receiver, given its positions in 3D space.\n\n Args:\n - receiver_position: Receiver positions in 3D space of shape (num_positions, 3).\n - total_samples: Total number of samples in the audio.\n\n Returns:\n - interp_index: Indices representing the start positions for interpolation.\n - interp_weight: Weight values for linear interpolation.\n \"\"\"\n\n # Calculate the number of samples per interval\n distance = np.linalg.norm(np.diff(receiver_position, axis=0), axis=1)\n speed_per_sample = distance.sum() / total_samples\n samples_per_interval = np.round(distance / speed_per_sample).astype(int)\n\n # Distribute rounding errors\n error = total_samples - samples_per_interval.sum()\n for i in np.random.choice(len(samples_per_interval), abs(error)):\n samples_per_interval[i] += np.sign(error)\n\n # Calculate indices and weights for linear interpolation\n interp_index = np.repeat(np.arange(len(distance)), samples_per_interval)\n interp_weight = np.concatenate([np.linspace(0, 1, num, endpoint=False) for num in samples_per_interval])\n\n return interp_index, interp_weight.astype(np.float32)" }, { "identifier": "clip_two", "path": "nvas3d/utils/audio_utils.py", "snippet": "def clip_two(audio1, audio2):\n \"\"\"\n Clips two audio signals to the same length.\n\n Args:\n audio1: First audio signal.\n audio2: Second audio signal.\n\n Returns: \n - Two audio signals of the same length.\n \"\"\"\n\n length_diff = audio1.shape[-1] - audio2.shape[-1]\n\n if length_diff == 0:\n return audio1, audio2\n elif length_diff > 0:\n audio1 = audio1[..., :audio2.shape[-1]]\n elif length_diff < 0:\n audio2 = audio2[..., :audio1.shape[-1]]\n\n return audio1, audio2" }, { "identifier": "clip_all", "path": "nvas3d/utils/audio_utils.py", "snippet": "def clip_all(audio_list):\n \"\"\"\n Clips all audio signals in a list to the same length.\n\n Args: \n audio_list: List of audio 
signals.\n\n Returns: \n - List of audio signals of the same length.\n \"\"\"\n\n min_length = min(audio.shape[-1] for audio in audio_list)\n clipped_audio_list = []\n for audio in audio_list:\n clipped_audio = audio[..., :min_length]\n clipped_audio_list.append(clipped_audio)\n\n return clipped_audio_list" }, { "identifier": "create_scene", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def create_scene(room: str,\n receiver_position: T.Tuple[float, float, float] = [0.0, 0.0, 0.0],\n sample_rate: float = 48000,\n image_size: T.Tuple[int, int] = (512, 256),\n include_visual_sensor: bool = True,\n hfov: float = 90.0\n ) -> Scene:\n \"\"\"\n Create a soundspaces scene to render IR.\n \"\"\"\n\n # Note: Make sure mp3d room is downloaded\n with suppress_stdout_and_stderr():\n # Create a receiver\n receiver = Receiver(\n position=receiver_position,\n rotation=0,\n sample_rate=sample_rate\n )\n\n scene = Scene(\n room,\n [None], # placeholder for source class\n receiver=receiver,\n include_visual_sensor=include_visual_sensor,\n add_source_mesh=False,\n device=torch.device('cpu'),\n add_source=False,\n image_size=image_size,\n hfov=hfov\n )\n\n return scene" }, { "identifier": "render_rir_parallel", "path": "soundspaces_nvas3d/utils/ss_utils.py", "snippet": "def render_rir_parallel(room_list: T.List[str],\n source_position_list: T.List[T.Tuple[float, float, float]],\n receiver_position_list: T.List[T.Tuple[float, float, float]],\n filename_list: T.List[str] = None,\n receiver_rotation_list: T.List[float] = None,\n batch_size: int = 64,\n sample_rate: float = 48000,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ) -> T.List[torch.Tensor]:\n \"\"\"\n Run render_ir parallely for all elements of zip(source_position_list, receiver_position_list).\n \"\"\"\n\n assert len(room_list) == len(source_position_list)\n assert len(source_position_list) == len(receiver_position_list)\n\n if filename_list is None:\n is_return = True\n else:\n is_return = False\n\n if receiver_rotation_list is None:\n receiver_rotation_list = [0] * len(receiver_position_list)\n\n # Note: Make sure all rooms are downloaded\n\n # Calculate the number of batches\n num_points = len(source_position_list)\n num_batches = (num_points + batch_size - 1) // batch_size\n\n # Use tqdm to display the progress bar\n progress_bar = tqdm(total=num_points)\n\n def update_progress(*_):\n progress_bar.update()\n\n ir_list = []\n # Process the tasks in batches\n for batch_idx in range(num_batches):\n # Calculate the start and end indices of the current batch\n start_idx = batch_idx * batch_size\n end_idx = min(start_idx + batch_size, num_points)\n if is_return:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], None, receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n else:\n batch = [(room_list[i], source_position_list[i], receiver_position_list[i], filename_list[i], receiver_rotation_list[i]) for i in range(start_idx, end_idx)]\n\n # Create a multiprocessing Pool for the current batch\n with multiprocessing.Pool() as pool:\n tasks = []\n for room, source_position, receiver_position, filename, receiver_rotation in batch:\n # Apply async mapping of process_ir function\n task = pool.apply_async(render_ir, args=(room, source_position, receiver_position, filename, receiver_rotation, sample_rate, use_default_material, channel_type, channel_order), callback=update_progress)\n tasks.append(task)\n\n # Wait for all tasks in the batch to complete and 
collect results\n for task in tasks:\n if is_return:\n ir = task.get() # Block until the result is ready\n ir_list.append(ir) # Append the result to the list\n else:\n task.get()\n if is_return:\n return ir_list" }, { "identifier": "load_room_grid", "path": "soundspaces_nvas3d/utils/aihabitat_utils.py", "snippet": "def load_room_grid(\n room: str,\n grid_distance: float\n) -> T.Dict:\n \"\"\"\n Load grid data for a specified room. If the grid data does not exist, it generates one.\n\n Args:\n - room: Name of the room.\n - grid_distance: The spacing between grid points.\n\n Returns:\n - A dictionary containing grid information for the specified room.\n \"\"\"\n\n grid_distance_str = str(grid_distance).replace(\".\", \"_\")\n dirname_grid = f'data/scene_datasets/metadata/mp3d/grid_{grid_distance_str}'\n filename_grid = f'{dirname_grid}/grid_{room}.npy'\n if not os.path.exists(filename_grid):\n os.makedirs(dirname_grid, exist_ok=True)\n print(f'Computing grid_{room}...')\n from soundspaces_nvas3d.rir_generation.generate_grid import save_xy_grid_points\n grid_info = save_xy_grid_points(room, grid_distance, dirname_grid)\n\n # load grid\n grid_info = np.load(filename_grid, allow_pickle=True).item()\n\n return grid_info" }, { "identifier": "Receiver", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Receiver:\n \"\"\"\n Receiver for SoundSpaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n sample_rate: float = 48000,\n ):\n\n self.position = position\n self.rotation = rotation\n self.sample_rate = sample_rate" }, { "identifier": "Source", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Source:\n \"\"\"\n Source for Soundspaces\n \"\"\"\n\n def __init__(self,\n position: T.Tuple[float, float, float],\n rotation: float,\n dry_sound: str,\n mesh: str,\n device: torch.device\n ):\n\n self.position = position\n self.rotation = rotation\n self.device = device # where to store dry_sound\n self.dry_sound = dry_sound\n self.mesh = mesh" }, { "identifier": "Scene", "path": "soundspaces_nvas3d/soundspaces_nvas3d.py", "snippet": "class Scene:\n \"\"\"\n Soundspaces scene including room, receiver, and source list\n \"\"\"\n\n def __init__(self,\n room: str,\n source_name_list: T.List[str],\n receiver: Receiver = None,\n source_list: T.List[Source] = None,\n include_visual_sensor: bool = True,\n add_source_mesh: bool = True,\n device: torch.device = torch.device('cpu'),\n add_source: bool = True,\n image_size: T.Tuple[int, int] = (512, 256),\n hfov: float = 90.0,\n use_default_material: bool = False,\n channel_type: str = 'Ambisonics',\n channel_order: int = 1\n ):\n\n # Set scene\n self.room = room\n self.n_sources = len(source_name_list)\n assert self.n_sources > 0\n self.receiver = receiver\n self.source_list = source_list\n self.source_current = None\n self.include_visual_sensor = include_visual_sensor\n self.add_source_mesh = add_source_mesh\n self.device = device # where to store IR\n\n # Set channel config for soundspaces\n self.channel = {}\n self.channel['type'] = channel_type\n self.channel['order'] = channel_order\n if channel_type == 'Ambisonics':\n self.channel_count = (self.channel['order'] + 1)**2\n elif channel_type == 'Binaural':\n self.channel_count = 2\n\n # Set aihabitat config for soundspaces\n self.aihabitat = {}\n self.aihabitat['default_agent'] = 0\n self.aihabitat['sensor_height'] = 1.5\n self.aihabitat['height'] = image_size[0]\n self.aihabitat['width'] = image_size[1]\n self.aihabitat['hfov'] 
= hfov\n\n # Set acoustics config for soundspaces\n self.acoustic_config = {}\n self.acoustic_config['sampleRate'] = 48000\n self.acoustic_config['direct'] = True\n self.acoustic_config['indirect'] = True\n self.acoustic_config['diffraction'] = True\n self.acoustic_config['transmission'] = True\n self.acoustic_config['directSHOrder'] = 5\n self.acoustic_config['indirectSHOrder'] = 3\n self.acoustic_config['unitScale'] = 1\n self.acoustic_config['frequencyBands'] = 32\n self.acoustic_config['indirectRayCount'] = 50000\n\n # Set audio material\n if use_default_material:\n self.audio_material = './data/material/mp3d_material_config_default.json'\n else:\n self.audio_material = './data/material/mp3d_material_config.json'\n\n # Create simulation\n self.create_scene()\n\n # Randomly set source and receiver position\n source_position, source_rotation = None, None\n receiver_position, receiver_rotation = None, None\n\n # Create receiver (inside the room)\n if self.receiver is None:\n # random receiver\n self.create_receiver(receiver_position, receiver_rotation)\n else:\n # input receiver\n self.update_receiver(self.receiver)\n\n if add_source:\n # Create source\n if self.source_list is None:\n # random source\n self.source_list = [None] * self.n_sources\n for source_id, source_name in enumerate(source_name_list):\n self.create_source(source_name, source_id, source_position, source_rotation)\n else:\n # input source\n for source_id, _ in enumerate(source_name_list):\n self.update_source(self.source_list[source_id], source_id)\n\n def create_scene(self):\n \"\"\"\n Given the configuration, create a scene for soundspaces\n \"\"\"\n\n # Set backend configuration\n backend_cfg = habitat_sim.SimulatorConfiguration()\n backend_cfg.scene_id = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.glb'\n backend_cfg.scene_dataset_config_file = './data/scene_datasets/mp3d/mp3d.scene_dataset_config.json'\n backend_cfg.load_semantic_mesh = True\n backend_cfg.enable_physics = False\n\n # Set agent configuration\n agent_config = habitat_sim.AgentConfiguration()\n\n if self.include_visual_sensor:\n # Set color sensor\n rgb_sensor_spec = habitat_sim.CameraSensorSpec()\n rgb_sensor_spec.uuid = \"color_sensor\"\n rgb_sensor_spec.sensor_type = habitat_sim.SensorType.COLOR\n rgb_sensor_spec.resolution = [self.aihabitat['height'], self.aihabitat['width']]\n rgb_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n rgb_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n rgb_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications = [rgb_sensor_spec]\n\n # Set depth sensor\n depth_sensor_spec = habitat_sim.CameraSensorSpec()\n depth_sensor_spec.uuid = \"depth_sensor\"\n depth_sensor_spec.sensor_type = habitat_sim.SensorType.DEPTH\n depth_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n depth_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n depth_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n depth_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n agent_config.sensor_specifications.append(depth_sensor_spec)\n\n # # Set semantic sensor\n # semantic_sensor_spec = habitat_sim.CameraSensorSpec()\n # semantic_sensor_spec.uuid = \"semantic_sensor\"\n # semantic_sensor_spec.sensor_type = habitat_sim.SensorType.SEMANTIC\n # semantic_sensor_spec.resolution = [self.aihabitat[\"height\"], self.aihabitat[\"width\"]]\n # semantic_sensor_spec.position = [0.0, self.aihabitat[\"sensor_height\"], 0.0]\n # 
semantic_sensor_spec.sensor_subtype = habitat_sim.SensorSubType.PINHOLE\n # semantic_sensor_spec.hfov = self.aihabitat[\"hfov\"]\n # agent_config.sensor_specifications.append(semantic_sensor_spec)\n\n # Set simulator configuration\n cfg = habitat_sim.Configuration(backend_cfg, [agent_config])\n\n # Set simulator\n sim = habitat_sim.Simulator(cfg)\n\n # set navmesh path for searching for navigatable points\n navmesh = f'./data/scene_datasets/mp3d/{self.room}/{self.room}.navmesh'\n sim.pathfinder.load_nav_mesh(navmesh)\n\n # seed for navmesh\n sim.seed(random.randint(0, 1024))\n\n # Set simulation\n self.sim = sim\n print('Scene created!')\n\n return self\n\n import torch\n\n def add_audio_sensor(self):\n \"\"\"\n Add audio sensor to the scene\n \"\"\"\n\n # set audio sensor\n audio_sensor_spec = habitat_sim.AudioSensorSpec()\n audio_sensor_spec.uuid = \"audio_sensor\"\n audio_sensor_spec.enableMaterials = True # make sure _semantic.ply file is in the scene folder\n audio_sensor_spec.channelLayout.type = getattr(habitat_sim.sensor.RLRAudioPropagationChannelLayoutType, self.channel['type'])\n audio_sensor_spec.channelLayout.channelCount = self.channel_count # ambisonics\n\n # Set acoustic configuration\n audio_sensor_spec.acousticsConfig.sampleRate = self.acoustic_config['sampleRate']\n audio_sensor_spec.acousticsConfig.direct = self.acoustic_config['direct']\n audio_sensor_spec.acousticsConfig.indirect = self.acoustic_config['indirect']\n audio_sensor_spec.acousticsConfig.diffraction = self.acoustic_config['diffraction']\n audio_sensor_spec.acousticsConfig.transmission = self.acoustic_config['transmission']\n audio_sensor_spec.acousticsConfig.directSHOrder = self.acoustic_config['directSHOrder']\n audio_sensor_spec.acousticsConfig.indirectSHOrder = self.acoustic_config['indirectSHOrder']\n audio_sensor_spec.acousticsConfig.unitScale = self.acoustic_config['unitScale']\n audio_sensor_spec.acousticsConfig.frequencyBands = self.acoustic_config['frequencyBands']\n audio_sensor_spec.acousticsConfig.indirectRayCount = self.acoustic_config['indirectRayCount']\n # audio_sensor_spec.acousticsConfig.maxIRLength = 40.0\n # audio_sensor_spec.acousticsConfig.sourceRayCount = 2000\n # audio_sensor_spec.acousticsConfig.meshSimplification = False\n\n # Initialize receiver\n audio_sensor_spec.position = [0.0, self.aihabitat['sensor_height'], 0.0] # audio sensor has a height of 1.5m\n self.sim.add_sensor(audio_sensor_spec)\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioMaterialsJSON(self.audio_material)\n\n return self\n\n def create_receiver(self,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Randomly sample receiver position and rotation\n \"\"\"\n\n if position is None:\n # Randomly set receiver position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360)\n\n # Set sample rate\n sample_rate = self.acoustic_config['sampleRate']\n\n # Set receiver\n receiver = Receiver(position, rotation, sample_rate)\n\n # Update receiver\n self.update_receiver(receiver)\n\n return self\n\n def update_receiver(self,\n receiver: Receiver\n ):\n \"\"\"\n Update receiver\n \"\"\"\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver.position + np.array([0, 0.0, 0])) # agent height is already applied in 
audio_sensor_spec.position\n new_state.rotation = quat_from_angle_axis(math.radians(receiver.rotation), np.array([0, 1.0, 0])) # + -> left\n # new_state.rotation *= quat_from_angle_axis(math.radians(-30), np.array([1.0, 0, 0])) # + -> up\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n self.receiver = receiver # for reference\n\n return self\n\n def update_receiver_position(self,\n receiver_position: T.Tuple[float, float, float]\n ):\n \"\"\"\n Update receiver position\n \"\"\"\n\n self.receiver.position = receiver_position\n\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.position = np.array(receiver_position + np.array([0, 0.0, 0])) # agent height is already applied in audio_sensor_spec.position\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n return self\n\n def create_source(self,\n source_name: str,\n source_id: int,\n position: T.Tuple[float, float, float] = None,\n rotation: float = None\n ):\n \"\"\"\n Set source given the source name, position, and rotation\n \"\"\"\n\n if position is None:\n # Randomly set source position in the room\n position = self.sim.pathfinder.get_random_navigable_point()\n rotation = random.uniform(0, 360) # only for mesh as source sound is omnidirectional\n\n # Randomly set source sound\n dry_sound, mesh = sample_dry_sound_and_mesh(source_name)\n\n # Set source\n source = Source(position, rotation, dry_sound, mesh, device=self.device)\n\n # Save source\n self.update_source(source, source_id)\n\n return self\n\n def update_source(self,\n source: Source,\n source_id: int = None\n ):\n \"\"\"\n Update source\n \"\"\"\n\n if source_id is not None:\n # update source list\n self.source_list[source_id] = source\n\n # Add mesh\n if self.add_source_mesh:\n ########## Add mesh (source.position, source.rotation) ##########\n obj_templates_mgr = self.sim.get_object_template_manager()\n rigid_obj_mgr = self.sim.get_rigid_object_manager()\n\n # Load the object template from the configuration file\n obj_templates_mgr.load_configs(str(os.path.join(\"data/objects\")))\n\n # Insert the object relative to the agent\n object_ids = []\n object_orientation = mn.Quaternion.rotation(mn.Deg(source.rotation), mn.Vector3.y_axis())\n object_template_handle = obj_templates_mgr.get_template_handles(f'data/objects/{source.mesh}')[0] # debug\n if source.mesh == 'male':\n scale = 0.5\n height_offset = 0.935\n elif source.mesh == 'female':\n scale = 1.0\n height_offset = 0.85\n elif source.mesh == 'guitar':\n scale = 1 / 1239.1628 * 2\n height_offset = 1.5\n object_orientation *= mn.Quaternion.rotation(mn.Deg(-90), mn.Vector3.x_axis())\n elif source.mesh == 'drum':\n scale = 1 / 1.8\n height_offset = 0.6\n elif source.mesh == 'classic_microphone':\n scale = 1 / 1.15\n height_offset = 0.67\n elif source.mesh == 'bluetooth_speaker':\n scale = 1 / 70\n height_offset = 1.0\n\n # Scale the object to fit the scene\n scaled_object_template = obj_templates_mgr.get_template_by_handle(object_template_handle)\n scaled_object_template.scale = np.array([scale, scale, scale])\n obj_templates_mgr.register_template(scaled_object_template, \"scaled\")\n object = rigid_obj_mgr.add_object_by_template_handle(\"scaled\")\n object.translation = np.array(source.position) + np.array([0, height_offset, 0])\n object.rotation = object_orientation\n\n object_ids.append(object.object_id)\n\n # rigid_obj_mgr.remove_all_objects()\n\n else:\n # update current source\n 
audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source.position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n self.source_current = source # for reference\n\n return self\n\n def update_source_position(self,\n source_position\n ):\n \"\"\"\n Update Source position\n \"\"\"\n\n audio_sensor = self.sim.get_agent(self.aihabitat['default_agent'])._sensors['audio_sensor']\n audio_sensor.setAudioSourceTransform(source_position + np.array([0, self.aihabitat[\"sensor_height\"], 0])) # add 1.5m to the height calculation\n\n def render_ir(self,\n source_id: int\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n source = self.source_list[source_id]\n self.update_source(source)\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_simple(self,\n source_position: T.Tuple[float, float, float],\n receiver_position: T.Tuple[float, float, float],\n ) -> torch.Tensor:\n \"\"\"\n Render IR given the source ID\n \"\"\"\n\n # source\n self.update_source_position(source_position)\n\n # receiver\n self.update_receiver_position(receiver_position)\n\n # render ir\n ir = torch.tensor(self.sim.get_sensor_observations()['audio_sensor'], device=self.device)\n\n return ir\n\n def render_ir_all(self) -> T.List[torch.Tensor]:\n \"\"\"\n Render IR for all sources\n \"\"\"\n\n ir_list = []\n for source_id in range(self.n_sources):\n print(f'Rendering IR {source_id}/{self.n_sources}...')\n ir = self.render_ir(source_id)\n ir_list.append(ir)\n\n return ir_list\n\n def render_image(self,\n is_instance=False\n ):\n \"\"\"\n Render image including rgb, depth, and semantic\n \"\"\"\n\n observation = self.sim.get_sensor_observations()\n rgb = observation[\"color_sensor\"]\n depth = observation[\"depth_sensor\"]\n\n # Semantic\n # semantic = sim.get_sensor_observations()[\"semantic_sensor\"]\n # is_valid = (depth != 0)\n # semantic[~is_valid] = semantic.max() + 1\n\n # if is_instance:\n # # Display instance id\n # aihabitat_utils.display_sample(rgb, semantic, depth, filename=f'{dir_results}/view/view_instance.png')\n # else:\n # # Display category id\n # category = aihabitat_utils.semantic_id_to_category_id(semantic, sim.semantic_scene.objects)\n # void_id = 0\n # category[~is_valid] = void_id\n # aihabitat_utils.display_sample(rgb, category, depth, filename=f'{dir_results}/view/view_category.png')\n\n return rgb, depth\n\n def render_envmap(self):\n \"\"\"\n Render environment map in *** format\n \"\"\"\n\n with suppress_stdout_and_stderr():\n angles = [0, 270, 180, 90]\n rgb_panorama = []\n depth_panorama = []\n\n for angle_offset in angles:\n angle = self.receiver.rotation + angle_offset\n agent = self.sim.get_agent(self.aihabitat[\"default_agent\"])\n new_state = self.sim.get_agent(self.aihabitat[\"default_agent\"]).get_state()\n new_state.rotation = quat_from_angle_axis(\n math.radians(angle), np.array([0, 1.0, 0])\n ) * quat_from_angle_axis(math.radians(0), np.array([1.0, 0, 0]))\n new_state.sensor_states = {}\n agent.set_state(new_state, True)\n\n observation = self.sim.get_sensor_observations()\n rgb_panorama.append(observation[\"color_sensor\"])\n depth_panorama.append((observation['depth_sensor']))\n envmap_rgb = np.concatenate(rgb_panorama, axis=1)\n envmap_depth = np.concatenate(depth_panorama, axis=1)\n\n # rotate receiver to original angle\n self.update_receiver(self.receiver)\n\n return 
envmap_rgb, envmap_depth\n\n def generate_xy_grid_points(self,\n grid_distance: float,\n height: float = None,\n filename_png: str = None,\n meters_per_pixel: float = 0.005\n ) -> torch.Tensor:\n \"\"\"\n Generate the 3D positions of grid points at the given height\n \"\"\"\n\n pathfinder = self.sim.pathfinder\n assert pathfinder.is_loaded\n # agent_height = pathfinder.nav_mesh_settings.agent_height # to be navigable, full body of the agent should be inside\n if height is None: # height of the agent foot\n height = 0\n # height = pathfinder.get_bounds()[0][1] # floor height\n\n # Sample grid\n bounds = pathfinder.get_bounds()\n x_points = torch.arange(bounds[0][0], bounds[1][0] + grid_distance, grid_distance)\n z_points = torch.arange(bounds[0][2], bounds[1][2] + grid_distance, grid_distance)\n x_grid, z_grid = torch.meshgrid(x_points, z_points)\n y_value = height * torch.ones_like(x_grid.reshape(-1))\n\n # Combine x, y, and z coordinates into a single tensor of points\n points = torch.stack([x_grid.reshape(-1), y_value.reshape(-1), z_grid.reshape(-1)], dim=-1)\n is_points_navigable = []\n for point in points:\n is_points_navigable.append(pathfinder.is_navigable(point)) # navigable points\n torch.tensor(is_points_navigable).sum()\n\n # Flatten the tensor of points into a list\n grid_points = points[is_points_navigable]\n\n # assert len(grid_points) > 0\n # save image\n if filename_png is not None:\n aihabitat_utils.save_town_map_grid(filename_png, pathfinder, grid_points, meters_per_pixel=meters_per_pixel)\n\n return grid_points\n\n def generate_data(self, use_dry_sound: bool = False):\n \"\"\"\n Generate all data including IR, envmap, audio, image\n \"\"\"\n\n # env map\n if self.include_visual_sensor:\n envmap_rgb, envmap_depth = self.render_image()\n else:\n envmap_rgb, envmap_depth = None, None\n\n # IR\n self.add_audio_sensor() # add audio_sensor after image rendering for faster image rendering\n ir_list = self.render_ir_all()\n # ir_total = sum_arrays_with_different_length(ir_list).detach().cpu()\n\n # audio_list\n dry_sound_list = []\n audio_list = []\n # audio_total = None\n if use_dry_sound:\n for source_id, source in enumerate(self.source_list):\n # load dry sound\n dry_sound = source.dry_sound\n if isinstance(dry_sound, str):\n dry_sound, sample_rate = torchaudio.load(dry_sound)\n self.dry_sound = dry_sound.to(self.device)\n self.sample_rate = sample_rate\n\n ir = ir_list[source_id]\n audio = torch.stack([audio_utils.fft_conv(dry_sound[0], ir_channel, is_cpu=True) for ir_channel in ir])\n dry_sound_list.append(dry_sound.detach().cpu())\n audio_list.append(audio.detach().cpu())\n\n # audio_total\n # audio_total = sum_arrays_with_different_length(audio_list)\n\n # cpu\n ir_list = [tensor.detach().cpu() for tensor in ir_list]\n\n # dirname = '.'\n # with open(f'{dirname}/debug.txt', 'w') as f:\n # f.write(f'NavMesh area: {self.sim.pathfinder.navigable_area}\\n')\n # f.write(f'NavMesh bounds: {self.sim.pathfinder.get_bounds()}\\n')\n # f.write(f'Receiver position: {self.receiver.position}\\n')\n # for s, source in enumerate(self.source_list):\n # f.write(f'Source {s} position: {source.position}\\n')\n # f.write(f'\\n')\n\n return dict(\n ir_list=ir_list,\n sample_rate=self.receiver.sample_rate,\n envmap=[envmap_rgb, envmap_depth],\n audio_list=audio_list,\n dry_sound_list=dry_sound_list,\n )" } ]
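An illustrative sketch (not part of the dataset record) of how setup_dynamic_interp and convolve_moving_receiver from the context list above could be combined to render audio for a receiver moving along a path; the array shapes follow the docstrings, and all concrete values (sample rate, path, noise stand-ins for the RIRs) are placeholder assumptions.

import numpy as np

from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp

# Placeholder inputs: 2 s of mono source audio and binaural RIRs rendered at
# 5 receiver positions along the path, shaped (num_positions, num_channels, ir_length).
sample_rate = 48000
source_audio = np.random.randn(2 * sample_rate).astype(np.float32)
rirs = np.random.randn(5, 2, 4800).astype(np.float32)
receiver_path = np.stack([np.zeros(5), np.full(5, 1.5), np.linspace(0.0, 2.0, 5)], axis=1)

# Map each audio sample to a path segment (index) and an interpolation weight.
interp_index, interp_weight = setup_dynamic_interp(receiver_path, source_audio.shape[0])

# Convolve with position-dependent RIRs, interpolating between neighboring positions.
moving_audio = convolve_moving_receiver(source_audio, rirs, interp_index, interp_weight)
# moving_audio has shape (num_channels, audio_len), here (2, 96000).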
import os import json import argparse import itertools import subprocess import typing as T import torch import imageio import torchaudio import numpy as np import matplotlib.pyplot as plt from moviepy.editor import * from nvas3d.utils.dynamic_utils import convolve_moving_receiver, setup_dynamic_interp from nvas3d.utils.audio_utils import clip_two, clip_all from soundspaces_nvas3d.utils.ss_utils import create_scene, render_rir_parallel from soundspaces_nvas3d.utils.aihabitat_utils import load_room_grid from soundspaces_nvas3d.soundspaces_nvas3d import Receiver, Source, Scene
10,867
def generate_rir_combination( room: str, source_idx_list: T.List[int], grid_points_source: torch.Tensor, receiver_idx_list: T.List[int], receiver_rotation_list: T.List[float], grid_points_receiver: torch.Tensor, channel_type: str = 'Binaural', channel_order: int = 0 ) -> T.List[T.List[torch.Tensor]]: """ Generates room impulse responses (RIR) for given source and receiver combinations. Args: - room: Room object for which RIRs need to be computed. - source_idx_list: List of source indices. - grid_points_source: Grid points for the source. - receiver_idx_list: List of receiver indices. - receiver_rotation_list: List of receiver rotations. - grid_points_receiver: Grid points for the receiver. - channel_type: Type of the channel. Defaults to 'Ambisonics'. - channel_order: Order of the channel for Ambisonics. Defulats to 0, as video usually does not support HOA. Returns: - A 2D list containing RIRs for every source-receiver combination. """ # Set source and receiver points source_point_list = grid_points_source[source_idx_list] receiver_point_list = grid_points_receiver[receiver_idx_list] source_points_pair, receiver_points_pair = all_pairs(source_point_list, receiver_point_list) _, receiver_rotation_pair = all_pairs(source_point_list, receiver_rotation_list) room_list = [room] * len(source_points_pair) filename_list = None # Render RIR for grid points ir_list = render_rir_parallel(room_list, source_points_pair, receiver_points_pair, receiver_rotation_list=receiver_rotation_pair, filename_list=filename_list, channel_type=channel_type, channel_order=channel_order) ir_list = clip_all(ir_list) # make the length consistent num_channel = len(ir_list[0]) # Reshape RIR num_sources = len(source_idx_list) num_receivers = len(receiver_idx_list) ir_output = torch.stack(ir_list).reshape(num_sources, num_receivers, num_channel, -1) # '-1' will infer the remaining dimension based on the size of each tensor in ir_list ir_output /= ir_output.abs().max() return ir_output def interpolate_values( start: float, end: float, interp_weight: float ) -> float: """ Interpolate between two values based on the weight values. Args: - start: Beginning value. - end: Ending value. - interp_weight: Weight for linear interpolation Returns: - Interpolated value. """ return (1 - interp_weight) * start + interp_weight * end def main(args): """ Generate NVAS video from the estimated dry sound. Save: ├── {results_demo} = results/nvas3d_demo/default/demo/{room}/0 │ ├── video/ │ │ ├── moving_audio.wav : Audio interpolated for the moving receiver. │ │ ├── moving_audio_1.wav : Audio interpolated specifically for source 1. │ │ ├── moving_audio_2.wav : Audio interpolated specifically for source 2. │ │ ├── moving_video.mp4 : Video visualization of movement (no audio). │ │ ├── nvas.mp4 : NVAS video results with combined audio. │ │ ├── nvas_source1.mp4 : NVAS video results for only source 1 audio. │ │ ├── nvas_source2.mp4 : NVAS video results for only source 2 audio. │ │ └── rgb_receiver.png : A rendered view from the perspective of the receiver. 
""" # Constants sample_rate = args.sample_rate sample_rate_video = args.sample_rate_video novel_path_config = args.novel_path_config use_gt_location = args.use_gt_location channel_type = args.channel_type use_placeholder_mesh = args.use_placeholder_mesh # Load data and metadata metadata = torch.load(f'{args.results_dir}/results_detection/metadata.pt') room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] if use_gt_location: # Use estimated dry sound from GT source location source1_idx = metadata['source1_idx'][0].item() source2_idx = metadata['source2_idx'][0].item() source_idx_list = [source1_idx, source2_idx] else: # Use estimated dry sound from detected source location detected_source1_idx = metadata['detected_source_idx'][0] detected_source2_idx = metadata['detected_source_idx'][1] source_idx_list = [detected_source1_idx, detected_source2_idx] # Define receiver path and rotations with open(f'demo/config_demo/{novel_path_config}.json', 'r') as file: json_path = json.load(file) receiver_idx_list = json_path['receiver_idx_list'] receiver_rotation_list = json_path['receiver_rotation_list'] # Load grid points
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # def normalize(input: torch.Tensor) -> torch.Tensor: output = (input - input.min()) / (input.max() - input.min()) output = 2 * output - 1 return output def configure_scene_from_metadata( metadata: T.Dict[str, T.Any], image_size: T.Tuple[int, int] = (1000, 1000), hfov: float = 90.0, use_placeholder_mesh: bool = False ) -> Scene: """ Configures a scene using the provided metadata. Args: - metadata: Dictionary containing room and grid point information. - image_size: The size of the rendered image. - hfov: Horizontal field of view. - use_placeholder_mesh: Flag to determine if placeholder meshes should be used. Returns: - Configured scene object. """ room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] source_idx_list = [metadata['source1_idx'][0].item(), metadata['source2_idx'][0].item()] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] scene = create_scene(room, image_size=image_size, hfov=hfov) if use_placeholder_mesh: # Add placeholder mesh for sources and receivers to the scene # Download the following mesh objects and locate it under data/objects/{mesh_name}.glb: # - "Bluetooth Speaker" (https://skfb.ly/6VLyL) by Ramanan is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - “Classic Microphone” (https://skfb.ly/6Aryq) by urbanmasque is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/) # - "Standard Drum Set" (https://skfb.ly/owroB) by Heataker is licensed under Creative Commons Attribution (http://creativecommons.org/licenses/by/4.0/). # - "3D Posed People" (https://renderpeople.com/free-3d-people/) by Renderpeople: The licensing for our Renderpeople products includes that customers are allowed to use the data for rendering still images and animations for commercial or private purposes, such as video production, broadcasting, print, movies, advertising, illustrations and presentations (https://renderpeople.com/faq/) ss_source1 = Source( position=grid_points_source[source_idx_list[0]], rotation=0, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_source2 = Source( position=grid_points_source[source_idx_list[1]], rotation=-90, dry_sound='', mesh='bluetooth_speaker', # Need mesh object device=torch.device('cpu') ) ss_mic_list = [ Source( position=grid_points_source[idx], rotation=180, dry_sound='', mesh='classic_microphone', # Need mesh object device=torch.device('cpu') ) for idx in receiver_idx_list_original ] scene.add_source_mesh = True scene.source_list = [None] * (len(source_idx_list) + len(receiver_idx_list_original)) scene.update_source(ss_source1, 0) scene.update_source(ss_source2, 1) for m, mic in enumerate(ss_mic_list): scene.update_source(mic, m + 2) return scene def interpolate_moving_audio( source1_audio: torch.Tensor, source2_audio: torch.Tensor, ir1_list: T.List[torch.Tensor], ir2_list: T.List[torch.Tensor], receiver_position: torch.Tensor ) -> T.Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Interpolates audio for a moving receiver. Args: - source1_audio: First source audio. - source2_audio: Second source audio. - ir1_list: List of impulse responses for source 1. - ir2_list: List of impulse responses for source 2. - receiver_position: Positions of the moving receiver. Returns: - Tuple containing combined audio, interpolated audio from source 1, and interpolated audio from source 2. 
""" # Prepare for interpolation audio_len = source1_audio.shape[-1] interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), audio_len) # Generate audio for moving receiver receiver_audio_1 = convolve_moving_receiver(source1_audio.numpy()[0], ir1_list.numpy(), interp_index, interp_weight) receiver_audio_2 = convolve_moving_receiver(source2_audio.numpy()[0], ir2_list.numpy(), interp_index, interp_weight) receiver_audio_1 = receiver_audio_1[..., :source1_audio.shape[-1]] receiver_audio_2 = receiver_audio_2[..., :source1_audio.shape[-1]] # Mix and normalize audios receiver_audio = (receiver_audio_1 + receiver_audio_2) scale = np.max(abs(receiver_audio)) receiver_audio /= scale receiver_audio_1 /= scale receiver_audio_2 /= scale return torch.from_numpy(receiver_audio), torch.from_numpy(receiver_audio_1), torch.from_numpy(receiver_audio_2) def interpolate_rgb_images( scene: Scene, receiver_position: torch.Tensor, receiver_rotation_list: T.List[float], video_len: int ) -> T.List[np.ndarray]: """ Interpolates RGB images based on receiver movement and rotation. Args: - scene: Scene object to render the images from. - receiver_position: Positions of the receiver along the path. - receiver_rotation_list: List of rotations for the receiver. - video_len: Number of frames in the video. Returns: - List of interpolated RGB images. """ interp_index, interp_weight = setup_dynamic_interp(receiver_position.numpy(), video_len) interpolated_rgb_list = [] for t in range(len(interp_index)): # Find the positions and rotations between which we're interpolating start_idx = interp_index[t] end_idx = start_idx + 1 start_pos = receiver_position[start_idx] end_pos = receiver_position[end_idx] start_rot = receiver_rotation_list[start_idx] end_rot = receiver_rotation_list[end_idx] # Interpolate position and rotation receiver_position_interp = interpolate_values(start_pos, end_pos, interp_weight[t]) receiver_rotation_interp = interpolate_values(start_rot, end_rot, interp_weight[t]) receiver = Receiver(receiver_position_interp, receiver_rotation_interp) scene.update_receiver(receiver) rgb, _ = scene.render_image() interpolated_rgb_list.append(rgb[..., :3]) return interpolated_rgb_list def all_pairs( list1: T.List[T.Any], list2: T.List[T.Any] ) -> T.Tuple[T.List[T.Any], T.List[T.Any]]: """ Computes all pairs of combinations between two lists. Args: - list1: First list. - list2: Second list. Returns: - Two lists containing paired elements from list1 and list2. """ list_pair = list(itertools.product(list1, list2)) list1_pair, list2_pair = zip(*list_pair) list1_pair = list(list1_pair) list2_pair = list(list2_pair) return list1_pair, list2_pair def generate_rir_combination( room: str, source_idx_list: T.List[int], grid_points_source: torch.Tensor, receiver_idx_list: T.List[int], receiver_rotation_list: T.List[float], grid_points_receiver: torch.Tensor, channel_type: str = 'Binaural', channel_order: int = 0 ) -> T.List[T.List[torch.Tensor]]: """ Generates room impulse responses (RIR) for given source and receiver combinations. Args: - room: Room object for which RIRs need to be computed. - source_idx_list: List of source indices. - grid_points_source: Grid points for the source. - receiver_idx_list: List of receiver indices. - receiver_rotation_list: List of receiver rotations. - grid_points_receiver: Grid points for the receiver. - channel_type: Type of the channel. Defaults to 'Ambisonics'. - channel_order: Order of the channel for Ambisonics. Defulats to 0, as video usually does not support HOA. 
Returns: - A 2D list containing RIRs for every source-receiver combination. """ # Set source and receiver points source_point_list = grid_points_source[source_idx_list] receiver_point_list = grid_points_receiver[receiver_idx_list] source_points_pair, receiver_points_pair = all_pairs(source_point_list, receiver_point_list) _, receiver_rotation_pair = all_pairs(source_point_list, receiver_rotation_list) room_list = [room] * len(source_points_pair) filename_list = None # Render RIR for grid points ir_list = render_rir_parallel(room_list, source_points_pair, receiver_points_pair, receiver_rotation_list=receiver_rotation_pair, filename_list=filename_list, channel_type=channel_type, channel_order=channel_order) ir_list = clip_all(ir_list) # make the length consistent num_channel = len(ir_list[0]) # Reshape RIR num_sources = len(source_idx_list) num_receivers = len(receiver_idx_list) ir_output = torch.stack(ir_list).reshape(num_sources, num_receivers, num_channel, -1) # '-1' will infer the remaining dimension based on the size of each tensor in ir_list ir_output /= ir_output.abs().max() return ir_output def interpolate_values( start: float, end: float, interp_weight: float ) -> float: """ Interpolate between two values based on the weight values. Args: - start: Beginning value. - end: Ending value. - interp_weight: Weight for linear interpolation Returns: - Interpolated value. """ return (1 - interp_weight) * start + interp_weight * end def main(args): """ Generate NVAS video from the estimated dry sound. Save: ├── {results_demo} = results/nvas3d_demo/default/demo/{room}/0 │ ├── video/ │ │ ├── moving_audio.wav : Audio interpolated for the moving receiver. │ │ ├── moving_audio_1.wav : Audio interpolated specifically for source 1. │ │ ├── moving_audio_2.wav : Audio interpolated specifically for source 2. │ │ ├── moving_video.mp4 : Video visualization of movement (no audio). │ │ ├── nvas.mp4 : NVAS video results with combined audio. │ │ ├── nvas_source1.mp4 : NVAS video results for only source 1 audio. │ │ ├── nvas_source2.mp4 : NVAS video results for only source 2 audio. │ │ └── rgb_receiver.png : A rendered view from the perspective of the receiver. """ # Constants sample_rate = args.sample_rate sample_rate_video = args.sample_rate_video novel_path_config = args.novel_path_config use_gt_location = args.use_gt_location channel_type = args.channel_type use_placeholder_mesh = args.use_placeholder_mesh # Load data and metadata metadata = torch.load(f'{args.results_dir}/results_detection/metadata.pt') room = metadata['room'][0] grid_points_source = metadata['grid_points'][0] receiver_idx_list_original = torch.tensor(metadata['receiver_idx_list'])[:4] if use_gt_location: # Use estimated dry sound from GT source location source1_idx = metadata['source1_idx'][0].item() source2_idx = metadata['source2_idx'][0].item() source_idx_list = [source1_idx, source2_idx] else: # Use estimated dry sound from detected source location detected_source1_idx = metadata['detected_source_idx'][0] detected_source2_idx = metadata['detected_source_idx'][1] source_idx_list = [detected_source1_idx, detected_source2_idx] # Define receiver path and rotations with open(f'demo/config_demo/{novel_path_config}.json', 'r') as file: json_path = json.load(file) receiver_idx_list = json_path['receiver_idx_list'] receiver_rotation_list = json_path['receiver_rotation_list'] # Load grid points
grid_points_receiver = load_room_grid(room, grid_distance=args.grid_distance)['grid_points']
6
2023-10-19 05:35:54+00:00
16k
openvpi/SingingVocoders
training/ddspgan_task_2.py
[ { "identifier": "DDSPgan", "path": "models/ddspgan/ddspgan.py", "snippet": "class DDSPgan(nn.Module):\n def __init__(self,config):\n super().__init__()\n if config['model_args']['type']=='CombSub':\n self.ddsp = CombSub(\n sampling_rate=config['audio_sample_rate'],\n block_size=config['hop_size'],\n win_length=config['win_size'],\n n_mag_harmonic=config['model_args']['n_mag_harmonic'],\n n_mag_noise=config['model_args']['n_mag_noise'],\n n_mels=config['audio_num_mel_bins'])\n elif config['model_args']['type']=='Sins':\n self.ddsp = Sins(\n sampling_rate=config['audio_sample_rate'],\n block_size=config['hop_size'],\n win_length=config['win_size'],\n n_harmonics=config['model_args']['n_harmonics'],\n n_mag_noise=config['model_args']['n_mag_noise'],\n n_mels=config['audio_num_mel_bins'])\n\n def forward(self,mel,f0,infer=False):\n signal, _, (s_h, s_n) = self.ddsp(mel.transpose(1,2), torch.unsqueeze(f0,dim=-1), infer=infer)\n return signal.unsqueeze(1)" }, { "identifier": "Generator", "path": "models/nsf_HiFigan/models.py", "snippet": "class Generator(torch.nn.Module):\n def __init__(self, h):\n super(Generator, self).__init__()\n self.h = h\n self.num_kernels = len(h.resblock_kernel_sizes)\n self.num_upsamples = len(h.upsample_rates)\n self.m_source = SourceModuleHnNSF(\n sampling_rate=h.sampling_rate,\n harmonic_num=8\n )\n self.noise_convs = nn.ModuleList()\n self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))\n resblock = ResBlock1 if h.resblock == '1' else ResBlock2\n\n self.ups = nn.ModuleList()\n for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):\n c_cur = h.upsample_initial_channel // (2 ** (i + 1))\n self.ups.append(weight_norm(\n ConvTranspose1d(h.upsample_initial_channel // (2 ** i), h.upsample_initial_channel // (2 ** (i + 1)),\n k, u, padding=(k - u) // 2)))\n if i + 1 < len(h.upsample_rates): #\n stride_f0 = int(np.prod(h.upsample_rates[i + 1:]))\n self.noise_convs.append(Conv1d(\n 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))\n else:\n self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))\n self.resblocks = nn.ModuleList()\n ch = h.upsample_initial_channel\n for i in range(len(self.ups)):\n ch //= 2\n for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):\n self.resblocks.append(resblock(h, ch, k, d))\n\n self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))\n self.ups.apply(init_weights)\n self.conv_post.apply(init_weights)\n self.upp = int(np.prod(h.upsample_rates))\n\n def forward(self, x, f0):\n har_source = self.m_source(f0, self.upp).transpose(1, 2)\n x = self.conv_pre(x)\n for i in range(self.num_upsamples):\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = self.ups[i](x)\n x_source = self.noise_convs[i](har_source)\n x = x + x_source\n xs = None\n for j in range(self.num_kernels):\n if xs is None:\n xs = self.resblocks[i * self.num_kernels + j](x)\n else:\n xs += self.resblocks[i * self.num_kernels + j](x)\n x = xs / self.num_kernels\n x = F.leaky_relu(x)\n x = self.conv_post(x)\n x = torch.tanh(x)\n\n return x\n\n def remove_weight_norm(self):\n # rank_zero_info('Removing weight norm...')\n print('Removing weight norm...')\n for l in self.ups:\n remove_weight_norm(l)\n for l in self.resblocks:\n l.remove_weight_norm()\n remove_weight_norm(self.conv_pre)\n remove_weight_norm(self.conv_post)" }, { "identifier": "AttrDict", "path": "models/nsf_HiFigan/models.py", "snippet": "class AttrDict(dict):\n def __init__(self, *args, **kwargs):\n 
super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self" }, { "identifier": "MultiScaleDiscriminator", "path": "models/nsf_HiFigan/models.py", "snippet": "class MultiScaleDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiScaleDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList(\n [\n DiscriminatorS(use_spectral_norm=True),\n DiscriminatorS(),\n DiscriminatorS(),\n ]\n )\n self.meanpools = nn.ModuleList(\n [AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)]\n )\n\n def forward(self, y):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n if i != 0:\n y = self.meanpools[i - 1](y)\n\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs," }, { "identifier": "MultiPeriodDiscriminator", "path": "models/nsf_HiFigan/models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, periods=None):\n super(MultiPeriodDiscriminator, self).__init__()\n self.periods = periods if periods is not None else [2, 3, 5, 7, 11]\n self.discriminators = nn.ModuleList()\n for period in self.periods:\n self.discriminators.append(DiscriminatorP(period))\n\n def forward(self, y):\n y_d_rs = []\n\n fmap_rs = []\n\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs," }, { "identifier": "HiFiloss", "path": "modules/loss/HiFiloss.py", "snippet": "class HiFiloss(nn.Module):\n def __init__(self, config: dict):\n super().__init__()\n self.mel = PitchAdjustableMelSpectrogram(sample_rate=config['audio_sample_rate'],\n n_fft=config['fft_size'],\n win_length=config['win_size'],\n hop_length=config['hop_size'],\n f_min=config['fmin'],\n f_max=config['fmax_for_loss'],\n n_mels=config['audio_num_mel_bins'], )\n self.L1loss = nn.L1Loss()\n self.lab_aux_loss = config.get('lab_aux_loss', 45)\n self.lab_aux_mel_loss = config.get('lab_aux_melloss', self.lab_aux_loss)\n self.lab_aux_stft_loss = config.get('lab_aux_stftloss', 2.5)\n if config.get('use_stftloss', False):\n self.stft = warp_stft({'fft_sizes': config['loss_fft_sizes'], 'hop_sizes': config['loss_hop_sizes'],\n 'win_lengths': config['loss_win_lengths']})\n self.use_stftloss = config.get('use_stftloss', False)\n\n def discriminator_loss(self, disc_real_outputs, disc_generated_outputs):\n loss = 0\n rlosses = 0\n glosses = 0\n r_losses = []\n g_losses = []\n\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg ** 2)\n loss += r_loss + g_loss\n rlosses += r_loss.item()\n glosses += g_loss.item()\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, rlosses, glosses, r_losses, g_losses\n\n def Dloss(self, Dfake, Dtrue):\n\n (Fmsd_out, _), (Fmpd_out, _) = Dfake\n (Tmsd_out, _), (Tmpd_out, _) = Dtrue\n msdloss, msdrlosses, msdglosses, _, _ = self.discriminator_loss(Tmsd_out, Fmsd_out)\n mpdloss, mpdrlosses, mpdglosses, _, _ = self.discriminator_loss(Tmpd_out, Fmpd_out)\n loss = msdloss + mpdloss\n return loss, {'DmsdlossF': msdglosses, 'DmsdlossT': msdrlosses, 'DmpdlossT': mpdrlosses,\n 'DmpdlossF': mpdglosses}\n\n def feature_loss(self, fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2\n\n def GDloss(self, GDfake, GDtrue):\n loss = 0\n gen_losses = []\n msd_losses = 0\n mpd_losses = 0\n (msd_out, Fmsd_feature), (mpd_out, 
Fmpd_feature) = GDfake\n (_, Tmsd_feature), (_, Tmpd_feature) = GDtrue\n for dg in msd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n msd_losses = l + msd_losses\n\n for dg in mpd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mpd_losses = l + mpd_losses\n\n msd_feature_loss = self.feature_loss(Tmsd_feature, Fmsd_feature)\n mpd_feature_loss = self.feature_loss(Tmpd_feature, Fmpd_feature)\n # loss +=msd_feature_loss\n # loss +=mpd_feature_loss\n loss = msd_feature_loss + mpd_feature_loss + mpd_losses + msd_losses\n # (msd_losses, mpd_losses), (msd_feature_loss, mpd_feature_loss), gen_losses\n return loss, {'Gmsdloss': msd_losses, 'Gmpdloss': mpd_losses, 'Gmsd_feature_loss': msd_feature_loss,\n 'Gmpd_feature_loss': mpd_feature_loss}\n\n def Auxloss(self, Goutput, sample):\n Gmel = self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n mel_loss = self.L1loss(Gmel, Rmel) * self.lab_aux_mel_loss\n if self.use_stftloss:\n sc_loss, mag_loss = self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n stft_loss = (sc_loss + mag_loss) * self.lab_aux_stft_loss\n loss = mel_loss + stft_loss\n return loss, {'auxloss': loss, 'auxloss_mel': mel_loss, 'auxloss_stft': stft_loss}\n return mel_loss, {'auxloss': mel_loss}\n\n # def Auxloss(self,Goutput, sample):\n #\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n # sc_loss, mag_loss=self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n # loss=(sc_loss+ mag_loss)*self.labauxloss\n # return loss,{'auxloss':loss,'auxloss_sc_loss':sc_loss,'auxloss_mag_loss':mag_loss}\n #" }, { "identifier": "ddsploss", "path": "modules/loss/ddsploss.py", "snippet": "class ddsploss(nn.Module):\n def __init__(self,config:dict):\n super().__init__()\n self.mel=PitchAdjustableMelSpectrogram( sample_rate=config['audio_sample_rate'],\n n_fft=config['fft_size'],\n win_length=config['win_size'],\n hop_length=config['hop_size'],\n f_min=config['fmin'],\n f_max=config['fmax_for_loss'],\n n_mels=config['audio_num_mel_bins'],)\n self.L1loss=nn.L1Loss()\n self.labauxloss=config.get('lab_aux_loss',45)\n self.stft=warp_stft({'fft_sizes':[2048,1024, 2048, 512,1024],'hop_sizes':[512,120, 240, 50,256],'win_lengths':[2048,600, 1200, 240,512]})\n\n\n def discriminator_loss(self,disc_real_outputs, disc_generated_outputs):\n loss = 0\n rlosses=0\n glosses=0\n r_losses = []\n g_losses = []\n\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg ** 2)\n loss += r_loss + g_loss\n rlosses+=r_loss.item()\n glosses +=g_loss.item()\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, rlosses,glosses,r_losses, g_losses\n\n\n def Dloss(self,Dfake, Dtrue):\n\n (Fmsd_out, _), (Fmpd_out, _)=Dfake\n (Tmsd_out, _), (Tmpd_out, _)=Dtrue\n msdloss, msdrlosses, msdglosses, _, _=self.discriminator_loss(Tmsd_out,Fmsd_out)\n mpdloss, mpdrlosses, mpdglosses, _, _ = self.discriminator_loss(Tmpd_out, Fmpd_out)\n loss=msdloss+mpdloss\n return loss,{'DmsdlossF':msdglosses,'DmsdlossT':msdrlosses,'DmpdlossT':mpdrlosses,'DmpdlossF':mpdglosses}\n\n def feature_loss(self,fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, 
dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2\n\n def GDloss(self,GDfake,GDtrue):\n loss = 0\n gen_losses = []\n msd_losses=0\n mpd_losses = 0\n (msd_out, Fmsd_feature), (mpd_out, Fmpd_feature)=GDfake\n (_, Tmsd_feature), (_, Tmpd_feature) = GDtrue\n for dg in msd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n msd_losses=l+msd_losses\n\n for dg in mpd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mpd_losses=l+mpd_losses\n\n msd_feature_loss=self.feature_loss(Tmsd_feature,Fmsd_feature)\n mpd_feature_loss = self.feature_loss(Tmpd_feature, Fmpd_feature)\n # loss +=msd_feature_loss\n # loss +=mpd_feature_loss\n loss= msd_feature_loss+mpd_feature_loss+mpd_losses+msd_losses\n # (msd_losses, mpd_losses), (msd_feature_loss, mpd_feature_loss), gen_losses\n return loss, {'Gmsdloss':msd_losses,'Gmpdloss':mpd_losses,'Gmsd_feature_loss':msd_feature_loss,'Gmpd_feature_loss':mpd_feature_loss}\n\n # def Auxloss(self,Goutput, sample):\n #\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n # loss=self.L1loss(Gmel, Rmel)*self.labauxloss\n # return loss,{'auxloss':loss}\n\n def Auxloss(self,Goutput, sample):\n\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n sc_loss, mag_loss=self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n loss=(sc_loss+ mag_loss)*self.labauxloss\n return loss,{'auxloss':loss,'auxloss_sc_loss':sc_loss,'auxloss_mag_loss':mag_loss}" }, { "identifier": "GanBaseTask", "path": "training/base_task_gan.py", "snippet": "class GanBaseTask(pl.LightningModule):\n \"\"\"\n Base class for training tasks.\n 1. *load_ckpt*:\n load checkpoint;\n 2. *training_step*:\n record and log the loss;\n 3. *optimizer_step*:\n run backwards step;\n 4. *start*:\n load training configs, backup code, log to tensorboard, start training;\n 5. *configure_ddp* and *init_ddp_connection*:\n start parallel training.\n\n Subclasses should define:\n 1. *build_model*, *build_optimizer*, *build_scheduler*:\n how to build the model, the optimizer and the training scheduler;\n 2. *_training_step*:\n one training step of the model;\n 3. 
*on_validation_end* and *_on_validation_end*:\n postprocess the validation output.\n \"\"\"\n\n def __init__(self, config: dict, *args, **kwargs):\n # dataset configs\n super().__init__(*args, **kwargs)\n self.dataset_cls = None\n self.config = config\n # self.max_batch_frames = self.config['max_batch_frames']\n # self.max_batch_size = self.config['max_batch_size']\n # self.max_val_batch_frames = self.config['max_val_batch_frames']\n # self.max_val_batch_size = self.config['max_val_batch_size']\n\n # self.accumulate_grad_batches = self.config['accumulate_grad_batches']\n self.clip_grad_norm = self.config['clip_grad_norm']\n\n self.training_sampler = None\n self.model = None\n self.generator = None\n self.discriminator = None\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = False\n\n self.valid_losses: Dict[str, Metric] = {\n 'total_loss': MeanMetric()\n }\n self.valid_metric_names = set()\n self.mix_loss = None\n\n self.automatic_optimization = False\n self.skip_immediate_validations = 0\n\n self.aux_step = self.config.get('aux_step')\n self.train_dataset = None\n self.valid_dataset = None\n\n ###########\n\n # Training, validation and testing\n ###########\n def setup(self, stage):\n self.model = self.build_model()\n self.unfreeze_all_params()\n if self.config['freezing_enabled']:\n self.freeze_params()\n if self.config['finetune_enabled'] and get_latest_checkpoint_path(\n pathlib.Path(self.config['work_dir'])) is None:\n self.load_finetune_ckpt(self.load_pre_train_model())\n self.print_arch()\n self.build_losses_and_metrics()\n self.build_dataset()\n # self.train_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['train_set_name'], allow_aug=True\n # )\n # self.valid_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['valid_set_name'], allow_aug=False\n # )\n\n def build_dataset(self):\n raise NotImplementedError()\n\n def get_need_freeze_state_dict_key(self, model_state_dict) -> list:\n key_list = []\n for i in self.config['frozen_params']:\n for j in model_state_dict:\n if j.startswith(i):\n key_list.append(j)\n return list(set(key_list))\n\n def freeze_params(self) -> None:\n model_state_dict = self.state_dict().keys()\n freeze_key = self.get_need_freeze_state_dict_key(model_state_dict=model_state_dict)\n\n for i in freeze_key:\n params = self.get_parameter(i)\n\n params.requires_grad = False\n\n def unfreeze_all_params(self) -> None:\n for i in self.parameters():\n i.requires_grad = True\n\n def load_finetune_ckpt(\n self, state_dict\n ) -> None:\n\n adapt_shapes = self.config['finetune_strict_shapes']\n if not adapt_shapes:\n cur_model_state_dict = self.state_dict()\n unmatched_keys = []\n for key, param in state_dict.items():\n if key in cur_model_state_dict:\n new_param = cur_model_state_dict[key]\n if new_param.shape != param.shape:\n unmatched_keys.append(key)\n print('| Unmatched keys: ', key, new_param.shape, param.shape)\n for key in unmatched_keys:\n del state_dict[key]\n self.load_state_dict(state_dict, strict=False)\n\n def load_pre_train_model(self):\n\n pre_train_ckpt_path = self.config.get('finetune_ckpt_path')\n blacklist = self.config.get('finetune_ignored_params')\n if blacklist is None:\n blacklist = []\n # if whitelist is None:\n # raise RuntimeError(\"\")\n\n if pre_train_ckpt_path is not None:\n ckpt = torch.load(pre_train_ckpt_path)\n\n state_dict = {}\n for i in ckpt['state_dict']:\n # if 'diffusion' in i:\n 
# if i in rrrr:\n # continue\n skip = False\n for b in blacklist:\n if i.startswith(b):\n skip = True\n break\n\n if skip:\n continue\n\n state_dict[i] = ckpt['state_dict'][i]\n print(i)\n return state_dict\n else:\n raise RuntimeError(\"\")\n\n def build_model(self):\n raise NotImplementedError()\n\n @rank_zero_only\n def print_arch(self):\n utils.print_arch(self)\n\n def build_losses_and_metrics(self):\n raise NotImplementedError()\n\n def register_metric(self, name: str, metric: Metric):\n assert isinstance(metric, Metric)\n setattr(self, name, metric)\n self.valid_metric_names.add(name)\n\n # def run_model(self, sample, infer=False):\n # \"\"\"\n # steps:\n # 1. run the full model\n # 2. calculate losses if not infer\n # \"\"\"\n # raise NotImplementedError()\n\n def Gforward(self, sample, infer=False):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n def Dforward(self, Goutput):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n # def on_train_epoch_start(self):\n # if self.training_sampler is not None:\n # self.training_sampler.set_epoch(self.current_epoch)\n\n def _training_step(self, sample, batch_idx):\n \"\"\"\n :return: total loss: torch.Tensor, loss_log: dict, other_log: dict\n\n \"\"\"\n aux_only = False\n if self.aux_step is not None:\n if self.aux_step > self.global_step:\n aux_only = True\n\n log_diet = {}\n opt_g, opt_d = self.optimizers()\n Goutput = self.Gforward(sample=sample)\n if not aux_only:\n Dfake = self.Dforward(Goutput=Goutput['audio'].detach())\n Dtrue = self.Dforward(Goutput=sample['audio'])\n Dloss, Dlog = self.mix_loss.Dloss(Dfake=Dfake, Dtrue=Dtrue)\n log_diet.update(Dlog)\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Dloss/self.clip_grad_norm)\n # else:\n opt_d.zero_grad()\n self.manual_backward(Dloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_d, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_d.step()\n opt_d.zero_grad()\n if not aux_only:\n GDfake = self.Dforward(Goutput=Goutput['audio'])\n GDtrue = self.Dforward(Goutput=sample['audio'])\n GDloss, GDlog = self.mix_loss.GDloss(GDfake=GDfake,GDtrue=GDtrue)\n log_diet.update(GDlog)\n Auxloss, Auxlog = self.mix_loss.Auxloss(Goutput=Goutput, sample=sample)\n\n log_diet.update(Auxlog)\n if not aux_only:\n Gloss=GDloss + Auxloss\n else:\n Gloss=Auxloss\n\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Gloss / self.clip_grad_norm)\n # else:\n # self.manual_backward(Gloss)\n # if (batch_idx + 1) % self.accumulate_grad_batches == 0:\n opt_g.zero_grad()\n self.manual_backward(Gloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_g, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_g.step()\n\n\n\n return log_diet\n\n def training_step(self, sample, batch_idx, ): # todo\n log_outputs = self._training_step(sample, batch_idx)\n\n # logs to progress bar\n self.log_dict({'loss':sum(log_outputs.values())}, prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # self.log('lr', self.lr_schedulers().get_last_lr()[0], prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # logs to tensorboard\n if self.global_step % self.config['log_interval'] == 0:\n tb_log = {f'training/{k}': v for k, v in log_outputs.items()}\n # tb_log['training/lr'] = self.lr_schedulers().get_last_lr()[0]\n self.logger.log_metrics(tb_log, step=self.global_step)\n #\n # 
return total_loss\n\n # def on_before_optimizer_step(self, *args, **kwargs):\n # self.log_dict(grad_norm(self, norm_type=2))\n\n def _on_validation_start(self):\n pass\n\n def on_validation_start(self):\n self._on_validation_start()\n for metric in self.valid_losses.values():\n metric.to(self.device)\n metric.reset()\n\n def _validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n :return: loss_log: dict, weight: int\n \"\"\"\n raise NotImplementedError()\n\n def validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n\n \"\"\"\n\n # if self.skip_immediate_validations == 0 and self.global_step != 0:\n # self.skip_immediate_validation = True\n # self.skip_immediate_validations = 1\n # if self.global_step == 0:\n # self.skip_immediate_validations = 1\n\n if self.skip_immediate_validation:\n rank_zero_debug(f\"Skip validation {batch_idx}\")\n return {}\n with torch.autocast(self.device.type, enabled=False):\n losses, weight = self._validation_step(sample, batch_idx)\n losses = {\n 'total_loss': sum(losses.values()),\n **losses\n }\n for k, v in losses.items():\n if k not in self.valid_losses:\n self.valid_losses[k] = MeanMetric().to(self.device)\n self.valid_losses[k].update(v, weight=weight) # weight=1\n return losses\n\n def on_validation_epoch_end(self):\n if self.skip_immediate_validation:\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = True\n return\n loss_vals = {k: v.compute() for k, v in self.valid_losses.items()}\n self.log('val_loss', loss_vals['total_loss'], on_epoch=True, prog_bar=True, logger=False, sync_dist=True)\n self.logger.log_metrics({f'validation/{k}': v for k, v in loss_vals.items()}, step=self.global_step)\n for metric in self.valid_losses.values():\n metric.reset()\n metric_vals = {k: getattr(self, k).compute() for k in self.valid_metric_names}\n self.logger.log_metrics({f'metrics/{k}': v for k, v in metric_vals.items()}, step=self.global_step)\n for metric_name in self.valid_metric_names:\n getattr(self, metric_name).reset()\n\n # noinspection PyMethodMayBeStatic\n def build_scheduler(self, optimizer):\n from utils import build_lr_scheduler_from_config\n\n scheduler_args = self.config['lr_scheduler_args']\n assert scheduler_args['scheduler_cls'] != ''\n scheduler = build_lr_scheduler_from_config(optimizer, scheduler_args)\n return scheduler\n\n # noinspection PyMethodMayBeStatic\n def build_optimizer(self, model, optimizer_args):\n from utils import build_object_from_class_name\n\n assert optimizer_args['optimizer_cls'] != ''\n if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n\n if isinstance(model, nn.ModuleList):\n parameterslist = []\n for i in model:\n parameterslist = parameterslist + list(i.parameters())\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.ModuleDict):\n parameterslist = []\n for i in model:\n # parameterslist = parameterslist + list(model[i].parameters())\n parameterslist.append({'params': model[i].parameters()})\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.Module):\n\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n 
model.parameters(),\n **optimizer_args\n )\n else:\n raise RuntimeError(\"\")\n\n return optimizer\n\n def configure_optimizers(self):\n optG = self.build_optimizer(self.generator, optimizer_args=self.config['generater_optimizer_args'])\n optD = self.build_optimizer(self.discriminator, optimizer_args=self.config['discriminate_optimizer_args'])\n\n return [optG, optD]\n # scheduler = self.build_scheduler(optm)\n # if scheduler is None:\n # return optm\n # return {\n # \"optimizer\": optm,\n # \"lr_scheduler\": {\n # \"scheduler\": scheduler,\n # \"interval\": \"step\",\n # \"frequency\": 1\n # }\n # }\n\n def train_dataloader(self):\n # self.training_sampler = DsBatchSampler(\n # self.train_dataset,\n # max_batch_frames=self.max_batch_frames,\n # max_batch_size=self.max_batch_size,\n # num_replicas=(self.trainer.distributed_sampler_kwargs or {}).get('num_replicas', 1),\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # sort_by_similar_size=self.config['sort_by_len'],\n # required_batch_count_multiple=self.config['accumulate_grad_batches'],\n # frame_count_grid=self.config['sampler_frame_count_grid'],\n # shuffle_sample=True,\n # shuffle_batch=False,\n # seed=self.config['seed']\n # )\n return torch.utils.data.DataLoader(self.train_dataset,\n collate_fn=self.train_dataset.collater,\n batch_size=self.config['batch_size'],\n # batch_sampler=self.training_sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n pin_memory=True,\n persistent_workers=True)\n\n def val_dataloader(self):\n # sampler = DsEvalBatchSampler(\n # self.valid_dataset,\n # max_batch_frames=self.max_val_batch_frames,\n # max_batch_size=self.max_val_batch_size,\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # batch_by_size=False\n # )\n return torch.utils.data.DataLoader(self.valid_dataset,\n collate_fn=self.valid_dataset.collater,\n batch_size=1,\n # batch_sampler=sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n shuffle=False)\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n def on_test_start(self):\n self.on_validation_start()\n\n def test_step(self, sample, batch_idx):\n return self.validation_step(sample, batch_idx)\n\n def on_test_end(self):\n return self.on_validation_end()\n\n def on_save_checkpoint(self, checkpoint):\n pass\n # checkpoint['trainer_stage'] = self.trainer.state.stage.value\n\n # def on_load_checkpoint(self, checkpoint):\n # # from lightning.pytorch.trainer.states import RunningStage\n # from utils import simulate_lr_scheduler\n # # if checkpoint.get('trainer_stage', '') == RunningStage.VALIDATING.value:\n # # self.skip_immediate_validation = True\n #\n # optimizer_args = self.config['optimizer_args']\n # scheduler_args = self.config['lr_scheduler_args']\n #\n # if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n # optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n #\n # if checkpoint.get('optimizer_states', None):\n # opt_states = checkpoint['optimizer_states']\n # assert len(opt_states) == 1 # only support one optimizer\n # opt_state = opt_states[0]\n # for param_group in opt_state['param_groups']:\n # for k, v in optimizer_args.items():\n # if k in param_group and param_group[k] != v:\n # if 'lr_schedulers' in checkpoint and checkpoint['lr_schedulers'] and k == 'lr':\n # continue\n # rank_zero_info(f'| Overriding optimizer parameter {k} from 
checkpoint: {param_group[k]} -> {v}')\n # param_group[k] = v\n # if 'initial_lr' in param_group and param_group['initial_lr'] != optimizer_args['lr']:\n # rank_zero_info(\n # f'| Overriding optimizer parameter initial_lr from checkpoint: {param_group[\"initial_lr\"]} -> {optimizer_args[\"lr\"]}'\n # )\n # param_group['initial_lr'] = optimizer_args['lr']\n #\n # if checkpoint.get('lr_schedulers', None):\n # assert checkpoint.get('optimizer_states', False)\n # assert len(checkpoint['lr_schedulers']) == 1 # only support one scheduler\n # checkpoint['lr_schedulers'][0] = simulate_lr_scheduler(\n # optimizer_args, scheduler_args,\n # step_count=checkpoint['global_step'],\n # num_param_groups=len(checkpoint['optimizer_states'][0]['param_groups'])\n # )\n # for param_group, new_lr in zip(\n # checkpoint['optimizer_states'][0]['param_groups'],\n # checkpoint['lr_schedulers'][0]['_last_lr'],\n # ):\n # if param_group['lr'] != new_lr:\n # rank_zero_info(\n # f'| Overriding optimizer parameter lr from checkpoint: {param_group[\"lr\"]} -> {new_lr}')\n # param_group['lr'] = new_lr" }, { "identifier": "DsBatchSampler", "path": "utils/training_utils.py", "snippet": "class DsBatchSampler(Sampler):\n def __init__(self, dataset, max_batch_frames, max_batch_size, sub_indices=None,\n num_replicas=None, rank=None, frame_count_grid=200,\n required_batch_count_multiple=1, batch_by_size=True, sort_by_similar_size=True,\n shuffle_sample=False, shuffle_batch=False, seed=0, drop_last=False) -> None:\n self.dataset = dataset\n self.max_batch_frames = max_batch_frames\n self.max_batch_size = max_batch_size\n self.sub_indices = sub_indices\n self.num_replicas = num_replicas\n self.rank = rank\n self.frame_count_grid = frame_count_grid\n self.required_batch_count_multiple = required_batch_count_multiple\n self.batch_by_size = batch_by_size\n self.sort_by_similar_size = sort_by_similar_size\n self.shuffle_sample = shuffle_sample\n self.shuffle_batch = shuffle_batch\n self.seed = seed\n self.drop_last = drop_last\n self.epoch = 0\n self.batches = None\n self.formed = None\n\n def __form_batches(self):\n if self.formed == self.epoch + self.seed:\n return\n rng = np.random.default_rng(self.seed + self.epoch)\n if self.shuffle_sample:\n if self.sub_indices is not None:\n rng.shuffle(self.sub_indices)\n indices = np.array(self.sub_indices)\n else:\n indices = rng.permutation(len(self.dataset))\n\n if self.sort_by_similar_size:\n grid = self.frame_count_grid\n assert grid > 0\n sizes = (np.round(np.array(self.dataset._sizes)[indices] / grid) * grid).clip(grid, None).astype(\n np.int64)\n indices = indices[np.argsort(sizes, kind='mergesort')]\n\n indices = indices.tolist()\n else:\n indices = self.sub_indices if self.sub_indices is not None else list(range(len(self.dataset)))\n\n if self.batch_by_size:\n batches = utils.batch_by_size(\n indices, self.dataset.num_frames,\n max_batch_frames=self.max_batch_frames,\n max_batch_size=self.max_batch_size\n )\n else:\n batches = [indices[i:i + self.max_batch_size] for i in range(0, len(indices), self.max_batch_size)]\n\n floored_total_batch_count = (len(batches) // self.num_replicas) * self.num_replicas\n if self.drop_last and len(batches) > floored_total_batch_count:\n batches = batches[:floored_total_batch_count]\n leftovers = []\n else:\n leftovers = (rng.permutation(len(batches) - floored_total_batch_count) + floored_total_batch_count).tolist()\n\n batch_assignment = rng.permuted(\n np.arange(floored_total_batch_count).reshape(-1, self.num_replicas).transpose(), axis=0\n 
)[self.rank].tolist()\n floored_batch_count = len(batch_assignment)\n ceiled_batch_count = floored_batch_count + (1 if len(leftovers) > 0 else 0)\n if self.rank < len(leftovers):\n batch_assignment.append(leftovers[self.rank])\n elif len(leftovers) > 0:\n batch_assignment.append(batch_assignment[self.epoch % floored_batch_count])\n if self.required_batch_count_multiple > 1 and ceiled_batch_count % self.required_batch_count_multiple != 0:\n # batch_assignment = batch_assignment[:((floored_batch_count \\\n # // self.required_batch_count_multiple) * self.required_batch_count_multiple)]\n ceiled_batch_count = math.ceil(\n ceiled_batch_count / self.required_batch_count_multiple) * self.required_batch_count_multiple\n for i in range(ceiled_batch_count - len(batch_assignment)):\n batch_assignment.append(\n batch_assignment[(i + self.epoch * self.required_batch_count_multiple) % floored_batch_count])\n\n self.batches = [deepcopy(batches[i]) for i in batch_assignment]\n\n if self.shuffle_batch:\n rng.shuffle(self.batches)\n\n del indices\n del batches\n del batch_assignment\n\n def __iter__(self):\n self.__form_batches()\n return iter(self.batches)\n\n def __len__(self):\n self.__form_batches()\n if self.batches is None:\n raise RuntimeError(\"Batches are not initialized. Call __form_batches first.\")\n return len(self.batches)\n\n def set_epoch(self, epoch):\n self.epoch = epoch" }, { "identifier": "DsEvalBatchSampler", "path": "utils/training_utils.py", "snippet": "class DsEvalBatchSampler(Sampler):\n def __init__(self, dataset, max_batch_frames, max_batch_size, rank=None, batch_by_size=True) -> None:\n self.dataset = dataset\n self.max_batch_frames = max_batch_frames\n self.max_batch_size = max_batch_size\n self.rank = rank\n self.batch_by_size = batch_by_size\n self.batches = None\n self.batch_size = max_batch_size\n self.drop_last = False\n\n if self.rank == 0:\n indices = list(range(len(self.dataset)))\n if self.batch_by_size:\n self.batches = utils.batch_by_size(\n indices, self.dataset.num_frames,\n max_batch_frames=self.max_batch_frames, max_batch_size=self.max_batch_size\n )\n else:\n self.batches = [\n indices[i:i + self.max_batch_size]\n for i in range(0, len(indices), self.max_batch_size)\n ]\n else:\n self.batches = [[0]]\n\n def __iter__(self):\n return iter(self.batches)\n\n def __len__(self):\n return len(self.batches)" }, { "identifier": "get_latest_checkpoint_path", "path": "utils/training_utils.py", "snippet": "def get_latest_checkpoint_path(work_dir):\n if not isinstance(work_dir, Path):\n work_dir = Path(work_dir)\n if not work_dir.exists():\n return None\n\n last_step = -1\n last_ckpt_name = None\n\n for ckpt in work_dir.glob('model_ckpt_steps_*.ckpt'):\n search = re.search(r'steps_\\d+', ckpt.name)\n if search:\n step = int(search.group(0)[6:])\n if step > last_step:\n last_step = step\n last_ckpt_name = str(ckpt)\n\n return last_ckpt_name if last_ckpt_name is not None else None" }, { "identifier": "PitchAdjustableMelSpectrogram", "path": "utils/wav2mel.py", "snippet": "class PitchAdjustableMelSpectrogram:\n def __init__(\n self,\n sample_rate=44100,\n n_fft=2048,\n win_length=2048,\n hop_length=512,\n f_min=40,\n f_max=16000,\n n_mels=128,\n center=False,\n ):\n self.sample_rate = sample_rate\n self.n_fft = n_fft\n self.win_size = win_length\n self.hop_length = hop_length\n self.f_min = f_min\n self.f_max = f_max\n self.n_mels = n_mels\n self.center = center\n\n self.mel_basis = {}\n self.hann_window = {}\n\n def __call__(self, y, key_shift=0, speed=1.0):\n factor = 2 ** 
(key_shift / 12)\n n_fft_new = int(np.round(self.n_fft * factor))\n win_size_new = int(np.round(self.win_size * factor))\n hop_length = int(np.round(self.hop_length * speed))\n\n # if torch.min(y) < -1.0:\n # logger.warning(f\"min value is {torch.min(y)}\")\n # if torch.max(y) > 1.0:\n # logger.warning(f\"max value is {torch.max(y)}\")\n\n mel_basis_key = f\"{self.f_max}_{y.device}\"\n if mel_basis_key not in self.mel_basis:\n mel = librosa_mel_fn(\n sr=self.sample_rate,\n n_fft=self.n_fft,\n n_mels=self.n_mels,\n fmin=self.f_min,\n fmax=self.f_max,\n )\n self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)\n\n hann_window_key = f\"{key_shift}_{y.device}\"\n if hann_window_key not in self.hann_window:\n self.hann_window[hann_window_key] = torch.hann_window(\n win_size_new, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (\n int((win_size_new - hop_length) // 2),\n int((win_size_new - hop_length+1) // 2),\n ),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft_new,\n hop_length=hop_length,\n win_length=win_size_new,\n window=self.hann_window[hann_window_key],\n center=self.center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=True,\n ).abs()\n # spec = torch.view_as_real(spec)\n # spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))\n\n if key_shift != 0:\n size = self.n_fft // 2 + 1\n resize = spec.size(1)\n if resize < size:\n spec = F.pad(spec, (0, 0, 0, size - resize))\n\n spec = spec[:, :size, :] * self.win_size / win_size_new\n\n spec = torch.matmul(self.mel_basis[mel_basis_key], spec)\n\n return spec\n\n def dynamic_range_compression_torch(self,x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)" } ]
import logging import os import pathlib import random import sys import lightning.pytorch as pl import matplotlib import numpy as np import torch.utils.data import utils from typing import Dict from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only from matplotlib import pyplot as plt from torch import nn from torch.utils.data import Dataset from torchmetrics import Metric, MeanMetric from models.ddspgan.ddspgan import DDSPgan from models.nsf_HiFigan.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator from modules.loss.HiFiloss import HiFiloss from modules.loss.ddsploss import ddsploss from training.base_task_gan import GanBaseTask from utils.training_utils import ( DsBatchSampler, DsEvalBatchSampler, get_latest_checkpoint_path ) from utils.wav2mel import PitchAdjustableMelSpectrogram
13,057
end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] record['uv'] = record['uv'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] record['uv'] = record['uv'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty = (len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) uv = np.stack([record['uv'] for record in minibatch if 'uv' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), 'uv': torch.from_numpy(uv) } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class ddspgan_task(GanBaseTask): def __init__(self, config): super().__init__(config) self.TF = PitchAdjustableMelSpectrogram( f_min=0, f_max=None, n_mels=256,) self.logged_gt_wav = set() self.stft=stftlog() def build_dataset(self): self.train_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'train_set_name']) self.valid_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'valid_set_name'], infer=True) def build_model(self): # cfg=self.config['model_args'] # cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']}) # h=AttrDict(cfg) self.generator=DDSPgan(self.config)
# from utils.indexed_datasets import IndexedDataset def spec_to_figure(spec, vmin=None, vmax=None): if isinstance(spec, torch.Tensor): spec = spec.cpu().numpy() fig = plt.figure(figsize=(12, 9),dpi=100) plt.pcolor(spec.T, vmin=vmin, vmax=vmax) plt.tight_layout() return fig class nsf_HiFigan_dataset(Dataset): def __init__(self, config: dict, data_dir, infer=False): super().__init__() self.config = config self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir) with open(self.data_dir, 'r', encoding='utf8') as f: fills = f.read().strip().split('\n') self.data_index = fills self.infer = infer self.volume_aug = self.config['volume_aug'] self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0 def __getitem__(self, index): data_path = self.data_index[index] data = np.load(data_path) return {'f0': data['f0'], 'spectrogram': data['mel'], 'audio': data['audio'], 'uv': data['uv']} def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] del record['uv'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] record['uv'] = record['uv'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] record['uv'] = record['uv'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty = (len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) uv = np.stack([record['uv'] for record in minibatch if 'uv' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0), 'uv': torch.from_numpy(uv) } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in 
self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec class ddspgan_task(GanBaseTask): def __init__(self, config): super().__init__(config) self.TF = PitchAdjustableMelSpectrogram( f_min=0, f_max=None, n_mels=256,) self.logged_gt_wav = set() self.stft=stftlog() def build_dataset(self): self.train_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'train_set_name']) self.valid_dataset = nsf_HiFigan_dataset(config=self.config, data_dir=pathlib.Path(self.config['DataIndexPath']) / self.config[ 'valid_set_name'], infer=True) def build_model(self): # cfg=self.config['model_args'] # cfg.update({'sampling_rate':self.config['audio_sample_rate'],'num_mels':self.config['audio_num_mel_bins'],'hop_size':self.config['hop_size']}) # h=AttrDict(cfg) self.generator=DDSPgan(self.config)
self.discriminator=nn.ModuleDict({'msd':MultiScaleDiscriminator(), 'mpd':MultiPeriodDiscriminator(periods=self.config['model_args']['discriminator_periods'])})
4
2023-10-17 13:45:09+00:00
16k
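The first row above centers on nsf_HiFigan_dataset.collater, whose volume augmentation relies on the fact that scaling a waveform by exp(s) adds s to its natural-log mel spectrogram. The sketch below is not part of the dataset row; it is a minimal standalone illustration of that step (the function name, toy shapes, and the roughly-normalized-audio assumption are mine), shifting both views together and re-clamping the mel to the log(1e-5) floor used elsewhere in the row.

```python
import numpy as np
import torch


def volume_augment(audio: np.ndarray, log_mel: np.ndarray, max_log_shift: float = 3.0):
    """Jointly rescale a waveform and its natural-log mel spectrogram.

    Assumes audio is roughly normalized to [-1, 1] and log_mel stores natural-log
    magnitudes floored at log(1e-5), as in the collater above.
    """
    max_amp = float(np.max(np.abs(audio))) + 1e-5
    # cap the shift so the peak amplitude never exceeds 1.0
    max_shift = min(max_log_shift, np.log(1.0 / max_amp))
    shift = np.random.uniform(-max_log_shift, max_shift)
    audio = audio * np.exp(shift)   # scale the waveform by exp(shift)
    log_mel = log_mel + shift       # equivalent shift in log-mel space
    log_mel = torch.clamp(torch.from_numpy(log_mel), min=float(np.log(1e-5))).numpy()
    return audio, log_mel


# toy usage with hypothetical shapes: 1 s of 16 kHz audio, 128 mel bins x 80 frames
audio = np.random.uniform(-0.5, 0.5, size=16000).astype(np.float32)
log_mel = np.log(np.random.uniform(1e-5, 1.0, size=(128, 80)).astype(np.float32))
audio_aug, mel_aug = volume_augment(audio, log_mel)
```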
Jacob-Zhou/gecdi
gec/parser.py
[ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.utils.transform.Transform`.\n The data fields of all the instantiated sentences can be accessed as an attribute of the dataset.\n\n Args:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform` or its derivations.\n The instance holds a series of loading and processing behaviours with regard to the specific data format.\n data (Union[str, Iterable]):\n A filename or a list of instances that will be passed into :meth:`transform.load`.\n cache (bool):\n If ``True``, tries to use the previously cached binarized data for fast loading.\n In this way, sentences are loaded on-the-fly according to the meta data.\n If ``False``, all sentences will be directly loaded into the memory.\n Default: ``False``.\n binarize (bool):\n If ``True``, binarizes the dataset once building it. Only works if ``cache=True``. Default: ``False``.\n bin (str):\n Path for saving binarized files, required if ``cache=True``. Default: ``None``.\n max_len (int):\n Sentences exceeding the length will be discarded. Default: ``None``.\n kwargs (Dict):\n Together with `data`, kwargs will be passed into :meth:`transform.load` to control the loading behaviour.\n\n Attributes:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform`.\n sentences (List[Sentence]):\n A list of sentences loaded from the data.\n Each sentence includes fields obeying the data format defined in ``transform``.\n If ``cache=True``, each is a pointer to the sentence stored in the cache file.\n \"\"\"\n\n def __init__(\n self,\n transform: Transform,\n data: Union[str, Iterable],\n cache: bool = False,\n binarize: bool = False,\n bin: str = None,\n max_len: int = None,\n **kwargs\n ) -> Dataset:\n super(Dataset, self).__init__()\n\n self.transform = transform\n self.data = data\n self.cache = cache\n self.binarize = binarize\n self.bin = bin\n self.max_len = max_len or INF\n self.kwargs = kwargs\n\n if cache:\n if not isinstance(data, str) or not os.path.exists(data):\n raise FileNotFoundError(\"Only files are allowed for binarization, but not found\")\n if self.bin is None:\n self.fbin = data + '.pt'\n else:\n os.makedirs(self.bin, exist_ok=True)\n self.fbin = os.path.join(self.bin, os.path.split(data)[1]) + '.pt'\n if not self.binarize and os.path.exists(self.fbin):\n try:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n except Exception:\n raise RuntimeError(f\"Error found while debinarizing {self.fbin}, which may have been corrupted. 
\"\n \"Try re-binarizing it first\")\n else:\n self.sentences = list(transform.load(data, **kwargs))\n\n def __repr__(self):\n s = f\"{self.__class__.__name__}(\"\n s += f\"n_sentences={len(self.sentences)}\"\n if hasattr(self, 'loader'):\n s += f\", n_batches={len(self.loader)}\"\n if hasattr(self, 'buckets'):\n s += f\", n_buckets={len(self.buckets)}\"\n if self.shuffle:\n s += f\", seed={self.seed}\"\n if self.cache:\n s += f\", cache={self.cache}\"\n if self.binarize:\n s += f\", binarize={self.binarize}\"\n if self.max_len < INF:\n s += f\", max_len={self.max_len}\"\n s += \")\"\n return s\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, index):\n return debinarize(self.fbin, self.sentences[index]) if self.cache else self.sentences[index]\n\n def __getattr__(self, name):\n if name not in {f.name for f in self.transform.flattened_fields}:\n raise AttributeError\n if self.cache:\n if os.path.exists(self.fbin) and not self.binarize:\n sentences = self\n else:\n sentences = self.transform.load(self.data, **self.kwargs)\n return (getattr(sentence, name) for sentence in sentences)\n return [getattr(sentence, name) for sentence in self.sentences]\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n @lazy_property\n def sizes(self):\n if not self.cache:\n return [s.size for s in self.sentences]\n return debinarize(self.fbin, 'sizes')\n\n def build(\n self,\n batch_size: int,\n n_buckets: int = 1,\n shuffle: bool = False,\n distributed: bool = False,\n n_workers: int = 0,\n pin_memory: bool = True,\n chunk_size: int = 1000,\n seed: int = 1,\n ) -> Dataset:\n # numericalize all fields\n if not self.cache:\n self.sentences = [i for i in self.transform(self.sentences) if len(i) < self.max_len]\n else:\n # if not forced to do binarization and the binarized file already exists, directly load the meta file\n if os.path.exists(self.fbin) and not self.binarize:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n else:\n @contextmanager\n def cache(sentences):\n ftemp = tempfile.mkdtemp()\n fs = os.path.join(ftemp, 'sentences')\n fb = os.path.join(ftemp, os.path.basename(self.fbin))\n global global_transform\n global_transform = self.transform\n sentences = binarize({'sentences': progress_bar(sentences)}, fs)[1]['sentences']\n try:\n yield ((sentences[s:s+chunk_size], fs, f\"{fb}.{i}\", self.max_len)\n for i, s in enumerate(range(0, len(sentences), chunk_size)))\n finally:\n del global_transform\n shutil.rmtree(ftemp)\n\n def numericalize(sentences, fs, fb, max_len):\n sentences = global_transform((debinarize(fs, sentence) for sentence in sentences))\n sentences = [i for i in sentences if len(i) < max_len]\n return binarize({'sentences': sentences, 'sizes': [sentence.size for sentence in sentences]}, fb)[0]\n\n logger.info(f\"Seeking to cache the data to {self.fbin} first\")\n # numericalize the fields of each sentence\n if is_master():\n with cache(self.transform.load(self.data, **self.kwargs)) as chunks, mp.Pool(32) as pool:\n results = [pool.apply_async(numericalize, chunk) for chunk in chunks]\n self.sentences = binarize((r.get() for r in results), self.fbin, merge=True)[1]['sentences']\n if is_dist():\n dist.barrier()\n if not is_master():\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n # NOTE: the final bucket count is roughly equal to n_buckets\n self.buckets = dict(zip(*kmeans(self.sizes, n_buckets)))\n self.loader = DataLoader(transform=self.transform,\n 
dataset=self,\n batch_sampler=Sampler(self.buckets, batch_size, shuffle, distributed, seed=seed),\n num_workers=n_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory)\n self.seed = seed\n self.shuffle = shuffle\n return self" }, { "identifier": "map_token_ids", "path": "gec/fn.py", "snippet": "def map_token_ids(vocab_0, vocab_1, equal_labels=None):\n \"\"\"\n Map token ids from vocab_0 to vocab_1\n\n Args:\n vocab_0 (dict): vocab_0\n vocab_1 (dict): vocab_1\n equal_labels (dict): equal_labels\n \"\"\"\n if equal_labels is None:\n equal_labels = {}\n return [(i, vocab_1[equal_labels.get(k, k)]) for k, i in vocab_0.items()\n if k in vocab_1]" }, { "identifier": "PerplexityMetric", "path": "gec/metric.py", "snippet": "class PerplexityMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[torch.Tensor] = None,\n golds: Optional[torch.Tensor] = None,\n mask: Optional[torch.BoolTensor] = None,\n reverse: bool = True,\n eps: float = 1e-12) -> PerplexityMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_tokens = 0.\n\n self.tp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n\n self.total_loss = 0.\n\n if loss is not None:\n self(loss, preds, golds, mask)\n\n def __repr__(self):\n s = f\"loss: {self.loss:.4f} PPL: {self.ppl:.4f}\"\n if self.tp > 0:\n s += f\" - TGT: P: {self.p:6.2%} R: {self.r:6.2%} F0.5: {self.f:6.2%}\"\n return s\n\n def __call__(self, loss: float, preds: Tuple[List, torch.Tensor],\n golds: Tuple[List, torch.Tensor],\n mask: torch.BoolTensor) -> PerplexityMetric:\n n_tokens = mask.sum().item()\n self.n += len(mask)\n self.count += 1\n self.n_tokens += n_tokens\n self.total_loss += float(loss) * n_tokens\n\n if preds is not None:\n with tempfile.TemporaryDirectory() as t:\n fsrc, fpred, fgold = os.path.join(t, 'src'), os.path.join(\n t, 'pred'), os.path.join(t, 'gold')\n pred_m2, gold_m2 = os.path.join(t, 'pred.m2'), os.path.join(\n t, 'gold.m2')\n with open(fsrc, 'w') as fs, open(fpred, 'w') as f:\n for s, i in preds:\n fs.write(s + '\\n')\n f.write(i + '\\n')\n with open(fgold, 'w') as f:\n for _, i in golds:\n f.write(i + '\\n')\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fpred}',\n '-out', f'{pred_m2}'\n ])\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fgold}',\n '-out', f'{gold_m2}'\n ])\n out = subprocess.check_output(\n [\n 'errant_compare', '-hyp', f'{pred_m2}', '-ref',\n f'{gold_m2}'\n ],\n stderr=subprocess.STDOUT).decode()\n tp, fp, fn = (int(i) for i in out.split('\\n')[3].split()[:3])\n self.tp += tp\n self.pred += tp + fp\n self.gold += tp + fn\n return self\n\n def __add__(self, other: PerplexityMetric) -> PerplexityMetric:\n metric = PerplexityMetric(eps=self.eps)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.n_tokens = self.n_tokens + other.n_tokens\n metric.total_loss = self.total_loss + other.total_loss\n\n metric.tp = self.tp + other.tp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.f if self.f > 0 else self.ppl\n\n @property\n def loss(self):\n return self.total_loss / self.n_tokens\n\n @property\n def ppl(self):\n return math.pow(2, (self.loss / math.log(2)))\n\n @property\n def p(self):\n return self.tp / (self.pred + self.eps)\n\n @property\n def r(self):\n return self.tp / (self.gold + self.eps)\n\n @property\n def f(self):\n return (1 + 0.5**2) * self.p * self.r / 
(0.5**2 * self.p + self.r +\n self.eps)" }, { "identifier": "SpanMetric", "path": "gec/metric.py", "snippet": "class SpanMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[List[List[Tuple]]] = None,\n golds: Optional[List[List[Tuple]]] = None,\n reverse: bool = False,\n beta: Optional[float] = 1.,\n eps: float = 1e-12) -> SpanMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_ucm = 0.0\n self.n_lcm = 0.0\n self.n_tr = 0.0\n self.n_fr = 0.0\n self.n_e = 0.0\n self.n_c = 0.0\n self.utp = 0.0\n self.ltp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n self.beta = beta\n\n if loss is not None:\n self(loss, preds, golds)\n\n def __repr__(self):\n s = f\"ErrorSents: {self.n_e:6.0f} CorrectSents: {self.n_c:6.0f} TR: {self.tr:7.2%} FR: {self.fr:7.2%} \"\n # s += f\"GoldSpans: {self.gold:6.0f} PredSpans: {self.pred:6.0f} \"\n s += f\"UP: {self.up:7.2%} UR: {self.ur:7.2%} UF{'' if self.beta == 1.0 else self.beta}: {self.uf:7.2%} \"\n s += f\"LP: {self.lp:7.2%} LR: {self.lr:7.2%} LF{'' if self.beta == 1.0 else self.beta}: {self.lf:7.2%}\"\n return s\n\n def __call__(self, loss: float, preds: List[List[Tuple]],\n golds: List[List[Tuple]]) -> SpanMetric:\n self.n += len(preds)\n self.count += 1\n self.total_loss += float(loss)\n for pred, gold in zip(preds, golds):\n upred, ugold = Counter([tuple(span[:-1])\n for span in pred]), Counter(\n [tuple(span[:-1]) for span in gold])\n lpred, lgold = Counter([tuple(span) for span in pred\n ]), Counter([tuple(span) for span in gold])\n utp, ltp = list((upred & ugold).elements()), list(\n (lpred & lgold).elements())\n self.n_ucm += len(utp) == len(pred) == len(gold)\n self.n_lcm += len(ltp) == len(pred) == len(gold)\n self.n_tr += ((len(gold) > 0) and (len(pred) > 0))\n self.n_fr += ((len(gold) == 0) and (len(pred) > 0))\n self.n_e += (len(gold) > 0)\n self.n_c += (len(gold) == 0)\n self.utp += len(utp)\n self.ltp += len(ltp)\n self.pred += len(pred)\n self.gold += len(gold)\n return self\n\n def __add__(self, other: SpanMetric) -> SpanMetric:\n metric = SpanMetric(eps=self.eps, beta=self.beta)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.total_loss = self.total_loss + other.total_loss\n metric.n_ucm = self.n_ucm + other.n_ucm\n metric.n_lcm = self.n_lcm + other.n_lcm\n metric.n_tr = self.n_tr + other.n_tr\n metric.n_fr = self.n_fr + other.n_fr\n metric.n_e = self.n_e + other.n_e\n metric.n_c = self.n_c + other.n_c\n metric.utp = self.utp + other.utp\n metric.ltp = self.ltp + other.ltp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.lf\n\n @property\n def ucm(self):\n return self.n_ucm / (self.n + self.eps)\n\n @property\n def lcm(self):\n return self.n_lcm / (self.n + self.eps)\n\n @property\n def tr(self):\n return self.n_tr / (self.n_e + self.eps)\n\n @property\n def fr(self):\n return self.n_fr / (self.n_c + self.eps)\n\n @property\n def up(self):\n return self.utp / (self.pred + self.eps)\n\n @property\n def ur(self):\n return self.utp / (self.gold + self.eps)\n\n @property\n def uf(self):\n return (1 + self.beta**2) * self.utp / (self.pred +\n (self.beta**2) * self.gold +\n self.eps)\n\n @property\n def lp(self):\n return self.ltp / (self.pred + self.eps)\n\n @property\n def lr(self):\n return self.ltp / (self.gold + self.eps)\n\n @property\n def lf(self):\n return (1 + self.beta**2) * self.ltp / (self.pred +\n (self.beta**2) * self.gold 
+\n self.eps)" }, { "identifier": "Seq2SeqDetectModel", "path": "gec/model.py", "snippet": "class Seq2SeqDetectModel(Seq2SeqModel):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_labels,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=1024,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n del self.classifier\n self.error_classifier = nn.Linear(self.model.config.d_model,\n self.args.n_labels)\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def loss(self, x, tgt, src_error, tgt_error, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, tgt_mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n y = self.decoder_dropout(y)\n # s_src_error = self.error_classifier(x[:, 1:-1])\n s_tgt_error = self.error_classifier(y)\n\n # src_mask = src_mask[:, 2:]\n\n if \"partial\" in self.args.error_schema:\n # src_mask = src_mask & (src_error != self.args.nul_index)\n tgt_mask = tgt_mask & (tgt_error != self.args.nul_index)\n # src_error_loss = self.criterion(s_src_error[src_mask], src_error[src_mask])\n tgt_error_loss = self.criterion(s_tgt_error[tgt_mask],\n tgt_error[tgt_mask])\n # return src_error_loss + tgt_error_loss\n return tgt_error_loss\n\n def decode(self, x, tgt, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n s_errors = self.error_classifier(y)\n if \"partial\" in self.args.error_schema:\n s_errors[...,\n self.args.nul_index] = torch.finfo(s_errors.dtype).min\n errors = s_errors.argmax(-1)\n errors[~mask] = -1\n\n return errors" }, { "identifier": "Seq2SeqModel", "path": "gec/model.py", "snippet": "class Seq2SeqModel(Model):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. 
Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=512,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n if self.args.encoder == 'transformer':\n self.token_dropout = TokenDropout(self.args.token_dropout)\n self.decoder = TransformerDecoder(\n layer=TransformerDecoderLayer(\n n_heads=self.args.n_decoder_heads,\n n_model=self.args.n_decoder_hidden,\n n_inner=self.args.n_decoder_inner,\n dropout=self.args.decoder_dropout),\n n_layers=self.args.n_decoder_layers)\n\n else:\n from transformers import AutoModel\n self.model = AutoModel.from_pretrained(self.args.bart,\n dropout=self.args.dropout)\n self.encoder, self.decoder = self.model.encoder, self.model.decoder\n self.decoder_dropout = nn.Dropout(self.args.decoder_dropout)\n self.classifier = nn.Linear(self.args.n_encoder_hidden,\n self.args.n_words)\n self.classifier.weight = (self.word_embed.embed\n if self.args.encoder == 'transformer' else\n self.model.shared).weight\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def forward(self, words):\n r\"\"\"\n Args:\n words (~torch.LongTensor): ``[batch_size, seq_len]``.\n Word indices.\n\n Returns:\n ~torch.Tensor:\n Representations for the src sentences of the shape ``[batch_size, seq_len, n_model]``.\n \"\"\"\n # we need to do token dropout, so the TranformerWordEmbedding layer is not invoked here\n if self.args.encoder == 'transformer':\n embed = self.token_dropout(self.word_embed.embed(words))\n embed = embed * self.word_embed.embed_scale + self.word_embed.pos_embed(\n embed)\n embed = self.embed_dropout(embed)\n return self.encoder(embed, words.ne(self.args.pad_index))\n else:\n return self.encoder(input_ids=words,\n attention_mask=words.ne(\n self.args.pad_index))[0]\n\n def loss(self, x, tgt, src_mask, tgt_mask):\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n y = self.decoder_dropout(y)\n s_y = self.classifier(y)\n return self.criterion(s_y[tgt_mask], tgt[tgt_mask])\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (tuple(\n past_state.index_select(0, beam_idx)\n for past_state in layer_past), )\n return reordered_past\n\n def decode(self, x, src_mask):\n batch_size, *_ = x.shape\n beam_size, n_words = self.args.beam_size, self.args.n_words\n\n # repeat the src inputs beam_size times\n # [batch_size * beam_size, ...]\n x = x.unsqueeze(1).repeat(1, 
beam_size, 1, 1).view(-1, *x.shape[1:])\n src_mask = src_mask.unsqueeze(1).repeat(1, beam_size, 1).view(\n -1, *src_mask.shape[1:])\n # initialize the tgt inputs by <bos>\n # [batch_size * beam_size, seq_len]\n tgt = x.new_full((batch_size * beam_size, 1),\n self.args.bos_index,\n dtype=torch.long)\n # [batch_size * beam_size]\n active = src_mask.new_ones(batch_size * beam_size)\n # [batch_size]\n batches = tgt.new_tensor(range(batch_size)) * beam_size\n # accumulated scores\n scores = x.new_full((batch_size, self.args.beam_size),\n MIN).index_fill_(-1, tgt.new_tensor(0), 0).view(-1)\n\n def rank(scores, mask, k):\n scores = scores / mask.sum(-1).unsqueeze(\n -1)**self.args.length_penalty\n return scores.view(batch_size, -1).topk(k, -1)[1]\n\n if self.args.encoder != 'transformer':\n past_key_values = self.decoder(\n input_ids=torch.full_like(tgt[:, :1], self.args.eos_index),\n attention_mask=torch.ones_like(src_mask[:, :1]),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=None,\n use_cache=True)[1]\n\n for t in range(1, min(self.args.max_len + 1, int(1.8 * x.shape[1]))):\n tgt_mask = tgt.ne(self.args.pad_index)\n if self.args.encoder == 'transformer':\n attn_mask = tgt_mask.new_ones(t, t).tril_()\n s_y = self.decoder(self.embed(tgt[active]), x[active],\n tgt_mask[active], src_mask[active],\n attn_mask)\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n # [batch_size * beam_size, n_words]\n s_y = x.new_full((batch_size * beam_size, n_words),\n MIN).masked_scatter_(active.unsqueeze(-1),\n s_y)\n else:\n input_ids = tgt[:, -1:]\n s_y, new_past_key_values = self.decoder(\n input_ids=input_ids,\n attention_mask=torch.cat(\n (torch.ones_like(tgt_mask[:, :1]), tgt_mask), 1),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=past_key_values,\n use_cache=True)[:2]\n del past_key_values\n past_key_values = new_past_key_values\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n s_y[~active] = MIN\n\n s_y[~active, self.args.pad_index] = 0\n\n # [batch_size * beam_size, n_words]\n scores = scores.unsqueeze(-1) + s_y\n # [batch_size, beam_size]\n cands = rank(scores, tgt_mask, beam_size)\n # [batch_size * beam_size]\n scores = scores.view(batch_size, -1).gather(-1, cands).view(-1)\n # beams, tokens = cands // n_words, cands % n_words\n beams, tokens = cands.div(\n n_words, rounding_mode='floor'), (cands % n_words).view(-1, 1)\n indices = (batches.unsqueeze(-1) + beams).view(-1)\n # [batch_size * beam_size, seq_len + 1]\n tgt = torch.cat((tgt[indices], tokens), 1)\n past_key_values = self._reorder_cache(past_key_values, indices)\n active = tokens.ne(\n tokens.new_tensor(\n (self.args.eos_index, self.args.pad_index))).all(-1)\n\n if not active.any():\n break\n cands = rank(scores.view(-1, 1), tgt.ne(self.args.pad_index),\n self.args.topk)\n return tgt[(batches.unsqueeze(-1) + cands).view(-1)].view(\n batch_size, self.args.topk, -1)" }, { "identifier": "Field", "path": "gec/transform.py", "snippet": "class Field(supar.utils.Field):\n r\"\"\"\n Defines a datatype together with instructions for converting to :class:`~torch.Tensor`.\n :class:`Field` models common text processing datatypes that can be represented by tensors.\n It holds a :class:`~supar.utils.vocab.Vocab` object that defines the set of possible values\n for elements of the field and their corresponding numerical representations.\n The 
:class:`Field` object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method.\n\n Args:\n name (str):\n The name of the field.\n pad_token (str):\n The string token used as padding. Default: ``None``.\n unk_token (str):\n The string token used to represent OOV words. Default: ``None``.\n bos_token (str):\n A token that will be prepended to every example using this field, or ``None`` for no `bos_token`.\n Default: ``None``.\n eos_token (str):\n A token that will be appended to every example using this field, or ``None`` for no `eos_token`.\n lower (bool):\n Whether to lowercase the text in this field. Default: ``False``.\n use_vocab (bool):\n Whether to use a :class:`~supar.utils.vocab.Vocab` object.\n If ``False``, the data in this field should already be numerical.\n Default: ``True``.\n tokenize (function):\n The function used to tokenize strings using this field into sequential examples. Default: ``None``.\n fn (function):\n The function used for preprocessing the examples. Default: ``None``.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.padding_side = kwargs.pop('padding_side') if 'padding_side' in kwargs else 'right'\n super().__init__(*args, **kwargs)\n\n def compose(self, batch: Iterable[torch.Tensor]) -> torch.Tensor:\n r\"\"\"\n Composes a batch of sequences into a padded tensor.\n\n Args:\n batch (Iterable[~torch.Tensor]):\n A list of tensors.\n\n Returns:\n A padded tensor converted to proper device.\n \"\"\"\n\n return pad(batch, self.pad_index, padding_side=self.padding_side).to(self.device, non_blocking=True)" }, { "identifier": "Text", "path": "gec/transform.py", "snippet": "class Text(Transform):\n\n fields = ['SRC', 'TGT']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None\n ) -> Text:\n super().__init__()\n\n self.SRC = SRC\n self.TGT = TGT\n\n @property\n def src(self):\n return self.SRC,\n\n @property\n def tgt(self):\n return self.TGT,\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (str or Iterable):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n index, sentence = 0, []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n sentence = TextSentence(self, sentence, index)\n yield sentence\n index += 1\n sentence = []\n else:\n sentence.append(line)" }, { "identifier": "Tree", "path": "gec/transform.py", "snippet": "class Tree(Transform):\n\n fields = ['SRC', 'TGT', 'SRCERROR', 'TGTERROR']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, 
Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None,\n SRCERROR: Optional[Union[Field, Iterable[Field]]] = None,\n TGTERROR: Optional[Union[Field, Iterable[Field]]] = None,\n **kwargs\n ) -> Tree:\n super().__init__()\n self.error_schema = kwargs.pop('error_schema') if 'error_schema' in kwargs else 'last'\n self.fine_error_type = kwargs.pop('fine_error_type') if 'fine_error_type' in kwargs else False\n\n self.SRC = SRC\n self.TGT = TGT\n self.SRCERROR = SRCERROR\n self.TGTERROR = TGTERROR\n\n @property\n def src(self):\n return self.SRC, self.TGT\n\n @property\n def tgt(self):\n return self.SRCERROR, self.TGTERROR\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (Union[str, Iterable]):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n def consume(lines, chunksize=10000):\n index, sentence, chunk = 0, [], []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n chunk.append((sentence, index))\n if len(chunk) == chunksize:\n yield chunk\n chunk = []\n index += 1\n sentence = []\n else:\n sentence.append(line)\n if len(chunk) > 0:\n yield chunk\n\n @contextmanager\n def cache(lines):\n global global_transform\n global_transform = self\n ftemp = tempfile.mkdtemp()\n fbin = os.path.join(ftemp, 'data')\n try:\n yield ((chunk, f\"{fbin}.{i}\") for i, chunk in enumerate(consume(lines))), fbin\n finally:\n if dist.is_initialized() and not is_master():\n dist.barrier()\n del global_transform\n shutil.rmtree(ftemp)\n\n with cache(lines) as (chunks, fbin):\n if is_master():\n def process(chunk, fb):\n sentences = [TreeSentence(global_transform, *s) for s in progress_bar(chunk)]\n sentences = [s for s in sentences if s.vaild]\n return binarize({'sentences': sentences}, fb)[0]\n with mp.Pool(32) as pool:\n results = [pool.apply_async(process, (chunk, fb)) for chunk, fb in chunks]\n binarize((r.get() for r in results), fbin, merge=True)\n if dist.is_initialized() and not is_master():\n fbin = gather(fbin)[0]\n dist.barrier()\n for s in debinarize(fbin, meta=True)['sentences']:\n yield debinarize(fbin, s)" } ]
import os import shutil import tempfile import math import dill import torch import torch.distributed as dist from datetime import datetime, timedelta from typing import Iterable, Union from gec.data import Dataset from gec.fn import map_token_ids from supar.parser import Parser from supar.utils import Config from supar.utils.common import MIN, NUL, UNK from supar.utils.field import RawField from supar.utils.fn import set_rng_state from supar.utils.logging import get_logger, init_logger, progress_bar from supar.utils.metric import Metric from supar.utils.optim import PolynomialLR from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist from supar.utils.parallel import is_master from supar.utils.tokenizer import TransformerTokenizer from supar.utils.transform import AttachJuxtaposeTree, Batch from torch.cuda.amp import GradScaler from torch.optim import AdamW from torch.optim.lr_scheduler import ExponentialLR from torch.nn.functional import embedding from .metric import PerplexityMetric, SpanMetric from .model import Seq2SeqDetectModel, Seq2SeqModel from .transform import Field, Text, Tree from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook from transformers import AutoTokenizer, GPT2LMHeadModel
13666
train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad()
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq' MODEL = Seq2SeqModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: args.bin = os.path.join(os.path.dirname(args.path), 'bin') train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") self.optimizer = AdamW(self.model.parameters(), args.lr, (args.mu, args.nu), args.eps, args.weight_decay) steps = len(train.loader) * epochs // args.update_steps self.scheduler = PolynomialLR(self.optimizer, warmup_steps=self.args.warmup_steps, steps=steps) self.scaler = GradScaler(enabled=args.amp) if dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. 
Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad()
def eval_step(self, batch: Batch) -> PerplexityMetric:
2
2023-10-18 10:55:33+00:00
16k
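The second row's PerplexityMetric (in its context list) turns the TP/FP/FN span counts reported by errant_compare into an F0.5 score, which weights precision more heavily than recall. The snippet below is not part of the dataset row; it is a minimal sketch of that arithmetic (the function name and the example counts are hypothetical).

```python
def errant_f_beta(tp: int, fp: int, fn: int, beta: float = 0.5, eps: float = 1e-12):
    """Precision, recall and F_beta over edit spans; beta=0.5 favours precision."""
    p = tp / (tp + fp + eps)
    r = tp / (tp + fn + eps)
    f = (1 + beta ** 2) * p * r / (beta ** 2 * p + r + eps)
    return p, r, f


# e.g. 40 correct edits, 10 spurious, 30 missed -> P=0.800, R=0.571, F0.5~=0.741
print(errant_f_beta(40, 10, 30))
```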
jianlanluo/SAQ
vqn/conservative_sac_main.py
[ { "identifier": "VQN", "path": "vqn/vqn.py", "snippet": "class VQN(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.embedding_dim = 128\n config.codebook_size = 64\n config.commitment_cost = 1.0\n config.quantization_cost = 1.0\n config.entropy_loss_ratio = 0.0\n config.entropy_loss_type = \"softmax\"\n config.entropy_temperature = 1.0\n config.vqvae_arch = '512-512'\n config.action_only_quantization = False\n config.reconstruction_loss_type = 'l2'\n config.vqvae_lr = 3e-4\n\n config.discount = 0.99\n config.qf_arch = '512-512'\n config.qf_lr = 3e-4\n config.target_update_period = 200\n config.reset_qf = False\n config.td_loss_weight = 1.0\n\n config.bc_loss_weight = 0.0\n\n config.action_selection_threshold = 0.0\n\n config.cql_temp = 1.0\n config.cql_min_q_weight = 0.0\n \n config.qf_weight_decay = 0.0\n\n config.q_value_penalty_weight = 0.0\n config.q_value_penalty_type = 'l1'\n config.q_value_penalty_aggregation = 'mean'\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, observation_dim, action_dim):\n self.config = self.get_default_config(config)\n self.observation_dim = observation_dim\n self.action_dim = action_dim\n\n self.vqvae = ActionVQVAE(\n observation_dim=self.observation_dim,\n action_dim=self.action_dim,\n embedding_dim=self.config.embedding_dim,\n codebook_size=self.config.codebook_size,\n commitment_cost=self.config.commitment_cost,\n quantization_cost=self.config.quantization_cost,\n entropy_loss_ratio=self.config.entropy_loss_ratio,\n entropy_loss_type=self.config.entropy_loss_type,\n entropy_temperature=self.config.entropy_temperature,\n arch=self.config.vqvae_arch,\n action_only_quantization=self.config.action_only_quantization,\n reconstruction_loss_type=self.config.reconstruction_loss_type,\n )\n\n self._vqvae_train_state = TrainState.create(\n params=self.vqvae.init(\n next_rng(self.vqvae.rng_keys()),\n jnp.zeros((1, observation_dim)),\n jnp.zeros((1, action_dim)),\n train=True\n ),\n tx=optax.adam(self.config.vqvae_lr),\n apply_fn=None,\n )\n self._vqvae_total_steps = 0\n\n self.qf = FullyConnectedNetwork(\n output_dim=self.config.codebook_size,\n arch=self.config.qf_arch,\n )\n\n qf_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((1, observation_dim)),\n )\n\n self._qf_optimizer = optax.adam(self.config.qf_lr)\n self._qf_train_state = DQNTrainState.create(\n params=qf_params,\n target_params=deepcopy(qf_params),\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._dqn_total_steps = 0\n\n self._sampler_policy = VQSamplerPolicy(\n self.qf, self.vqvae,\n self._qf_train_state.params, self._vqvae_train_state.params\n )\n\n\n def train_vqvae(self, batch):\n self._vqvae_train_state, metrics = self._vqvae_train_step(\n next_rng(), self._vqvae_train_state, batch\n )\n self._vqvae_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', ))\n def _vqvae_train_step(self, rng, train_state, batch):\n observations = batch['observations']\n actions = batch['actions']\n rng_generator = JaxRNG(rng)\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n reconstructed, result_dict = self.vqvae.apply(\n train_params,\n observations,\n actions,\n train=True,\n )\n return result_dict['loss'], result_dict\n\n grads, aux_values = grad_fn(train_state.params)\n new_train_state = train_state.apply_gradients(grads=grads)\n metrics = 
collect_jax_metrics(\n aux_values,\n ['loss', 'reconstruction_loss', 'quantizer_loss', 'e_latent_loss', 'q_latent_loss',\n 'entropy_loss', 'action_prior_loss', 'action_prior_accuracy'],\n )\n return new_train_state, metrics\n\n def train_dqn(self, batch, bc=False):\n self._qf_train_state, metrics = self._dqn_train_step(\n next_rng(), self._qf_train_state, self._vqvae_train_state, batch,\n bc\n )\n self._dqn_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'bc'))\n def _dqn_train_step(self, rng, qf_train_state, vqvae_train_state, batch, bc=False):\n observations = batch['observations']\n original_actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n rng_generator = JaxRNG(rng)\n\n actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n original_actions,\n method=self.vqvae.encode\n )\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n def select_by_action(q_vals, actions):\n return jnp.squeeze(\n jnp.take_along_axis(\n q_vals, jnp.expand_dims(actions, -1), axis=-1\n ),\n axis=-1\n )\n\n def select_actions(params, observations):\n q_values = self.qf.apply(params, observations)\n action_priors = jax.nn.softmax(\n self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n method=self.vqvae.action_prior_logits\n ),\n axis=-1\n )\n action_selection_threshold = jnp.minimum(\n jnp.amax(action_priors, axis=-1, keepdims=True),\n self.config.action_selection_threshold\n )\n action_mask = (\n action_priors >= action_selection_threshold\n ).astype(jnp.float32)\n masked_q_values = (\n action_mask * q_values + (1.0 - action_mask) * jnp.min(q_values)\n )\n return jnp.argmax(masked_q_values, axis=-1)\n\n\n q_values = self.qf.apply(train_params, observations)\n current_actions_q_values = select_by_action(q_values, actions)\n next_q_values = self.qf.apply(qf_train_state.target_params, next_observations)\n next_actions = select_actions(train_params, next_observations)\n target_q_values = select_by_action(next_q_values, next_actions)\n\n td_target = rewards + (1. 
- dones) * self.config.discount * target_q_values\n\n td_loss = mse_loss(current_actions_q_values, jax.lax.stop_gradient(td_target))\n loss = self.config.td_loss_weight * td_loss\n\n current_actions = jnp.argmax(q_values, axis=-1)\n max_q_values = jnp.max(q_values, axis=-1)\n advantage = max_q_values - current_actions_q_values\n\n policy_dataset_aggrement_rate = jnp.mean(current_actions == actions)\n reconstructed_current_actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n current_actions,\n method=self.vqvae.decode\n )\n current_action_mse = jnp.sum(\n jnp.square(reconstructed_current_actions - original_actions),\n axis=-1\n ).mean()\n\n bc_loss = jnp.mean(optax.softmax_cross_entropy_with_integer_labels(q_values, actions))\n loss = loss + self.config.bc_loss_weight * bc_loss\n\n cql_lse_q_values = self.config.cql_temp * jax.scipy.special.logsumexp(\n q_values / self.config.cql_temp, axis=-1\n )\n cql_min_q_loss = jnp.mean(cql_lse_q_values - current_actions_q_values)\n loss = loss + self.config.cql_min_q_weight * cql_min_q_loss\n\n if self.config.q_value_penalty_aggregation == 'none':\n aggregated_q_values = q_values\n elif self.config.q_value_penalty_aggregation == 'mean':\n aggregated_q_values = jnp.mean(q_values)\n else:\n raise ValueError('Unsupport value penalty aggregation type!')\n\n if self.config.q_value_penalty_type == 'l1':\n q_value_penalty_loss = jnp.mean(jnp.abs(aggregated_q_values))\n elif self.config.q_value_penalty_type == 'l2':\n q_value_penalty_loss = jnp.mean(jnp.square(aggregated_q_values))\n else:\n raise ValueError('Unsupport value penalty type!')\n\n loss = loss + self.config.q_value_penalty_weight * q_value_penalty_loss\n\n if bc:\n loss = bc_loss\n\n return loss, locals()\n\n grads, aux_values = grad_fn(qf_train_state.params)\n new_target_params = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n lambda: qf_train_state.params,\n lambda: qf_train_state.target_params,\n )\n if self.config.reset_qf:\n def reset_qf_params():\n qf_params = self.qf.init(\n rng_generator(self.qf.rng_keys()),\n jnp.zeros((1, self.observation_dim)),\n )\n return DQNTrainState.create(\n params=qf_params,\n target_params=new_target_params,\n tx=self._qf_optimizer,\n apply_fn=None,\n )\n\n new_qf_train_state = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n reset_qf_params,\n lambda: qf_train_state.apply_gradients(grads=grads, target_params=new_target_params)\n )\n else:\n new_qf_train_state = qf_train_state.apply_gradients(\n grads=grads, target_params=new_target_params\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['loss', 'current_actions_q_values', 'max_q_values', 'target_q_values',\n 'advantage', 'td_target', 'td_loss', 'cql_lse_q_values', 'cql_min_q_loss',\n 'policy_dataset_aggrement_rate', 'bc_loss', 'current_action_mse',\n 'q_value_penalty_loss'],\n )\n\n return new_qf_train_state, metrics\n\n def get_sampler_policy(self):\n return self._sampler_policy.update_params(\n self._qf_train_state.params, self._vqvae_train_state.params\n )" }, { "identifier": "ConservativeSAC", "path": "vqn/conservative_sac.py", "snippet": "class ConservativeSAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 0.0\n config.use_automatic_entropy_tuning = False\n config.backup_entropy = False\n config.target_entropy = 0.0\n config.policy_lr = 3e-4\n 
config.policy_weight_decay = 0.0\n config.qf_lr = 3e-4\n config.qf_weight_decay = 0.0\n config.optimizer_type = 'adam'\n config.soft_target_update_rate = 5e-3\n config.use_cql = False\n config.cql_n_actions = 10\n config.cql_importance_sample = True\n config.cql_lagrange = False\n config.cql_target_action_gap = 1.0\n config.cql_temp = 1.0\n config.cql_min_q_weight = 5.0\n config.cql_max_target_backup = False\n config.cql_clip_diff_min = -np.inf\n config.cql_clip_diff_max = np.inf\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, policy, qf):\n self.config = self.get_default_config(config)\n self.policy = policy\n self.qf = qf\n self.observation_dim = policy.observation_dim\n self.action_dim = policy.action_dim\n\n self._train_states = {}\n\n optimizer_class = {\n 'adam': optax.adam,\n 'sgd': optax.sgd,\n }[self.config.optimizer_type]\n\n policy_params = self.policy.init(\n next_rng(self.policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['policy'] = TrainState.create(\n params=policy_params,\n tx=optax.adamw(self.config.qf_lr, self.config.policy_weight_decay),\n apply_fn=None\n )\n\n qf1_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf1'] = TrainState.create(\n params=qf1_params,\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n qf2_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf2'] = TrainState.create(\n params=qf2_params,\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})\n\n model_keys = ['policy', 'qf1', 'qf2']\n\n if self.config.use_automatic_entropy_tuning:\n self.log_alpha = Scalar(0.0)\n self._train_states['log_alpha'] = TrainState.create(\n params=self.log_alpha.init(next_rng()),\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha')\n\n if self.config.cql_lagrange:\n self.log_alpha_prime = Scalar(1.0)\n self._train_states['log_alpha_prime'] = TrainState.create(\n params=self.log_alpha_prime.init(next_rng()),\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha_prime')\n\n self._model_keys = tuple(model_keys)\n self._total_steps = 0\n\n def train(self, batch, bc=False):\n self._total_steps += 1\n self._train_states, self._target_qf_params, metrics = self._train_step(\n self._train_states, self._target_qf_params, next_rng(), batch, bc\n )\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'bc'))\n def _train_step(self, train_states, target_qf_params, rng, batch, bc=False):\n rng_generator = JaxRNG(rng)\n\n def loss_fn(train_params):\n observations = batch['observations']\n actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n\n loss_collection = {}\n\n @wrap_function_with_rng(rng_generator())\n def forward_policy(rng, *args, **kwargs):\n return self.policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_qf(rng, *args, **kwargs):\n return self.qf.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.qf.rng_keys())\n )\n\n new_actions, log_pi = 
forward_policy(train_params['policy'], observations)\n\n if self.config.use_automatic_entropy_tuning:\n alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()\n loss_collection['log_alpha'] = alpha_loss\n alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier\n else:\n alpha_loss = 0.0\n alpha = self.config.alpha_multiplier\n\n \"\"\" Policy loss \"\"\"\n if bc:\n log_probs = forward_policy(train_params['policy'], observations, actions, method=self.policy.log_prob)\n policy_loss = (alpha*log_pi - log_probs).mean()\n else:\n q_new_actions = jnp.minimum(\n forward_qf(train_params['qf1'], observations, new_actions),\n forward_qf(train_params['qf2'], observations, new_actions),\n )\n policy_loss = (alpha*log_pi - q_new_actions).mean()\n\n loss_collection['policy'] = policy_loss\n\n \"\"\" Q function loss \"\"\"\n q1_pred = forward_qf(train_params['qf1'], observations, actions)\n q2_pred = forward_qf(train_params['qf2'], observations, actions)\n\n if self.config.cql_max_target_backup:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n max_target_indices = jnp.expand_dims(jnp.argmax(target_q_values, axis=-1), axis=-1)\n target_q_values = jnp.take_along_axis(target_q_values, max_target_indices, axis=-1).squeeze(-1)\n next_log_pi = jnp.take_along_axis(next_log_pi, max_target_indices, axis=-1).squeeze(-1)\n else:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n\n if self.config.backup_entropy:\n target_q_values = target_q_values - alpha * next_log_pi\n\n td_target = jax.lax.stop_gradient(\n rewards + (1. 
- dones) * self.config.discount * target_q_values\n )\n qf1_loss = mse_loss(q1_pred, td_target)\n qf2_loss = mse_loss(q2_pred, td_target)\n\n ### CQL\n if self.config.use_cql:\n batch_size = actions.shape[0]\n cql_random_actions = jax.random.uniform(\n rng_generator(), shape=(batch_size, self.config.cql_n_actions, self.action_dim),\n minval=-1.0, maxval=1.0\n )\n\n cql_current_actions, cql_current_log_pis = forward_policy(\n train_params['policy'], observations, repeat=self.config.cql_n_actions,\n )\n cql_next_actions, cql_next_log_pis = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions,\n )\n\n cql_q1_rand = forward_qf(train_params['qf1'], observations, cql_random_actions)\n cql_q2_rand = forward_qf(train_params['qf2'], observations, cql_random_actions)\n cql_q1_current_actions = forward_qf(train_params['qf1'], observations, cql_current_actions)\n cql_q2_current_actions = forward_qf(train_params['qf2'], observations, cql_current_actions)\n cql_q1_next_actions = forward_qf(train_params['qf1'], observations, cql_next_actions)\n cql_q2_next_actions = forward_qf(train_params['qf2'], observations, cql_next_actions)\n\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand, jnp.expand_dims(q1_pred, 1), cql_q1_next_actions, cql_q1_current_actions], axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand, jnp.expand_dims(q2_pred, 1), cql_q2_next_actions, cql_q2_current_actions], axis=1\n )\n cql_std_q1 = jnp.std(cql_cat_q1, axis=1)\n cql_std_q2 = jnp.std(cql_cat_q2, axis=1)\n\n if self.config.cql_importance_sample:\n random_density = np.log(0.5 ** self.action_dim)\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand - random_density,\n cql_q1_next_actions - cql_next_log_pis,\n cql_q1_current_actions - cql_current_log_pis],\n axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand - random_density,\n cql_q2_next_actions - cql_next_log_pis,\n cql_q2_current_actions - cql_current_log_pis],\n axis=1\n )\n\n cql_qf1_ood = (\n jax.scipy.special.logsumexp(cql_cat_q1 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n cql_qf2_ood = (\n jax.scipy.special.logsumexp(cql_cat_q2 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n\n \"\"\"Subtract the log likelihood of data\"\"\"\n cql_qf1_diff = jnp.clip(\n cql_qf1_ood - q1_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n cql_qf2_diff = jnp.clip(\n cql_qf2_ood - q2_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n\n if self.config.cql_lagrange:\n alpha_prime = jnp.clip(\n jnp.exp(self.log_alpha_prime.apply(train_params['log_alpha_prime'])),\n a_min=0.0, a_max=1000000.0\n )\n cql_min_qf1_loss = alpha_prime * self.config.cql_min_q_weight * (cql_qf1_diff - self.config.cql_target_action_gap)\n cql_min_qf2_loss = alpha_prime * self.config.cql_min_q_weight * (cql_qf2_diff - self.config.cql_target_action_gap)\n\n alpha_prime_loss = (-cql_min_qf1_loss - cql_min_qf2_loss)*0.5\n\n loss_collection['log_alpha_prime'] = alpha_prime_loss\n\n else:\n cql_min_qf1_loss = cql_qf1_diff * self.config.cql_min_q_weight\n cql_min_qf2_loss = cql_qf2_diff * self.config.cql_min_q_weight\n alpha_prime_loss = 0.0\n alpha_prime = 0.0\n\n qf1_loss = qf1_loss + cql_min_qf1_loss\n qf2_loss = qf2_loss + cql_min_qf2_loss\n\n loss_collection['qf1'] = qf1_loss\n loss_collection['qf2'] = qf2_loss\n return tuple(loss_collection[key] for key in self.model_keys), locals()\n\n train_params = {key: train_states[key].params for key in self.model_keys}\n (_, aux_values), grads = 
value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params)\n\n new_train_states = {\n key: train_states[key].apply_gradients(grads=grads[i][key])\n for i, key in enumerate(self.model_keys)\n }\n new_target_qf_params = {}\n new_target_qf_params['qf1'] = update_target_network(\n new_train_states['qf1'].params, target_qf_params['qf1'],\n self.config.soft_target_update_rate\n )\n new_target_qf_params['qf2'] = update_target_network(\n new_train_states['qf2'].params, target_qf_params['qf2'],\n self.config.soft_target_update_rate\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['log_pi', 'policy_loss', 'qf1_loss', 'qf2_loss', 'alpha_loss',\n 'alpha', 'q1_pred', 'q2_pred', 'target_q_values']\n )\n\n if self.config.use_cql:\n metrics.update(collect_jax_metrics(\n aux_values,\n ['cql_std_q1', 'cql_std_q2', 'cql_q1_rand', 'cql_q2_rand'\n 'cql_qf1_diff', 'cql_qf2_diff', 'cql_min_qf1_loss',\n 'cql_min_qf2_loss', 'cql_q1_current_actions', 'cql_q2_current_actions'\n 'cql_q1_next_actions', 'cql_q2_next_actions', 'alpha_prime',\n 'alpha_prime_loss'],\n 'cql'\n ))\n\n return new_train_states, new_target_qf_params, metrics\n\n @property\n def model_keys(self):\n return self._model_keys\n\n @property\n def train_states(self):\n return self._train_states\n\n @property\n def train_params(self):\n return {key: self.train_states[key].params for key in self.model_keys}\n\n @property\n def total_steps(self):\n return self._total_steps" }, { "identifier": "get_d4rl_dataset", "path": "vqn/replay_buffer.py", "snippet": "def get_d4rl_dataset(env):\n dataset = d4rl.qlearning_dataset(env)\n return dict(\n observations=dataset['observations'],\n actions=dataset['actions'],\n next_observations=dataset['next_observations'],\n rewards=dataset['rewards'],\n dones=dataset['terminals'].astype(np.float32),\n )" }, { "identifier": "subsample_batch", "path": "vqn/replay_buffer.py", "snippet": "def subsample_batch(batch, size):\n indices = np.random.randint(batch['observations'].shape[0], size=size)\n return index_batch(batch, indices)" }, { "identifier": "batch_to_jax", "path": "vqn/jax_utils.py", "snippet": "@jax.jit\ndef batch_to_jax(batch):\n return jax.tree_util.tree_map(jax.device_put, batch)" }, { "identifier": "TanhGaussianPolicy", "path": "vqn/model.py", "snippet": "class TanhGaussianPolicy(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n log_std_multiplier: float = 1.0\n log_std_offset: float = -1.0\n use_tanh: bool = True\n\n def setup(self):\n self.base_network = FullyConnectedNetwork(\n output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init\n )\n self.log_std_multiplier_module = Scalar(self.log_std_multiplier)\n self.log_std_offset_module = Scalar(self.log_std_offset)\n\n def log_prob(self, observations, actions):\n if actions.ndim == 3:\n observations = extend_and_repeat(observations, 1, actions.shape[1])\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n return action_distribution.log_prob(actions)\n\n def __call__(self, observations, deterministic=False, repeat=None):\n if repeat is not None:\n 
observations = extend_and_repeat(observations, 1, repeat)\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n if deterministic:\n samples = mean\n if self.use_tanh:\n samples = jnp.tanh(samples)\n log_prob = action_distribution.log_prob(samples)\n else:\n samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))\n\n return samples, log_prob\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')" }, { "identifier": "FullyConnectedQFunction", "path": "vqn/model.py", "snippet": "class FullyConnectedQFunction(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n @multiple_action_q_function\n def __call__(self, observations, actions):\n x = jnp.concatenate([observations, actions], axis=-1)\n x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)\n return jnp.squeeze(x, -1)\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', )" }, { "identifier": "SamplerPolicy", "path": "vqn/model.py", "snippet": "class SamplerPolicy(object):\n\n def __init__(self, policy, params):\n self.policy = policy\n self.params = params\n\n def update_params(self, params):\n self.params = params\n return self\n\n @partial(jax.jit, static_argnames=('self', 'deterministic'))\n def act(self, params, rng, observations, deterministic):\n return self.policy.apply(\n params, observations, deterministic, repeat=None,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n def __call__(self, observations, deterministic=False):\n actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)" }, { "identifier": "StepSampler", "path": "vqn/sampler.py", "snippet": "class StepSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n for _ in range(n_steps):\n self._traj_steps += 1\n observation = self._current_observation\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n self._current_observation = next_observation\n\n if done or self._traj_steps >= self.max_traj_length:\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n return dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n 
)\n\n @property\n def env(self):\n return self._env" }, { "identifier": "TrajSampler", "path": "vqn/sampler.py", "snippet": "class TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, replay_buffer=None, deterministic=False):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env" }, { "identifier": "SequenceDataset", "path": "vqn/robomimic_utils.py", "snippet": "OBS_KEYS = (\"robot0_eef_pos\", \"robot0_eef_quat\", \"robot0_gripper_qpos\", \"object\")\nENV_TO_HORIZON_MAP = {'lift': 400,\n 'can': 400,\n 'square': 400,\n 'transport': 700,\n 'tool_hang': 700}\ndef make_dataset(dataset, env_name):\ndef process_robomimic_dataset(seq_dataset):\ndef get_robomimic_env(dataset_path, example_action, env_name):\n def __init__(self, env, horizon, example_action):\n def step(self, action):\n def reset(self):\n def render(self):\n def get_normalized_score(self, rewards):\n def _process_obs(self, obs):\ndef _check_lengths(dataset_dict: DatasetDict,\n dataset_len: Optional[int] = None) -> int:\ndef _subselect(dataset_dict: DatasetDict, index: np.ndarray) -> DatasetDict:\ndef _sample(dataset_dict: Union[np.ndarray, DatasetDict],\n indx: np.ndarray) -> DatasetDict:\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n def np_random(self) -> np.random.RandomState:\n def seed(self, seed: Optional[int] = None) -> list:\n def __len__(self) -> int:\n def sample(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None) -> frozen_dict.FrozenDict:\n def split(self, ratio: float) -> Tuple['Dataset', 'Dataset']:\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n def filter(self,\n percentile: Optional[float] = None,\n threshold: Optional[float] = None):\n def normalize_returns(self, scaling: float = 1000):\n def __init__(self,\n dataset_dict: dict,\n clip_to_eps: bool = True,\n eps: float = 1e-5):\n def __init__(self,\n env: gym.Env,\n clip_to_eps: bool = True,\n eps: float = 1e-5,\n ignore_done: bool = False,\n custom_dataset: dict = None):\nclass RobosuiteGymWrapper():\nclass Dataset(object):\nclass OfflineDataset(Dataset):\nclass D4RLDataset(Dataset):" }, { "identifier": "Timer", "path": "vqn/utils.py", "snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - 
self._start_time\n\n def __call__(self):\n return self._time" }, { "identifier": "define_flags_with_default", "path": "vqn/utils.py", "snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs" }, { "identifier": "set_random_seed", "path": "vqn/utils.py", "snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)" }, { "identifier": "print_flags", "path": "vqn/utils.py", "snippet": "def print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]\n )\n )\n )" }, { "identifier": "get_user_flags", "path": "vqn/utils.py", "snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output" }, { "identifier": "prefix_metrics", "path": "vqn/utils.py", "snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }" }, { "identifier": "WandBLogger", "path": "vqn/utils.py", "snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.online = False\n config.prefix = 'JaxCQL'\n config.project = ''\n config.output_dir = '/tmp/JaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n config.entity = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n entity=config.entity,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, 
**kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir" } ]
import os import time import uuid import numpy as np import pprint import jax import jax.numpy as jnp import flax import gym import d4rl import absl.app import absl.flags from copy import deepcopy from .vqn import VQN from .conservative_sac import ConservativeSAC from .replay_buffer import get_d4rl_dataset, subsample_batch from .jax_utils import batch_to_jax from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy from .sampler import StepSampler, TrajSampler from .robomimic_utils import ( SequenceDataset, make_dataset, process_robomimic_dataset, D4RLDataset, get_robomimic_env, ENV_TO_HORIZON_MAP, OBS_KEYS ) from .utils import ( Timer, define_flags_with_default, set_random_seed, print_flags, get_user_flags, prefix_metrics, WandBLogger ) from viskit.logging import logger, setup_logger
10,811
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis]
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis]
eval_sampler = TrajSampler(env, max_len)
9
2023-10-18 06:31:20+00:00
16k
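A minimal, self-contained sketch (not part of either dataset record; function name, shapes, and default hyperparameters are assumptions) of the TD-target and conservative (CQL) penalty computation that the VQN/ConservativeSAC snippets in the record above implement. It mirrors the rewards + (1 - dones) * discount * target_q_values backup and the temperature-scaled logsumexp-minus-dataset-Q term, but is only an illustration under those assumptions, not the repository's exact code.

import jax.numpy as jnp
from jax.scipy.special import logsumexp

def dqn_cql_losses(q_values, actions, rewards, dones, target_q_values,
                   discount=0.99, cql_temp=1.0, cql_min_q_weight=5.0):
    # q_values: (batch, num_actions) float array; actions: (batch,) integer indices;
    # rewards, dones, target_q_values: (batch,) float arrays.
    # Q(s, a) for the dataset actions.
    chosen_q = jnp.take_along_axis(q_values, actions[:, None], axis=-1)[:, 0]
    # Bellman backup as in the snippet: r + (1 - done) * gamma * Q_target(s', a').
    td_target = rewards + (1.0 - dones) * discount * target_q_values
    td_loss = jnp.mean((chosen_q - td_target) ** 2)
    # Conservative penalty: temperature-scaled logsumexp over all actions
    # minus the Q-value of the dataset action.
    cql_lse = cql_temp * logsumexp(q_values / cql_temp, axis=-1)
    cql_min_q_loss = jnp.mean(cql_lse - chosen_q)
    return td_loss + cql_min_q_weight * cql_min_q_loss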
SLDGroup/G-CASCADE
lib/networks.py
[ { "identifier": "pvt_v2_b2", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b2(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b2, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "pvt_v2_b5", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b5(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b5, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "pvt_v2_b0", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b0(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b0, self).__init__(\n patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "CUP", "path": "lib/decoders.py", "snippet": "class CUP(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CUP,self).__init__()\n \n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.ConvBlock3 = conv_block(ch_in=2*channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.ConvBlock2 = conv_block(ch_in=2*channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.ConvBlock1 = conv_block(ch_in=2*channels[3], ch_out=channels[3])\n\n def forward(self,x, skips):\n\n d4 = self.ConvBlock4(x)\n \n # decoding + concat path\n d3 = self.Up3(d4)\n d3 = torch.cat((skips[0],d3),dim=1)\n \n d3 = self.ConvBlock3(d3)\n \n d2 = self.Up2(d3)\n d2 = torch.cat((skips[1],d2),dim=1)\n d2 = self.ConvBlock2(d2)\n\n d1 = self.Up1(d2)\n d1 = torch.cat((skips[2],d1),dim=1)\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1 " }, { "identifier": "CASCADE", "path": "lib/decoders.py", "snippet": "class CASCADE(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CASCADE,self).__init__()\n \n self.Conv_1x1 = nn.Conv2d(channels[0],channels[0],kernel_size=1,stride=1,padding=0)\n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\t\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.AG3 = Attention_block(F_g=channels[1],F_l=channels[1],F_int=channels[2])\n self.ConvBlock3 = conv_block(ch_in=channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.AG2 = Attention_block(F_g=channels[2],F_l=channels[2],F_int=channels[3])\n self.ConvBlock2 = conv_block(ch_in=channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.AG1 = Attention_block(F_g=channels[3],F_l=channels[3],F_int=int(channels[3]/2))\n self.ConvBlock1 = conv_block(ch_in=channels[3], ch_out=channels[3])\n \n self.CA4 = ChannelAttention(channels[0])\n self.CA3 = ChannelAttention(channels[1])\n self.CA2 = ChannelAttention(channels[2])\n self.CA1 = ChannelAttention(channels[3])\n \n self.SA = SPA()\n \n def forward(self,x, skips):\n \n d4 = self.Conv_1x1(x)\n \n # CAM4\n d4 = 
self.CA4(d4)*d4\n d4 = self.SA(d4)*d4 \n d4 = self.ConvBlock4(d4)\n \n # upconv3\n d3 = self.Up3(d4)\n \n # AG3\n x3 = self.AG3(g=d3,x=skips[0])\n \n # Concat 3\n d3 = d3 + x3\n \n # CAM3\n d3 = self.CA3(d3)*d3\n d3 = self.SA(d3)*d3 \n d3 = self.ConvBlock3(d3)\n \n # upconv2\n d2 = self.Up2(d3)\n \n # AG2\n x2 = self.AG2(g=d2,x=skips[1])\n \n # Concat 2\n d2 = d2 + x2\n \n # CAM2\n d2 = self.CA2(d2)*d2\n d2 = self.SA(d2)*d2\n #print(d2.shape)\n d2 = self.ConvBlock2(d2)\n \n # upconv1\n d1 = self.Up1(d2)\n \n #print(skips[2])\n # AG1\n x1 = self.AG1(g=d1,x=skips[2])\n \n # Concat 1\n d1 = d1 + x1\n \n # CAM1\n d1 = self.CA1(d1)*d1\n d1 = self.SA(d1)*d1\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1" }, { "identifier": "CASCADE_Cat", "path": "lib/decoders.py", "snippet": "class CASCADE_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CASCADE_Cat,self).__init__()\n \n self.Conv_1x1 = nn.Conv2d(channels[0],channels[0],kernel_size=1,stride=1,padding=0)\n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\t\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.AG3 = Attention_block(F_g=channels[1],F_l=channels[1],F_int=channels[2])\n self.ConvBlock3 = conv_block(ch_in=2*channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.AG2 = Attention_block(F_g=channels[2],F_l=channels[2],F_int=channels[3])\n self.ConvBlock2 = conv_block(ch_in=2*channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.AG1 = Attention_block(F_g=channels[3],F_l=channels[3],F_int=int(channels[3]/2))\n self.ConvBlock1 = conv_block(ch_in=2*channels[3], ch_out=channels[3])\n \n self.CA4 = ChannelAttention(channels[0])\n self.CA3 = ChannelAttention(2*channels[1])\n self.CA2 = ChannelAttention(2*channels[2])\n self.CA1 = ChannelAttention(2*channels[3])\n \n self.SA = SPA()\n \n def forward(self,x, skips):\n \n d4 = self.Conv_1x1(x)\n \n # CAM4\n d4 = self.CA4(d4)*d4\n d4 = self.SA(d4)*d4 \n d4 = self.ConvBlock4(d4)\n \n # upconv3\n d3 = self.Up3(d4)\n \n # AG3\n x3 = self.AG3(g=d3,x=skips[0])\n \n # Concat 3\n d3 = torch.cat((x3,d3),dim=1)\n \n # CAM3\n d3 = self.CA3(d3)*d3\n d3 = self.SA(d3)*d3 \n d3 = self.ConvBlock3(d3)\n \n # upconv2\n d2 = self.Up2(d3)\n \n # AG2\n x2 = self.AG2(g=d2,x=skips[1])\n \n # Concat 2\n d2 = torch.cat((x2,d2),dim=1)\n \n # CAM2\n d2 = self.CA2(d2)*d2\n d2 = self.SA(d2)*d2\n #print(d2.shape)\n d2 = self.ConvBlock2(d2)\n \n # upconv1\n d1 = self.Up1(d2)\n \n #print(skips[2])\n # AG1\n x1 = self.AG1(g=d1,x=skips[2])\n \n # Concat 1\n d1 = torch.cat((x1,d1),dim=1)\n \n # CAM1\n d1 = self.CA1(d1)*d1\n d1 = self.SA(d1)*d1\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1 " }, { "identifier": "GCUP", "path": "lib/decoders.py", "snippet": "class GCUP(nn.Module):\n def __init__(self, channels=[512,320,128,64], img_size=224, drop_path_rate=0.0, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCUP,self).__init__()\n \n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4, 2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0],ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1],ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2],ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n )\n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = d3 + skips[0]\n \n # GCAM3\n d3 = self.gcb3(d3) \n \n # UCB2\n d2 = self.ucb2(d3) \n \n # Aggregation 2\n d2 = d2 + skips[1] \n \n # GCAM2\n d2 = self.gcb2(d2)\n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = d1 + skips[2]\n \n # GCAM1\n d1 = self.gcb1(d1)\n \n return d4, d3, d2, d1" }, { "identifier": "GCUP_Cat", "path": "lib/decoders.py", "snippet": "class GCUP_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64], img_size=224, drop_path_rate=0.0, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCUP_Cat,self).__init__()\n \n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4, 2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0],ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(2*channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=2*channels[1],ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(2*channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=2*channels[2],ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(2*channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n ) \n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n \n # UCB3\n d3 = self.ucb3(d4)\n\n # Aggregation 3\n d3 = torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n\n # UCB2\n d2 = self.ucb2(d3)\n\n # Aggregation 2\n d2 = torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n\n return d4, d3, d2, d1" }, { "identifier": "GCASCADE", "path": "lib/decoders.py", "snippet": "class GCASCADE(nn.Module):\n def __init__(self, channels=[512,320,128,64], drop_path_rate=0.0, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCASCADE,self).__init__()\n\n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance 
normalization for graph convolution block {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4,2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0], ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1], ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2], ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n )\n\n self.spa = SPA()\n\n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n d4 = self.spa(d4)*d4 \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = d3 + skips[0] #torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n d3 = self.spa(d3)*d3 \n \n # UCB2\n d2 = self.ucb2(d3)\n \n # Aggregation 2\n d2 = d2 + skips[1] #torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n d2 = self.spa(d2)*d2\n \n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = d1 + skips[2] #torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n d1 = self.spa(d1)*d1\n \n return d4, d3, d2, d1" }, { "identifier": "GCASCADE_Cat", "path": "lib/decoders.py", "snippet": "class GCASCADE_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64], drop_path_rate=0.0, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCASCADE_Cat,self).__init__()\n\n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n 
self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4,2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0], ch_out=channels[1], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1]*2, self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1]*2, ch_out=channels[2], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2]*2, self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2]*2, ch_out=channels[3], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3]*2, self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n ) \n \n self.spa = SPA()\n\n \n def forward(self,x, skips): \n \n # GCAM4\n d4 = self.gcb4(x) \n d4 = self.spa(d4)*d4 \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n d3 = self.spa(d3)*d3 \n \n # ucb2\n d2 = self.ucb2(d3)\n \n # Aggregation 2\n d2 = torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n d2 = self.spa(d2)*d2\n \n \n # ucb1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n d1 = self.spa(d1)*d1\n \n return d4, d3, d2, d1" }, { "identifier": "pvig_ti_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_ti_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n 
self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,6,2] # number of basic blocks in the backbone\n self.channels = [48, 96, 240, 384] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_s_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_s_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,6,2] # number of basic blocks in the backbone\n self.channels = [80, 160, 400, 640] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_m_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_m_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,16,2] # number of basic blocks in the backbone\n self.channels = [96, 192, 384, 768] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_b_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_b_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,18,2] # number of basic blocks in the backbone\n self.channels = [128, 256, 512, 1024] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_b_224_gelu']\n return model" }, { "identifier": "maxvit_tiny_rw_224", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_tiny_rw_224(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs)" }, { "identifier": "maxvit_rmlp_tiny_rw_256", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)" }, { "identifier": "maxxvit_rmlp_small_rw_256", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs):\n return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)" }, { "identifier": "maxvit_rmlp_small_rw_224", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_rmlp_small_rw_224(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs)" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import timm import logging from scipy import ndimage from lib.pvtv2 import pvt_v2_b2, pvt_v2_b5, pvt_v2_b0 from lib.decoders import CUP, CASCADE, CASCADE_Cat, GCUP, GCUP_Cat, GCASCADE, GCASCADE_Cat from lib.pyramid_vig import pvig_ti_224_gelu, pvig_s_224_gelu, pvig_m_224_gelu, pvig_b_224_gelu from lib.maxxvit_4out import maxvit_tiny_rw_224 as maxvit_tiny_rw_224_4out from lib.maxxvit_4out import maxvit_rmlp_tiny_rw_256 as maxvit_rmlp_tiny_rw_256_4out from lib.maxxvit_4out import maxxvit_rmlp_small_rw_256 as maxxvit_rmlp_small_rw_256_4out from lib.maxxvit_4out import maxvit_rmlp_small_rw_224 as maxvit_rmlp_small_rw_224_4out
12,026
self.decoder = CASCADE(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE_Cat(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE_Cat, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) print('Model %s created, param count: %d' % ('PVT backbone: ', sum([m.numel() for m in self.backbone.parameters()]))) # decoder initialization self.decoder = CASCADE_Cat(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE_Cat decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCUP(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCUP, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) 
self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive':
logger = logging.getLogger(__name__) def np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class PVT_CUP(nn.Module): def __init__(self, n_class=1): super(PVT_CUP, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CUP(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CUP decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CASCADE(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE_Cat(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE_Cat, self).__init__() 
# conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) print('Model %s created, param count: %d' % ('PVT backbone: ', sum([m.numel() for m in self.backbone.parameters()]))) # decoder initialization self.decoder = CASCADE_Cat(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE_Cat decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCUP(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCUP, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive':
self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation)
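The PVT_CASCADE definition shown above is self-contained enough to exercise directly. Below is a minimal usage sketch, not part of the original file: it assumes the pretrained weights referenced by the hard-coded path './pretrained_pth/pvt/pvt_v2_b2.pth' are present (the constructor loads them unconditionally), and the import path for the class is hypothetical since the module name is not shown in this excerpt. With a 224x224 input, the four upsampled heads return to the input resolution.

import torch
from lib.networks import PVT_CASCADE  # hypothetical import path; the excerpt above only shows the class body

# Build the model for binary segmentation (n_class=1); weights load inside __init__.
model = PVT_CASCADE(n_class=1).eval()

# A single grayscale 224x224 image; forward() converts 1 channel to 3 via self.conv.
x = torch.randn(1, 1, 224, 224)

with torch.no_grad():
    p1, p2, p3, p4 = model(x)

# Heads are upsampled by x32, x16, x8, x4, so all outputs match the 224x224 input.
print(p1.shape, p2.shape, p3.shape, p4.shape)  # each torch.Size([1, 1, 224, 224])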
6
2023-10-24 17:49:10+00:00
16k
boppreh/hello_tls
src/hello_tls/scan.py
[ { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ScanError(Exception):\nclass ServerAlertError(ScanError):\nclass BadServerResponse(ScanError):\nclass ServerHello:\nclass ClientHello:\n def __init__(self, level: AlertLevel, description: AlertDescription):\ndef _make_stream_parser(packets: Iterable[bytes]) -> Tuple[Callable[[int], bytes], Callable[[], int]]:\n def read_next(length: int) -> bytes:\ndef _bytes_to_int(b: bytes) -> int:\ndef parse_server_hello(packets: Iterable[bytes]) -> ServerHello:\ndef make_client_hello(client_hello: ClientHello) -> bytes:\n def prefix_length(block_name: str, width_bytes: int = 2) -> Iterator[None]:" }, { "identifier": "AlertDescription", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertDescription(Enum):\n \"\"\" Different alert messages that can be sent by the server. \"\"\"\n close_notify = b'\\x00'\n unexpected_message = b'\\x0a'\n bad_record_mac = b'\\x14'\n record_overflow = b'\\x16'\n handshake_failure = b'\\x28'\n bad_certificate = b'\\x2a'\n unsupported_certificate = b'\\x2b'\n certificate_revoked = b'\\x2c'\n certificate_expired = b'\\x2d'\n certificate_unknown = b'\\x2e'\n illegal_parameter = b'\\x2f'\n unknown_ca = b'\\x30'\n access_denied = b'\\x31'\n decode_error = b'\\x32'\n decrypt_error = b'\\x33'\n protocol_version = b'\\x46'\n insufficient_security = b'\\x47'\n internal_error = b'\\x50'\n inappropriate_fallback = b'\\x56'\n user_canceled = b'\\x5a'\n missing_extension = b'\\x6d'\n unsupported_extension = b'\\x6e'\n unrecognized_name = b'\\x70'\n bad_certificate_status_response = b'\\x71'\n unknown_psk_identity = b'\\x73'\n certificate_required = b'\\x74'\n no_application_protocol = b'\\x78'" }, { "identifier": "CipherSuite", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CipherSuite(Enum):\n def __repr__(self):\n return self.name\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each cipher suite with the protocols it's supported at.\n # Default to all but TLS 1.3, because that's the most common.\n def __init__(self, _: bytes, protocols: Sequence[Protocol] = (Protocol.SSLv3, Protocol.TLS1_0, Protocol.TLS1_1, Protocol.TLS1_2)):\n self.protocols = protocols\n\n # Pseudo cipher suite, not actually picked.\n #TLS_EMPTY_RENEGOTIATION_INFO_SCSV = b\"\\x00\\xff\"\n\n # TLS 1.3 cipher suites.\n TLS_AES_128_GCM_SHA256 = b\"\\x13\\x01\", (Protocol.TLS1_3,)\n TLS_AES_256_GCM_SHA384 = b\"\\x13\\x02\", (Protocol.TLS1_3,)\n TLS_CHACHA20_POLY1305_SHA256 = b\"\\x13\\x03\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_SHA256 = b\"\\x13\\x04\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_8_SHA256 = b\"\\x13\\x05\", (Protocol.TLS1_3,)\n\n # Cipher suite that had its number reassigned.\n OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xcc\\x13'\n \n # Cipher suites adapted from IANA assignments:\n # https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4\n TLS_AEGIS_128L_SHA256 = b'\\x13\\x07' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_AEGIS_256_SHA384 = b'\\x13\\x06' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x19' # [RFC4346]\n TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x17' # [RFC4346][RFC6347]\n TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1B' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA = b'\\x00\\x34' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA256 = b'\\x00\\x6C' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA6' # [RFC5288]\n 
TLS_DH_anon_WITH_AES_256_CBC_SHA = b'\\x00\\x3A' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6D' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA7' # [RFC5288]\n TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x46' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5A' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x47' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5B' # [RFC6209]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x46' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBF' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x84' # [RFC6367]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x89' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC5' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x85' # [RFC6367]\n TLS_DH_anon_WITH_DES_CBC_SHA = b'\\x00\\x1A' # [RFC8996]\n TLS_DH_anon_WITH_RC4_128_MD5 = b'\\x00\\x18' # [RFC5246][RFC6347]\n TLS_DH_anon_WITH_SEED_CBC_SHA = b'\\x00\\x9B' # [RFC4162]\n TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0B' # [RFC4346]\n TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0D' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x30' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3E' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA4' # [RFC5288]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x36' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x68' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA5' # [RFC5288]\n TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3E' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x58' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3F' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x59' # [RFC6209]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x42' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBB' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x82' # [RFC6367]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x85' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC1' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x83' # [RFC6367]\n TLS_DH_DSS_WITH_DES_CBC_SHA = b'\\x00\\x0C' # [RFC8996]\n TLS_DH_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x97' # [RFC4162]\n TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0E' # [RFC4346]\n TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x10' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x31' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3F' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA0' # [RFC5288]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x37' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x69' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA1' # [RFC5288]\n TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x40' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x54' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x41' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x55' # [RFC6209]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x43' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBC' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7E' # [RFC6367]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x86' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC2' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 
b'\\xC0\\x7F' # [RFC6367]\n TLS_DH_RSA_WITH_DES_CBC_SHA = b'\\x00\\x0F' # [RFC8996]\n TLS_DH_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x98' # [RFC4162]\n TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x11' # [RFC4346]\n TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x13' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x32' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x40' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA2' # [RFC5288]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x38' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6A' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA3' # [RFC5288]\n TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x42' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x56' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x43' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x57' # [RFC6209]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x44' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBD' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x80' # [RFC6367]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x87' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC3' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x81' # [RFC6367]\n TLS_DHE_DSS_WITH_DES_CBC_SHA = b'\\x00\\x12' # [RFC8996]\n TLS_DHE_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x99' # [RFC4162]\n TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8F' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x90' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB2' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_128_CCM = b'\\xC0\\xA6' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAA' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x91' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB3' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CCM = b'\\xC0\\xA7' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAB' # [RFC5487]\n TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x66' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6C' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x67' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6D' # [RFC6209]\n TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x96' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x90' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x97' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x91' # [RFC6367]\n TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAD' # [RFC7905]\n TLS_DHE_PSK_WITH_NULL_SHA = b'\\x00\\x2D' # [RFC4785]\n TLS_DHE_PSK_WITH_NULL_SHA256 = b'\\x00\\xB4' # [RFC5487]\n TLS_DHE_PSK_WITH_NULL_SHA384 = b'\\x00\\xB5' # [RFC5487]\n TLS_DHE_PSK_WITH_RC4_128_SHA = b'\\x00\\x8E' # [RFC4279][RFC6347]\n TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x14' # [RFC4346]\n TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x16' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x33' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x67' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CCM = b'\\xC0\\x9E' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA2' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9E' # [RFC5288]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x39' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6B' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CCM = b'\\xC0\\x9F' # [RFC6655]\n 
TLS_DHE_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA3' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9F' # [RFC5288]\n TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x44' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x52' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x45' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x53' # [RFC6209]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x45' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBE' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7C' # [RFC6367]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x88' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC4' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7D' # [RFC6367]\n TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAA' # [RFC7905]\n TLS_DHE_RSA_WITH_DES_CBC_SHA = b'\\x00\\x15' # [RFC8996]\n TLS_DHE_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x9A' # [RFC4162]\n TLS_ECCPWD_WITH_AES_128_CCM_SHA256 = b'\\xC0\\xB2' # [RFC8492]\n TLS_ECCPWD_WITH_AES_128_GCM_SHA256 = b'\\xC0\\xB0' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_CCM_SHA384 = b'\\xC0\\xB3' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_GCM_SHA384 = b'\\xC0\\xB1' # [RFC8492]\n TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x17' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_128_CBC_SHA = b'\\xC0\\x18' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_256_CBC_SHA = b'\\xC0\\x19' # [RFC8422]\n TLS_ECDH_anon_WITH_NULL_SHA = b'\\xC0\\x15' # [RFC8422]\n TLS_ECDH_anon_WITH_RC4_128_SHA = b'\\xC0\\x16' # [RFC8422][RFC6347]\n TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x03' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x04' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x25' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2D' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x05' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x26' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2E' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4A' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5E' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4B' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5F' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x74' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x88' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x75' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x89' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_NULL_SHA = b'\\xC0\\x01' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x02' # [RFC8422][RFC6347]\n TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x0D' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x0E' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x29' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x31' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0F' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x2A' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x32' # [RFC5289]\n TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4E' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x62' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4F' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x63' # [RFC6209]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 
b'\\xC0\\x78' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8C' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x79' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8D' # [RFC6367]\n TLS_ECDH_RSA_WITH_NULL_SHA = b'\\xC0\\x0B' # [RFC8422]\n TLS_ECDH_RSA_WITH_RC4_128_SHA = b'\\xC0\\x0C' # [RFC8422][RFC6347]\n TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x08' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x09' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x23' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM = b'\\xC0\\xAC' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = b'\\xC0\\xAE' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2B' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0A' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x24' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM = b'\\xC0\\xAD' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = b'\\xC0\\xAF' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2C' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x48' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5C' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x49' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5D' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x72' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x86' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x73' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x87' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA9' # [RFC7905]\n TLS_ECDHE_ECDSA_WITH_NULL_SHA = b'\\xC0\\x06' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x07' # [RFC8422][RFC6347]\n TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x34' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = b'\\xC0\\x35' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x37' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = b'\\xD0\\x03' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = b'\\xD0\\x05' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\xD0\\x01' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = b'\\xC0\\x36' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x38' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\xD0\\x02' # [RFC8442]\n TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x70' # [RFC6209]\n TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x71' # [RFC6209]\n TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x9A' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x9B' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAC' # [RFC7905]\n TLS_ECDHE_PSK_WITH_NULL_SHA = b'\\xC0\\x39' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA256 = b'\\xC0\\x3A' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA384 = b'\\xC0\\x3B' # [RFC5489]\n TLS_ECDHE_PSK_WITH_RC4_128_SHA = b'\\xC0\\x33' # [RFC5489][RFC6347]\n TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x12' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x13' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x27' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2F' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x14' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x28' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 
b'\\xC0\\x30' # [RFC5289]\n TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4C' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x60' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4D' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x61' # [RFC6209]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x76' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8A' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x77' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8B' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA8' # [RFC7905]\n TLS_ECDHE_RSA_WITH_NULL_SHA = b'\\xC0\\x10' # [RFC8422]\n TLS_ECDHE_RSA_WITH_RC4_128_SHA = b'\\xC0\\x11' # [RFC8422][RFC6347]\n TLS_GOSTR341112_256_WITH_28147_CNT_IMIT = b'\\xC1\\x02' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_CTR_OMAC = b'\\xC1\\x00' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_L = b'\\xC1\\x03' # [RFC9367]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_S = b'\\xC1\\x05' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_CTR_OMAC = b'\\xC1\\x01' # [RFC9189]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_L = b'\\xC1\\x04' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_S = b'\\xC1\\x06' # [RFC9367]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = b'\\x00\\x29' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = b'\\x00\\x26' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x2A' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = b'\\x00\\x27' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x2B' # [RFC2712][RFC6347]\n TLS_KRB5_EXPORT_WITH_RC4_40_SHA = b'\\x00\\x28' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = b'\\x00\\x23' # [RFC2712]\n TLS_KRB5_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1F' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_MD5 = b'\\x00\\x22' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_SHA = b'\\x00\\x1E' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_MD5 = b'\\x00\\x25' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_SHA = b'\\x00\\x21' # [RFC2712]\n TLS_KRB5_WITH_RC4_128_MD5 = b'\\x00\\x24' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_RC4_128_SHA = b'\\x00\\x20' # [RFC2712][RFC6347]\n TLS_NULL_WITH_NULL_NULL = b'\\x00\\x00' # [RFC5246]\n TLS_PSK_DHE_WITH_AES_128_CCM_8 = b'\\xC0\\xAA' # [RFC6655]\n TLS_PSK_DHE_WITH_AES_256_CCM_8 = b'\\xC0\\xAB' # [RFC6655]\n TLS_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8B' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x8C' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xAE' # [RFC5487]\n TLS_PSK_WITH_AES_128_CCM = b'\\xC0\\xA4' # [RFC6655]\n TLS_PSK_WITH_AES_128_CCM_8 = b'\\xC0\\xA8' # [RFC6655]\n TLS_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA8' # [RFC5487]\n TLS_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x8D' # [RFC4279]\n TLS_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xAF' # [RFC5487]\n TLS_PSK_WITH_AES_256_CCM = b'\\xC0\\xA5' # [RFC6655]\n TLS_PSK_WITH_AES_256_CCM_8 = b'\\xC0\\xA9' # [RFC6655]\n TLS_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA9' # [RFC5487]\n TLS_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x64' # [RFC6209]\n TLS_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6A' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x65' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6B' # [RFC6209]\n TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x94' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8E' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x95' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8F' # [RFC6367]\n 
TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAB' # [RFC7905]\n TLS_PSK_WITH_NULL_SHA = b'\\x00\\x2C' # [RFC4785]\n TLS_PSK_WITH_NULL_SHA256 = b'\\x00\\xB0' # [RFC5487]\n TLS_PSK_WITH_NULL_SHA384 = b'\\x00\\xB1' # [RFC5487]\n TLS_PSK_WITH_RC4_128_SHA = b'\\x00\\x8A' # [RFC4279][RFC6347]\n TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x08' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x06' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x03' # [RFC4346][RFC6347]\n TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x93' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x94' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB6' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAC' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x95' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB7' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAD' # [RFC5487]\n TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x68' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6E' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x69' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6F' # [RFC6209]\n TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x98' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x92' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x99' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x93' # [RFC6367]\n TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAE' # [RFC7905]\n TLS_RSA_PSK_WITH_NULL_SHA = b'\\x00\\x2E' # [RFC4785]\n TLS_RSA_PSK_WITH_NULL_SHA256 = b'\\x00\\xB8' # [RFC5487]\n TLS_RSA_PSK_WITH_NULL_SHA384 = b'\\x00\\xB9' # [RFC5487]\n TLS_RSA_PSK_WITH_RC4_128_SHA = b'\\x00\\x92' # [RFC4279][RFC6347]\n TLS_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0A' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x2F' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3C' # [RFC5246]\n TLS_RSA_WITH_AES_128_CCM = b'\\xC0\\x9C' # [RFC6655]\n TLS_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA0' # [RFC6655]\n TLS_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9C' # [RFC5288]\n TLS_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x35' # [RFC5246]\n TLS_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x3D' # [RFC5246]\n TLS_RSA_WITH_AES_256_CCM = b'\\xC0\\x9D' # [RFC6655]\n TLS_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA1' # [RFC6655]\n TLS_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9D' # [RFC5288]\n TLS_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3C' # [RFC6209]\n TLS_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x50' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3D' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x51' # [RFC6209]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x41' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBA' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7A' # [RFC6367]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x84' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC0' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7B' # [RFC6367]\n TLS_RSA_WITH_DES_CBC_SHA = b'\\x00\\x09' # [RFC8996]\n TLS_RSA_WITH_IDEA_CBC_SHA = b'\\x00\\x07' # [RFC8996]\n TLS_RSA_WITH_NULL_MD5 = b'\\x00\\x01' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA = b'\\x00\\x02' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA256 = b'\\x00\\x3B' # [RFC5246]\n TLS_RSA_WITH_RC4_128_MD5 = b'\\x00\\x04' # [RFC5246][RFC6347]\n TLS_RSA_WITH_RC4_128_SHA = b'\\x00\\x05' # [RFC5246][RFC6347]\n TLS_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x96' # 
[RFC4162]\n TLS_SHA256_SHA256 = b'\\xC0\\xB4' # [RFC9150]\n TLS_SHA384_SHA384 = b'\\xC0\\xB5' # [RFC9150]\n TLS_SM4_CCM_SM3 = b'\\x00\\xC7' # [RFC8998]\n TLS_SM4_GCM_SM3 = b'\\x00\\xC6' # [RFC8998]\n TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1C' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = b'\\xC0\\x1F' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = b'\\xC0\\x22' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1B' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1E' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x21' # [RFC5054]\n TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1A' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1D' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_256_CBC_SHA = b'\\xC0\\x20' # [RFC5054]" }, { "identifier": "Group", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Group(Enum):\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each group with whether it's a PQ group.\n def __init__(self, _: bytes, is_pq: bool = False):\n self.is_pq = is_pq\n def __repr__(self):\n return self.name\n \n sect163k1 = b'\\x00\\x01'\n sect163r1 = b'\\x00\\x02'\n sect163r2 = b'\\x00\\x03'\n sect193r1 = b'\\x00\\x04'\n sect193r2 = b'\\x00\\x05'\n sect233k1 = b'\\x00\\x06'\n sect233r1 = b'\\x00\\x07'\n sect239k1 = b'\\x00\\x08'\n sect283k1 = b'\\x00\\x09'\n sect283r1 = b'\\x00\\x0a'\n sect409k1 = b'\\x00\\x0b'\n sect409r1 = b'\\x00\\x0c'\n sect571k1 = b'\\x00\\x0d'\n sect571r1 = b'\\x00\\x0e'\n secp160k1 = b'\\x00\\x0f'\n secp160r1 = b'\\x00\\x10'\n secp160r2 = b'\\x00\\x11'\n secp192k1 = b'\\x00\\x12'\n secp192r1 = b'\\x00\\x13'\n secp224k1 = b'\\x00\\x14'\n secp224r1 = b'\\x00\\x15'\n secp256k1 = b'\\x00\\x16'\n secp256r1 = b'\\x00\\x17'\n secp384r1 = b'\\x00\\x18'\n secp521r1 = b'\\x00\\x19'\n brainpoolP256r1 = b'\\x00\\x1a'\n brainpoolP384r1 = b'\\x00\\x1b'\n brainpoolP512r1 = b'\\x00\\x1c'\n x25519 = b'\\x00\\x1d'\n x448 = b'\\x00\\x1e'\n brainpoolP256r1tls13 = b'\\x00\\x1f'\n brainpoolP384r1tls13 = b'\\x00\\x20'\n brainpoolP512r1tls13 = b'\\x00\\x21'\n GC256A = b'\\x00\\x22'\n GC256B = b'\\x00\\x23'\n GC256C = b'\\x00\\x24'\n GC256D = b'\\x00\\x25'\n GC512A = b'\\x00\\x26'\n GC512B = b'\\x00\\x27'\n GC512C = b'\\x00\\x28'\n curveSM2 = b'\\x00\\x29'\n ffdhe2048 = b'\\x01\\x00'\n ffdhe3072 = b'\\x01\\x01'\n ffdhe4096 = b'\\x01\\x02'\n ffdhe6144 = b'\\x01\\x03'\n ffdhe8192 = b'\\x01\\x04'\n arbitrary_explicit_prime_curves = b'\\xff\\x01'\n arbitrary_explicit_char2_curves = b'\\xff\\x02'\n\n # Somewhat common post-quantum groups, not yet standardized:\n X25519Kyber768Draft00 = b'\\x63\\x99', True\n X25519Kyber768Draft00_obsolete = b'\\xfe\\x31', True\n X25519Kyber512Draft00 = b'\\xfe\\x30', True\n SecP256r1Kyber768Draft00 = b'\\x63\\x9a', True\n\n # Long list of unusual post-quantum groups from liboqs:\n # https://github.com/open-quantum-safe/oqs-provider/blob/main/ALGORITHMS.md?plain=1#L13\n frodo640aes = b'\\x02\\x00', True\n p256_frodo640aes = b'\\x2F\\x00', True\n x25519_frodo640aes = b'\\x2F\\x80', True\n frodo640shake = b'\\x02\\x01', True\n p256_frodo640shake = b'\\x2F\\x01', True\n x25519_frodo640shake = b'\\x2F\\x81', True\n frodo976aes = b'\\x02\\x02', True\n p384_frodo976aes = b'\\x2F\\x02', True\n x448_frodo976aes = b'\\x2F\\x82', True\n frodo976shake = b'\\x02\\x03', True\n p384_frodo976shake = b'\\x2F\\x03', True\n x448_frodo976shake = b'\\x2F\\x83', True\n frodo1344aes = b'\\x02\\x04', True\n p521_frodo1344aes = 
b'\\x2F\\x04', True\n frodo1344shake = b'\\x02\\x05', True\n p521_frodo1344shake = b'\\x2F\\x05', True\n kyber512 = b'\\x02\\x3A', True\n p256_kyber512 = b'\\x2F\\x3A', True\n x25519_kyber512 = b'\\x2F\\x39', True\n kyber768 = b'\\x02\\x3C', True\n p384_kyber768 = b'\\x2F\\x3C', True\n x448_kyber768 = b'\\x2F\\x90', True\n kyber1024 = b'\\x02\\x3D', True\n p521_kyber1024 = b'\\x2F\\x3D', True\n bikel1 = b'\\x02\\x41', True\n p256_bikel1 = b'\\x2F\\x41', True\n x25519_bikel1 = b'\\x2F\\xAE', True\n bikel3 = b'\\x02\\x42', True\n p384_bikel3 = b'\\x2F\\x42', True\n x448_bikel3 = b'\\x2F\\xAF', True\n bikel5 = b'\\x02\\x43', True\n p521_bikel5 = b'\\x2F\\x43', True\n hqc128 = b'\\x02\\x2C', True\n p256_hqc128 = b'\\x2F\\x2C', True\n x25519_hqc128 = b'\\x2F\\xAC', True\n hqc192 = b'\\x02\\x2D', True\n p384_hqc192 = b'\\x2F\\x2D', True\n x448_hqc192 = b'\\x2F\\xAD', True\n hqc256 = b'\\x02\\x2E', True\n p521_hqc256 = b'\\x2F\\x2E', True\n dilithium2 = b'\\xfe\\xa0', True\n p256_dilithium2 = b'\\xfe\\xa1', True\n rsa3072_dilithium2 = b'\\xfe\\xa2', True\n dilithium3 = b'\\xfe\\xa3', True\n p384_dilithium3 = b'\\xfe\\xa4', True\n dilithium5 = b'\\xfe\\xa5', True\n p521_dilithium5 = b'\\xfe\\xa6', True\n falcon512 = b'\\xfe\\xae', True\n p256_falcon512 = b'\\xfe\\xaf', True\n rsa3072_falcon512 = b'\\xfe\\xb0', True\n falcon1024 = b'\\xfe\\xb1', True\n p521_falcon1024 = b'\\xfe\\xb2', True\n sphincssha2128fsimple = b'\\xfe\\xb3', True\n p256_sphincssha2128fsimple = b'\\xfe\\xb4', True\n rsa3072_sphincssha2128fsimple = b'\\xfe\\xb5', True\n sphincssha2128ssimple = b'\\xfe\\xb6', True\n p256_sphincssha2128ssimple = b'\\xfe\\xb7', True\n rsa3072_sphincssha2128ssimple = b'\\xfe\\xb8', True\n sphincssha2192fsimple = b'\\xfe\\xb9', True\n p384_sphincssha2192fsimple = b'\\xfe\\xba', True\n sphincssha2192ssimple = b'\\xfe\\xbb', True\n p384_sphincssha2192ssimple = b'\\xfe\\xbc', True\n sphincssha2256fsimple = b'\\xfe\\xbd', True\n p521_sphincssha2256fsimple = b'\\xfe\\xbe', True\n sphincssha2256ssimple = b'\\xfe\\xc0', True\n p521_sphincssha2256ssimple = b'\\xfe\\xc1', True\n sphincsshake128fsimple = b'\\xfe\\xc2', True\n p256_sphincsshake128fsimple = b'\\xfe\\xc3', True\n rsa3072_sphincsshake128fsimple = b'\\xfe\\xc4', True\n sphincsshake128ssimple = b'\\xfe\\xc5', True\n p256_sphincsshake128ssimple = b'\\xfe\\xc6', True\n rsa3072_sphincsshake128ssimple = b'\\xfe\\xc7', True\n sphincsshake192fsimple = b'\\xfe\\xc8', True\n p384_sphincsshake192fsimple = b'\\xfe\\xc9', True\n sphincsshake192ssimple = b'\\xfe\\xca', True\n p384_sphincsshake192ssimple = b'\\xfe\\xcb', True\n sphincsshake256fsimple = b'\\xfe\\xcc', True\n p521_sphincsshake256fsimple = b'\\xfe\\xcd', True\n sphincsshake256ssimple = b'\\xfe\\xce', True\n p521_sphincsshake256ssimple = b'\\xfe\\xcf', True" }, { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n\n def __repr__(self):\n return self.name\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplemented\n return self.value < other.value" }, { "identifier": "CompressionMethod", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CompressionMethod(Enum):\n NULL = b'\\x00'\n DEFLATE = b'\\x01'" } ]
from enum import Enum
from multiprocessing.pool import ThreadPool
from typing import Iterable, Union, List, Optional, Iterator, Callable, Any
from urllib.parse import urlparse
from datetime import datetime, timezone
from .protocol import ClientHello, ScanError, make_client_hello, parse_server_hello, ServerAlertError, BadServerResponse, ServerHello, logger
from .names_and_numbers import AlertDescription, CipherSuite, Group, Protocol, CompressionMethod
from OpenSSL import SSL, crypto
import socket
import re
import dataclasses
import ssl, select
14,182
# Default number of workers/threads/concurrent connections to use.
DEFAULT_MAX_WORKERS: int = 6
# Default socket connection timeout, in seconds.
DEFAULT_TIMEOUT: float = 2

class DowngradeError(ScanError):
    """ Error for servers that attempt to downgrade beyond supported versions. """
    pass

class ConnectionError(ScanError):
    """ Class for error in resolving or connecting to a server. """
    pass

class ProxyError(ConnectionError):
    """ Class for errors in connecting through a proxy. """
    pass

@dataclasses.dataclass
class ConnectionSettings:
    """ Settings for a connection to a server, including the host, port, and proxy. """
    host: str
    port: int = 443
    proxy: Optional[str] = None
    timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT
    date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0))

def make_socket(settings: ConnectionSettings) -> socket.socket:
    """ Creates and connects a socket to the target server, through the chosen proxy if any. """
    socket_host, socket_port = None, None  # To appease the type checker.
    try:
        if not settings.proxy:
            socket_host, socket_port = settings.host, settings.port
            return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)

        if not settings.proxy.startswith('http://'):
            raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy)

        socket_host, socket_port = parse_target(settings.proxy, 80)

        sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)
        sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8'))
        sock_file = sock.makefile('r', newline='\r\n')
        line = sock_file.readline()
        if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line):
            sock_file.close()
            sock.close()
            raise ProxyError("Proxy refused the connection: ", line)
        while True:
            if sock_file.readline() == '\r\n':
                break
        return sock
    except TimeoutError as e:
        raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e
    except socket.gaierror as e:
        raise ConnectionError(f"Could not resolve host {socket_host}") from e
    except socket.error as e:
        raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e
# Default number of workers/threads/concurrent connections to use.
DEFAULT_MAX_WORKERS: int = 6
# Default socket connection timeout, in seconds.
DEFAULT_TIMEOUT: float = 2

class DowngradeError(ScanError):
    """ Error for servers that attempt to downgrade beyond supported versions. """
    pass

class ConnectionError(ScanError):
    """ Class for error in resolving or connecting to a server. """
    pass

class ProxyError(ConnectionError):
    """ Class for errors in connecting through a proxy. """
    pass

@dataclasses.dataclass
class ConnectionSettings:
    """ Settings for a connection to a server, including the host, port, and proxy. """
    host: str
    port: int = 443
    proxy: Optional[str] = None
    timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT
    date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0))

def make_socket(settings: ConnectionSettings) -> socket.socket:
    """ Creates and connects a socket to the target server, through the chosen proxy if any. """
    socket_host, socket_port = None, None  # To appease the type checker.
    try:
        if not settings.proxy:
            socket_host, socket_port = settings.host, settings.port
            return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)

        if not settings.proxy.startswith('http://'):
            raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy)

        socket_host, socket_port = parse_target(settings.proxy, 80)

        sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds)
        sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8'))
        sock_file = sock.makefile('r', newline='\r\n')
        line = sock_file.readline()
        if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line):
            sock_file.close()
            sock.close()
            raise ProxyError("Proxy refused the connection: ", line)
        while True:
            if sock_file.readline() == '\r\n':
                break
        return sock
    except TimeoutError as e:
        raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e
    except socket.gaierror as e:
        raise ConnectionError(f"Could not resolve host {socket_host}") from e
    except socket.error as e:
        raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e
def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello:
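The ConnectionSettings dataclass and make_socket helper above can be exercised on their own. A minimal sketch, not part of the original file, assuming a direct connection (no proxy), the default timeout, and that the module is importable as hello_tls.scan (inferred from the file path above); the hostname is purely illustrative.

from hello_tls.scan import ConnectionSettings, make_socket, ConnectionError  # assumed import path

# Connect directly to port 443 with a 2-second timeout; no proxy configured.
settings = ConnectionSettings(host='example.com', port=443, timeout_in_seconds=2.0)

try:
    sock = make_socket(settings)            # plain TCP connection via socket.create_connection
    print('connected to', sock.getpeername())
    sock.close()
except ConnectionError as e:                # the module's own ScanError subclass, not the builtin
    print('connection failed:', e)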
0
2023-10-21 02:00:13+00:00
16k
YefanZhou/TempBalance
object_detection/src/YOLOv8/ultralytics/vit/sam/modules/mask_generator.py
[ { "identifier": "MaskData", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n \"\"\"Initialize a MaskData object, ensuring all values are supported types.\"\"\"\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.'\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n \"\"\"Set an item in the MaskData object, ensuring it is a supported type.\"\"\"\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.'\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n \"\"\"Delete an item from the MaskData object.\"\"\"\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"Get an item from the MaskData object.\"\"\"\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n \"\"\"Return an ItemsView of the MaskData object.\"\"\"\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n \"\"\"Filter the MaskData object based on the given boolean tensor.\"\"\"\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.')\n\n def cat(self, new_stats: 'MaskData') -> None:\n \"\"\"Concatenate a new MaskData object to the current one.\"\"\"\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.')\n\n def to_numpy(self) -> None:\n \"\"\"Convert all torch tensors in the MaskData object to numpy arrays.\"\"\"\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n \"\"\"Calculate the area of a mask from its uncompressed RLE.\"\"\"\n return sum(rle['counts'][1::2])" }, { "identifier": "batch_iterator", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n \"\"\"Yield batches of data from the input arguments.\"\"\"\n assert args and all(len(a) == len(args[0]) for a in args), 'Batched iteration must have same-size inputs.'\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size:(b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", 
"path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n masks = masks.flatten(0, -3) if len(shape) > 2 else masks.unsqueeze(0)\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0]" }, { "identifier": "box_xyxy_to_xywh", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert bounding boxes from XYXY format to XYWH format.\"\"\"\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:\n \"\"\"Generate point grids for all crop layers.\"\"\"\n return [build_point_grid(int(n_per_side / (scale_per_layer ** i))) for i in range(n_layers + 1)]" }, { "identifier": "calculate_stability_score", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = ((masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1,\n dtype=torch.int32))\n unions = ((masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Encode uncompressed RLE (run-length encoding) to COCO RLE format.\"\"\"\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle['size']\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle['counts'] = rle['counts'].decode('utf-8') # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def generate_crop_boxes(im_size: Tuple[int, ...], n_layers: int,\n overlap_ratio: float) -> Tuple[List[List[int]], List[int]]:\n \"\"\"Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.\"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n \"\"\"Crops bounding boxes to the size of the input image.\"\"\"\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def is_box_near_crop_edge(boxes: torch.Tensor,\n crop_box: List[int],\n orig_box: List[int],\n atol: float = 20.0) -> torch.Tensor:\n \"\"\"Return a boolean tensor indicating if boxes are near the crop edge.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"Encode masks as uncompressed RLEs in the format expected by pycocotools.\"\"\"\n # Put in fortran order 
and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat([\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), ])\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({'size': [h, w], 'counts': counts})\n return out" }, { "identifier": "remove_small_regions", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:\n \"\"\"Remove small disconnected regions or holes in a mask, returning the mask and a modification indicator.\"\"\"\n import cv2 # type: ignore\n\n assert mode in {'holes', 'islands'}\n correct_holes = mode == 'holes'\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if not small_regions:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if not fill_labels:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle['size']\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle['counts']:\n mask[idx:idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n \"\"\"Uncrop bounding boxes by adding the crop box offset.\"\"\"\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:\n \"\"\"Uncrop masks by padding them to the original image size.\"\"\"\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n \"\"\"Uncrop points by adding the crop box offset.\"\"\"\n x0, y0, _, _ = 
crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" }, { "identifier": "PromptPredictor", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/modules/prompt_predictor.py", "snippet": "class PromptPredictor:\n\n def __init__(self, sam_model: Sam) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(self, image: np.ndarray, image_format: str = 'RGB') -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in {'RGB', 'BGR'}, f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(self, transformed_image: torch.Tensor, original_image_size: Tuple[int, ...]) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n if len(transformed_image.shape) != 4 \\\n or transformed_image.shape[1] != 3 \\\n or max(*transformed_image.shape[2:]) != self.model.image_encoder.img_size:\n raise ValueError('set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.')\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray, None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray, None): A length N array of labels for the\n point prompts. 
1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray, None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) before mask prediction.')\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (point_labels is not None), 'point_labels must be supplied if point_coords is supplied.'\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor, None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor, None): A BxN array of labels for the\n point prompts. 
1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray, None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) before mask prediction.')\n\n points = (point_coords, point_labels) if point_coords is not None else None\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) 
to generate an embedding.')\n assert self.features is not None, 'Features must exist if an image has been set.'\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "Sam", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/modules/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = 'RGB'\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x['image']) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if 'point_coords' in image_record:\n points = (image_record['point_coords'], image_record['point_labels'])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get('boxes', None),\n masks=image_record.get('mask_inputs', None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record['image'].shape[-2:],\n original_size=image_record['original_size'],\n )\n masks = masks > self.mask_threshold\n outputs.append({\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks, })\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode='bilinear',\n align_corners=False,\n )\n masks = masks[..., :input_size[0], :input_size[1]]\n masks = F.interpolate(masks, original_size, mode='bilinear', align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n return F.pad(x, (0, padw, 0, padh))" } ]
from typing import Any, Dict, List, Optional, Tuple from torchvision.ops.boxes import batched_nms, box_area # type: ignore from ..amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points) from .prompt_predictor import PromptPredictor from .sam import Sam from pycocotools import mask as mask_utils # type: ignore # noqa: F401 import numpy as np import torch import cv2 # type: ignore # noqa: F401
11245
'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = 'binary_mask', ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int, None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray), None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != (point_grids is None), \ 'Exactly one of points_per_side or point_grid must be provided.' 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in {'binary_mask', 'uncompressed_rle', 'coco_rle'}, f'Unknown output_mode {output_mode}.' if output_mode == 'coco_rle': if min_mask_region_area > 0: self.predictor = PromptPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode # TODO: Temporary implementation for compatibility def __call__(self, image: np.ndarray, augment=False, visualize=False) -> List[Dict[str, Any]]: return self.generate(image) @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any), np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == 'coco_rle': mask_data['segmentations'] = [coco_encode_rle(rle) for rle in mask_data['rles']] elif self.output_mode == 'binary_mask': mask_data['segmentations'] = [rle_to_mask(rle) for rle in mask_data['rles']] else: mask_data['segmentations'] = mask_data['rles'] # Write mask records curr_anns = [] for idx in range(len(mask_data['segmentations'])): ann = { 'segmentation': mask_data['segmentations'][idx], 'area': area_from_rle(mask_data['rles'][idx]), 'bbox': box_xyxy_to_xywh(mask_data['boxes'][idx]).tolist(), 'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries
keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h])
9
2023-10-24 00:45:55+00:00
16k
bytedance/ColTrack
models/dino/dino.py
[ { "identifier": "box_ops", "path": "util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }" }, { "identifier": "nested_tensor_from_tensor_list", "path": "util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the 
precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat(((x[..., :2] + 1) / 3, x[..., 2:] / 2), dim=-1)\n elif x.shape[-1] == 2:\n x = (x + 1) / 3\n else:\n raise ValueError\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" }, { "identifier": "scale_sigmoid", "path": "util/misc.py", "snippet": "def scale_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat((3 * (x[..., :2]) - 1, x[..., 2:] * 2), dim=-1)\n elif x.shape[-1] == 2:\n x = 3 * x - 1\n else:\n raise ValueError\n return x" }, { "identifier": "build_backbone", "path": "models/dino/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone: \n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords: \n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n backbone_freeze_keywords = args.backbone_freeze_keywords\n use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n if args.backbone in ['resnet50', 'resnet101']:\n backbone = Backbone(args.backbone, train_backbone, args.dilation, \n return_interm_indices, \n batch_norm=FrozenBatchNorm2d)\n bb_num_channels = backbone.num_channels\n elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n pretrain_img_size = int(args.backbone.split('_')[-2])\n backbone = build_swin_transformer(args.backbone, \\\n 
pretrain_img_size=pretrain_img_size, \\\n out_indices=tuple(return_interm_indices), \\\n dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n # freeze some layers\n if backbone_freeze_keywords is not None:\n for name, parameter in backbone.named_parameters():\n for keyword in backbone_freeze_keywords:\n if keyword in name:\n parameter.requires_grad_(False)\n break\n\n pretrained_dir = args.backbone_dir\n PTDICT = {\n 'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n 'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n 'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n }\n pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n from collections import OrderedDict\n def key_select_function(keyname):\n if 'head' in keyname:\n return False\n if args.dilation and 'layers.3' in keyname:\n return False\n return True\n _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n print(str(_tmp_st_output))\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n elif args.backbone in ['convnext_xlarge_22k']:\n backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n \n\n assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels \n assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n return model" }, { "identifier": "build_matcher", "path": "models/dino/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" }, { "identifier": "DETRsegm", "path": "models/dino/segmentation.py", "snippet": "class DETRsegm(nn.Module):\n def __init__(self, detr, freeze_detr=False):\n super().__init__()\n self.detr = detr\n\n if freeze_detr:\n for p in self.parameters():\n p.requires_grad_(False)\n\n hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n def forward(self, samples: NestedTensor):\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.detr.backbone(samples)\n\n bs = features[-1].tensors.shape[0]\n\n src, mask = features[-1].decompose()\n assert mask is not None\n src_proj = 
self.detr.input_proj(src)\n hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n outputs_class = self.detr.class_embed(hs)\n outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n if self.detr.aux_loss:\n out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n # FIXME h_boxes takes the last one computed, keep this in mind\n bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n out[\"pred_masks\"] = outputs_seg_masks\n return out" }, { "identifier": "PostProcessPanoptic", "path": "models/dino/segmentation.py", "snippet": "class PostProcessPanoptic(nn.Module):\n \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n coco panoptic API \"\"\"\n\n def __init__(self, is_thing_map, threshold=0.85):\n \"\"\"\n Parameters:\n is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether\n the class is a thing (True) or a stuff (False) class\n threshold: confidence threshold: segments with confidence lower than this will be deleted\n \"\"\"\n super().__init__()\n self.threshold = threshold\n self.is_thing_map = is_thing_map\n\n def forward(self, outputs, processed_sizes, target_sizes=None):\n \"\"\" This function computes the panoptic prediction from the model's predictions.\n Parameters:\n outputs: This is a dict coming directly from the model. See the model doc for the content.\n processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n model, ie the size after data augmentation but before batching.\n target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n of each prediction. 
If left to None, it will default to the processed_sizes\n \"\"\"\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n ):\n # we filter empty queries and detection below threshold\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n\n # It may be that we have several predicted masks for the same stuff class.\n # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda: [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n # This helper function creates the final panoptic segmentation image\n # It also returns the area of the masks that appears on the image\n\n m_id = masks.transpose(0, 1).softmax(-1)\n\n if m_id.shape[-1] == 0:\n # We didn't detect any mask :(\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n\n if dedup:\n # Merge the masks corresponding to the same stuff class\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n final_h, final_w = to_tuple(target_size)\n\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n np_seg_img = (\n torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n )\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n # We know filter empty masks as long as we find some\n while True:\n filtered_small = torch.as_tensor(\n [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n )\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n del cur_classes\n\n with io.BytesIO() as out:\n seg_img.save(out, format=\"PNG\")\n 
predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n preds.append(predictions)\n return preds" }, { "identifier": "PostProcessSegm", "path": "models/dino/segmentation.py", "snippet": "class PostProcessSegm(nn.Module):\n def __init__(self, threshold=0.5):\n super().__init__()\n self.threshold = threshold\n\n @torch.no_grad()\n def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n assert len(orig_target_sizes) == len(max_target_sizes)\n max_h, max_w = max_target_sizes.max(0)[0].tolist()\n outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n img_h, img_w = t[0], t[1]\n results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n results[i][\"masks\"] = F.interpolate(\n results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n ).byte()\n\n return results" }, { "identifier": "dice_loss", "path": "models/dino/segmentation.py", "snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes" }, { "identifier": "build_deformable_transformer", "path": "models/dino/deformable_transformer.py", "snippet": "def build_deformable_transformer(args):\n decoder_query_perturber = None\n if args.decoder_layer_noise:\n from .utils import RandomBoxPerturber\n decoder_query_perturber=RandomBoxPerturber(\n x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n use_detached_boxes_dec_out = False\n try:\n use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n except:\n use_detached_boxes_dec_out =False\n\n return DeformableTransformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_unicoder_layers=args.unic_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n modulate_hw_attn=True,\n\n deformable_encoder=True,\n deformable_decoder=True,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n use_deformable_box_attn=args.use_deformable_box_attn,\n box_attn_type=args.box_attn_type,\n\n learnable_tgt_init=True,\n decoder_query_perturber=decoder_query_perturber,\n\n add_channel_attention=args.add_channel_attention,\n add_pos_value=args.add_pos_value,\n random_refpoints_xy=args.random_refpoints_xy,\n\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n two_stage_pat_embed=args.two_stage_pat_embed,\n 
two_stage_add_query_num=args.two_stage_add_query_num,\n two_stage_learn_wh=args.two_stage_learn_wh,\n two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n dec_layer_number=args.dec_layer_number,\n rm_self_attn_layers=None,\n key_aware_type=None,\n layer_share_type=None,\n\n rm_detach=None,\n decoder_sa_type=args.decoder_sa_type,\n module_seq=args.decoder_module_seq,\n\n embed_init_tgt=args.embed_init_tgt,\n use_detached_boxes_dec_out=use_detached_boxes_dec_out\n )" }, { "identifier": "sigmoid_focal_loss", "path": "models/dino/utils.py", "snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "MLP", "path": "models/dino/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "prepare_for_cdn", "path": "models/dino/dn_components.py", "snippet": "def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if training:\n targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n dn_number = 1\n else:\n if dn_number >= 100:\n dn_number = dn_number // (int(max(known_num) * 2))\n elif dn_number < 1:\n dn_number = 1\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = 
torch.cat(known)\n labels = torch.cat([t['labels'] for t in targets])\n boxes = torch.cat([t['boxes'] for t in targets])\n batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob\n new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_pad = int(max(known_num))\n\n pad_size = int(single_pad * 2 * dn_number)\n positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part,\n diff).cuda() * box_noise_scale\n # known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to('cuda')\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to('cuda')\n if len(known_num):\n map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]\n map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n if i == dn_number - 1:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n else:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n attn_mask[single_pad * 2 * 
i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n dn_meta = {\n 'pad_size': pad_size,\n 'num_dn_group': dn_number,\n }\n else:\n\n input_query_label = None\n input_query_bbox = None\n attn_mask = None\n dn_meta = None\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta" }, { "identifier": "dn_post_process", "path": "models/dino/dn_components.py", "snippet": "def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n \"\"\"\n post process of dn after output from the transformer\n put the dn part in the dn_meta\n \"\"\"\n if dn_meta and dn_meta['pad_size'] > 0:\n output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n if aux_loss:\n out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n dn_meta['output_known_lbs_bboxes'] = out\n return outputs_class, outputs_coord" } ]
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from util import box_ops from util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid, scale_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
10,846
bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] # self.replace_sa_with_double_ca = replace_sa_with_double_ca if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): raise NotImplementedError def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. 
""" if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0]+=self.label_enc.weight[0,0]*0.0 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] # self.replace_sa_with_double_ca = replace_sa_with_double_ca if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): raise NotImplementedError def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. 
It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0]+=self.label_enc.weight[0,0]*0.0 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
layer_outputs_unsig = scale_sigmoid(layer_outputs_unsig.sigmoid())
8
2023-10-16 02:18:33+00:00
16k
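The record above ends with the gold next_line squashing the refined box back into [0, 1]; below is a minimal sketch of the delta-plus-inverse-sigmoid refinement pattern that line completes. scale_sigmoid is a project-specific wrapper that is not defined in this record, so a plain .sigmoid() stands in for it, and the tensor shapes are illustrative.

import torch

def inverse_sigmoid(x: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # Numerically stable logit: the inverse of sigmoid on (0, 1).
    x = x.clamp(min=eps, max=1 - eps)
    return torch.log(x / (1 - x))

num_queries = 4
reference_boxes = torch.rand(num_queries, 4)          # decoder reference points, already in sigmoid space
predicted_delta = 0.1 * torch.randn(num_queries, 4)   # offset predicted by this layer's bbox head

# Refine in unnormalized (logit) space, then map back to [0, 1] box coordinates.
layer_outputs_unsig = predicted_delta + inverse_sigmoid(reference_boxes)
refined_boxes = layer_outputs_unsig.sigmoid()
print(refined_boxes)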
YuroFR/freqtrade-modded-crypto-trading-bot
freqtrade/data/history/idatahandler.py
[ { "identifier": "misc", "path": "freqtrade/misc.py", "snippet": "def decimals_per_coin(coin: str):\ndef round_coin_value(\n value: float, coin: str, show_coin_name=True, keep_trailing_zeros=False) -> str:\ndef file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool = True) -> None:\ndef file_dump_joblib(filename: Path, data: Any, log: bool = True) -> None:\ndef json_load(datafile: Union[gzip.GzipFile, TextIO]) -> Any:\ndef file_load_json(file: Path):\ndef is_file_in_dir(file: Path, directory: Path) -> bool:\ndef pair_to_filename(pair: str) -> str:\ndef deep_merge_dicts(source, destination, allow_null_overrides: bool = True):\ndef round_dict(d, n):\ndef safe_value_fallback(obj: dict, key1: str, key2: Optional[str] = None, default_value=None):\ndef safe_value_fallback2(dict1: dictMap, dict2: dictMap, key1: str, key2: str, default_value=None):\ndef plural(num: float, singular: str, plural: Optional[str] = None) -> str:\ndef chunks(lst: List[Any], n: int) -> Iterator[List[Any]]:\ndef parse_db_uri_for_logging(uri: str):\ndef dataframe_to_json(dataframe: pd.DataFrame) -> str:\ndef json_to_dataframe(data: str) -> pd.DataFrame:\ndef remove_entry_exit_signals(dataframe: pd.DataFrame):\ndef append_candles_to_dataframe(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:" }, { "identifier": "TimeRange", "path": "freqtrade/configuration/timerange.py", "snippet": "class TimeRange:\n \"\"\"\n object defining timerange inputs.\n [start/stop]type defines if [start/stop]ts shall be used.\n if *type is None, don't use corresponding startvalue.\n \"\"\"\n\n def __init__(self, starttype: Optional[str] = None, stoptype: Optional[str] = None,\n startts: int = 0, stopts: int = 0):\n\n self.starttype: Optional[str] = starttype\n self.stoptype: Optional[str] = stoptype\n self.startts: int = startts\n self.stopts: int = stopts\n\n @property\n def startdt(self) -> Optional[datetime]:\n if self.startts:\n return datetime.fromtimestamp(self.startts, tz=timezone.utc)\n return None\n\n @property\n def stopdt(self) -> Optional[datetime]:\n if self.stopts:\n return datetime.fromtimestamp(self.stopts, tz=timezone.utc)\n return None\n\n @property\n def timerange_str(self) -> str:\n \"\"\"\n Returns a string representation of the timerange as used by parse_timerange.\n Follows the format yyyymmdd-yyyymmdd - leaving out the parts that are not set.\n \"\"\"\n start = ''\n stop = ''\n if startdt := self.startdt:\n start = startdt.strftime('%Y%m%d')\n if stopdt := self.stopdt:\n stop = stopdt.strftime('%Y%m%d')\n return f\"{start}-{stop}\"\n\n @property\n def start_fmt(self) -> str:\n \"\"\"\n Returns a string representation of the start date\n \"\"\"\n val = 'unbounded'\n if (startdt := self.startdt) is not None:\n val = startdt.strftime(DATETIME_PRINT_FORMAT)\n return val\n\n @property\n def stop_fmt(self) -> str:\n \"\"\"\n Returns a string representation of the stop date\n \"\"\"\n val = 'unbounded'\n if (stopdt := self.stopdt) is not None:\n val = stopdt.strftime(DATETIME_PRINT_FORMAT)\n return val\n\n def __eq__(self, other):\n \"\"\"Override the default Equals behavior\"\"\"\n return (self.starttype == other.starttype and self.stoptype == other.stoptype\n and self.startts == other.startts and self.stopts == other.stopts)\n\n def subtract_start(self, seconds: int) -> None:\n \"\"\"\n Subtracts <seconds> from startts if startts is set.\n :param seconds: Seconds to subtract from starttime\n :return: None (Modifies the object in place)\n \"\"\"\n if self.startts:\n self.startts = self.startts - 
seconds\n\n def adjust_start_if_necessary(self, timeframe_secs: int, startup_candles: int,\n min_date: datetime) -> None:\n \"\"\"\n Adjust startts by <startup_candles> candles.\n Applies only if no startup-candles have been available.\n :param timeframe_secs: Timeframe in seconds e.g. `timeframe_to_seconds('5m')`\n :param startup_candles: Number of candles to move start-date forward\n :param min_date: Minimum data date loaded. Key kriterium to decide if start-time\n has to be moved\n :return: None (Modifies the object in place)\n \"\"\"\n if (not self.starttype or (startup_candles\n and min_date.timestamp() >= self.startts)):\n # If no startts was defined, or backtest-data starts at the defined backtest-date\n logger.warning(\"Moving start-date by %s candles to account for startup time.\",\n startup_candles)\n self.startts = int(min_date.timestamp() + timeframe_secs * startup_candles)\n self.starttype = 'date'\n\n @classmethod\n def parse_timerange(cls, text: Optional[str]) -> Self:\n \"\"\"\n Parse the value of the argument --timerange to determine what is the range desired\n :param text: value from --timerange\n :return: Start and End range period\n \"\"\"\n if not text:\n return cls(None, None, 0, 0)\n syntax = [(r'^-(\\d{8})$', (None, 'date')),\n (r'^(\\d{8})-$', ('date', None)),\n (r'^(\\d{8})-(\\d{8})$', ('date', 'date')),\n (r'^-(\\d{10})$', (None, 'date')),\n (r'^(\\d{10})-$', ('date', None)),\n (r'^(\\d{10})-(\\d{10})$', ('date', 'date')),\n (r'^-(\\d{13})$', (None, 'date')),\n (r'^(\\d{13})-$', ('date', None)),\n (r'^(\\d{13})-(\\d{13})$', ('date', 'date')),\n ]\n for rex, stype in syntax:\n # Apply the regular expression to text\n match = re.match(rex, text)\n if match: # Regex has matched\n rvals = match.groups()\n index = 0\n start: int = 0\n stop: int = 0\n if stype[0]:\n starts = rvals[index]\n if stype[0] == 'date' and len(starts) == 8:\n start = int(datetime.strptime(starts, '%Y%m%d').replace(\n tzinfo=timezone.utc).timestamp())\n elif len(starts) == 13:\n start = int(starts) // 1000\n else:\n start = int(starts)\n index += 1\n if stype[1]:\n stops = rvals[index]\n if stype[1] == 'date' and len(stops) == 8:\n stop = int(datetime.strptime(stops, '%Y%m%d').replace(\n tzinfo=timezone.utc).timestamp())\n elif len(stops) == 13:\n stop = int(stops) // 1000\n else:\n stop = int(stops)\n if start > stop > 0:\n raise OperationalException(\n f'Start date is after stop date for timerange \"{text}\"')\n return cls(stype[0], stype[1], start, stop)\n raise OperationalException(f'Incorrect syntax for timerange \"{text}\"')" }, { "identifier": "DEFAULT_TRADES_COLUMNS", "path": "freqtrade/constants.py", "snippet": "DOCS_LINK = \"https://www.freqtrade.io/en/stable\"\nDEFAULT_CONFIG = 'config.json'\nPROCESS_THROTTLE_SECS = 5 # sec\nHYPEROPT_EPOCH = 100 # epochs\nRETRY_TIMEOUT = 30 # sec\nTIMEOUT_UNITS = ['minutes', 'seconds']\nEXPORT_OPTIONS = ['none', 'trades', 'signals']\nDEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite'\nDEFAULT_DB_DRYRUN_URL = 'sqlite:///tradesv3.dryrun.sqlite'\nUNLIMITED_STAKE_AMOUNT = 'unlimited'\nDEFAULT_AMOUNT_RESERVE_PERCENT = 0.05\nREQUIRED_ORDERTIF = ['entry', 'exit']\nREQUIRED_ORDERTYPES = ['entry', 'exit', 'stoploss', 'stoploss_on_exchange']\nPRICING_SIDES = ['ask', 'bid', 'same', 'other']\nORDERTYPE_POSSIBILITIES = ['limit', 'market']\n_ORDERTIF_POSSIBILITIES = ['GTC', 'FOK', 'IOC', 'PO']\nORDERTIF_POSSIBILITIES = _ORDERTIF_POSSIBILITIES + [t.lower() for t in _ORDERTIF_POSSIBILITIES]\nSTOPLOSS_PRICE_TYPES = [p for p in PriceType]\nHYPEROPT_LOSS_BUILTIN = 
['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss',\n 'SharpeHyperOptLoss', 'SharpeHyperOptLossDaily',\n 'SortinoHyperOptLoss', 'SortinoHyperOptLossDaily',\n 'CalmarHyperOptLoss',\n 'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss',\n 'ProfitDrawDownHyperOptLoss']\nAVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairList',\n 'AgeFilter', \"FullTradesFilter\", 'OffsetFilter', 'PerformanceFilter',\n 'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',\n 'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter']\nAVAILABLE_PROTECTIONS = ['CooldownPeriod',\n 'LowProfitPairs', 'MaxDrawdown', 'StoplossGuard']\nAVAILABLE_DATAHANDLERS = ['json', 'jsongz', 'hdf5', 'feather', 'parquet']\nBACKTEST_BREAKDOWNS = ['day', 'week', 'month']\nBACKTEST_CACHE_AGE = ['none', 'day', 'week', 'month']\nBACKTEST_CACHE_DEFAULT = 'day'\nDRY_RUN_WALLET = 1000\nDATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'\nMATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons\nDEFAULT_DATAFRAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume']\nDEFAULT_TRADES_COLUMNS = ['timestamp', 'id', 'type', 'side', 'price', 'amount', 'cost']\nTRADES_DTYPES = {\n 'timestamp': 'int64',\n 'id': 'str',\n 'type': 'str',\n 'side': 'str',\n 'price': 'float64',\n 'amount': 'float64',\n 'cost': 'float64',\n}\nTRADING_MODES = ['spot', 'margin', 'futures']\nMARGIN_MODES = ['cross', 'isolated', '']\nLAST_BT_RESULT_FN = '.last_result.json'\nFTHYPT_FILEVERSION = 'fthypt_fileversion'\nUSERPATH_HYPEROPTS = 'hyperopts'\nUSERPATH_STRATEGIES = 'strategies'\nUSERPATH_NOTEBOOKS = 'notebooks'\nUSERPATH_FREQAIMODELS = 'freqaimodels'\nTELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']\nWEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']\nFULL_DATAFRAME_THRESHOLD = 100\nCUSTOM_TAG_MAX_LENGTH = 255\nDL_DATA_TIMEFRAMES = ['1m', '5m']\nENV_VAR_PREFIX = 'FREQTRADE__'\nCANCELED_EXCHANGE_STATES = ('cancelled', 'canceled', 'expired')\nNON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ('closed',)\nDECIMAL_PER_COIN_FALLBACK = 3 # Should be low to avoid listing all possible FIAT's\nDECIMALS_PER_COIN = {\n 'BTC': 8,\n 'ETH': 5,\n}\nDUST_PER_COIN = {\n 'BTC': 0.0001,\n 'ETH': 0.01\n}\nUSER_DATA_FILES = {\n 'sample_strategy.py': USERPATH_STRATEGIES,\n 'sample_hyperopt_loss.py': USERPATH_HYPEROPTS,\n 'strategy_analysis_example.ipynb': USERPATH_NOTEBOOKS,\n}\nSUPPORTED_FIAT = [\n \"AUD\", \"BRL\", \"CAD\", \"CHF\", \"CLP\", \"CNY\", \"CZK\", \"DKK\",\n \"EUR\", \"GBP\", \"HKD\", \"HUF\", \"IDR\", \"ILS\", \"INR\", \"JPY\",\n \"KRW\", \"MXN\", \"MYR\", \"NOK\", \"NZD\", \"PHP\", \"PKR\", \"PLN\",\n \"RUB\", \"UAH\", \"SEK\", \"SGD\", \"THB\", \"TRY\", \"TWD\", \"ZAR\",\n \"USD\", \"BTC\", \"ETH\", \"XRP\", \"LTC\", \"BCH\"\n]\nMINIMAL_CONFIG = {\n \"stake_currency\": \"\",\n \"dry_run\": True,\n \"exchange\": {\n \"name\": \"\",\n \"key\": \"\",\n \"secret\": \"\",\n \"pair_whitelist\": [],\n \"ccxt_async_config\": {\n }\n }\n}\n__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}\nCONF_SCHEMA = {\n 'type': 'object',\n 'properties': {\n 'max_open_trades': {'type': ['integer', 'number'], 'minimum': -1},\n 'new_pairs_days': {'type': 'integer', 'default': 30},\n 'timeframe': {'type': 'string'},\n 'stake_currency': {'type': 'string'},\n 'stake_amount': {\n 'type': ['number', 'string'],\n 'minimum': 0.0001,\n 'pattern': UNLIMITED_STAKE_AMOUNT\n },\n 'tradable_balance_ratio': {\n 'type': 'number',\n 'minimum': 0.0,\n 'maximum': 1,\n 'default': 0.99\n },\n 'available_capital': 
{\n 'type': 'number',\n 'minimum': 0,\n },\n 'amend_last_stake_amount': {'type': 'boolean', 'default': False},\n 'last_stake_amount_min_ratio': {\n 'type': 'number', 'minimum': 0.0, 'maximum': 1.0, 'default': 0.5\n },\n 'fiat_display_currency': {'type': 'string', 'enum': SUPPORTED_FIAT},\n 'dry_run': {'type': 'boolean'},\n 'dry_run_wallet': {'type': 'number', 'default': DRY_RUN_WALLET},\n 'cancel_open_orders_on_exit': {'type': 'boolean', 'default': False},\n 'process_only_new_candles': {'type': 'boolean'},\n 'minimal_roi': {\n 'type': 'object',\n 'patternProperties': {\n '^[0-9.]+$': {'type': 'number'}\n },\n },\n 'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5},\n 'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True},\n 'trailing_stop': {'type': 'boolean'},\n 'trailing_stop_positive': {'type': 'number', 'minimum': 0, 'maximum': 1},\n 'trailing_stop_positive_offset': {'type': 'number', 'minimum': 0, 'maximum': 1},\n 'trailing_only_offset_is_reached': {'type': 'boolean'},\n 'use_exit_signal': {'type': 'boolean'},\n 'exit_profit_only': {'type': 'boolean'},\n 'exit_profit_offset': {'type': 'number'},\n 'ignore_roi_if_entry_signal': {'type': 'boolean'},\n 'ignore_buying_expired_candle_after': {'type': 'number'},\n 'trading_mode': {'type': 'string', 'enum': TRADING_MODES},\n 'margin_mode': {'type': 'string', 'enum': MARGIN_MODES},\n 'reduce_df_footprint': {'type': 'boolean', 'default': False},\n 'minimum_trade_amount': {'type': 'number', 'default': 10},\n 'targeted_trade_amount': {'type': 'number', 'default': 20},\n 'lookahead_analysis_exportfilename': {'type': 'string'},\n 'startup_candle': {\n 'type': 'array',\n 'uniqueItems': True,\n 'default': [199, 399, 499, 999, 1999],\n },\n 'liquidation_buffer': {'type': 'number', 'minimum': 0.0, 'maximum': 0.99},\n 'backtest_breakdown': {\n 'type': 'array',\n 'items': {'type': 'string', 'enum': BACKTEST_BREAKDOWNS}\n },\n 'bot_name': {'type': 'string'},\n 'unfilledtimeout': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'number', 'minimum': 1},\n 'exit': {'type': 'number', 'minimum': 1},\n 'exit_timeout_count': {'type': 'number', 'minimum': 0, 'default': 0},\n 'unit': {'type': 'string', 'enum': TIMEOUT_UNITS, 'default': 'minutes'}\n }\n },\n 'entry_pricing': {\n 'type': 'object',\n 'properties': {\n 'price_last_balance': {\n 'type': 'number',\n 'minimum': 0,\n 'maximum': 1,\n 'exclusiveMaximum': False,\n },\n 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'},\n 'use_order_book': {'type': 'boolean'},\n 'order_book_top': {'type': 'integer', 'minimum': 1, 'maximum': 50, },\n 'check_depth_of_market': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'bids_to_ask_delta': {'type': 'number', 'minimum': 0},\n }\n },\n },\n 'required': ['price_side']\n },\n 'exit_pricing': {\n 'type': 'object',\n 'properties': {\n 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'},\n 'price_last_balance': {\n 'type': 'number',\n 'minimum': 0,\n 'maximum': 1,\n 'exclusiveMaximum': False,\n },\n 'use_order_book': {'type': 'boolean'},\n 'order_book_top': {'type': 'integer', 'minimum': 1, 'maximum': 50, },\n },\n 'required': ['price_side']\n },\n 'custom_price_max_distance_ratio': {\n 'type': 'number', 'minimum': 0.0\n },\n 'order_types': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'exit': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'force_exit': {'type': 'string', 'enum': 
ORDERTYPE_POSSIBILITIES},\n 'force_entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'emergency_exit': {\n 'type': 'string',\n 'enum': ORDERTYPE_POSSIBILITIES,\n 'default': 'market'},\n 'stoploss': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'stoploss_on_exchange': {'type': 'boolean'},\n 'stoploss_price_type': {'type': 'string', 'enum': STOPLOSS_PRICE_TYPES},\n 'stoploss_on_exchange_interval': {'type': 'number'},\n 'stoploss_on_exchange_limit_ratio': {'type': 'number', 'minimum': 0.0,\n 'maximum': 1.0}\n },\n 'required': ['entry', 'exit', 'stoploss', 'stoploss_on_exchange']\n },\n 'order_time_in_force': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES},\n 'exit': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES}\n },\n 'required': REQUIRED_ORDERTIF\n },\n 'exchange': {'$ref': '#/definitions/exchange'},\n 'edge': {'$ref': '#/definitions/edge'},\n 'freqai': {'$ref': '#/definitions/freqai'},\n 'external_message_consumer': {'$ref': '#/definitions/external_message_consumer'},\n 'experimental': {\n 'type': 'object',\n 'properties': {\n 'block_bad_exchanges': {'type': 'boolean'}\n }\n },\n 'pairlists': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'method': {'type': 'string', 'enum': AVAILABLE_PAIRLISTS},\n },\n 'required': ['method'],\n }\n },\n 'protections': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'method': {'type': 'string', 'enum': AVAILABLE_PROTECTIONS},\n 'stop_duration': {'type': 'number', 'minimum': 0.0},\n 'stop_duration_candles': {'type': 'number', 'minimum': 0},\n 'trade_limit': {'type': 'number', 'minimum': 1},\n 'lookback_period': {'type': 'number', 'minimum': 1},\n 'lookback_period_candles': {'type': 'number', 'minimum': 1},\n },\n 'required': ['method'],\n }\n },\n 'telegram': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'token': {'type': 'string'},\n 'chat_id': {'type': 'string'},\n 'allow_custom_messages': {'type': 'boolean', 'default': True},\n 'balance_dust_level': {'type': 'number', 'minimum': 0.0},\n 'notification_settings': {\n 'type': 'object',\n 'default': {},\n 'properties': {\n 'status': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'warning': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'startup': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'entry': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'entry_fill': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'off'\n },\n 'entry_cancel': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS, },\n 'exit': {\n 'type': ['string', 'object'],\n 'additionalProperties': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS\n }\n },\n 'exit_fill': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'exit_cancel': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'protection_trigger': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'protection_trigger_global': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'show_candle': {\n 'type': 'string',\n 'enum': ['off', 'ohlc'],\n 'default': 'off'\n },\n 'strategy_msg': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n }\n },\n 'reload': {'type': 'boolean'},\n },\n 'required': ['enabled', 'token', 'chat_id'],\n },\n 'webhook': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'url': {'type': 
'string'},\n 'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},\n 'retries': {'type': 'integer', 'minimum': 0},\n 'retry_delay': {'type': 'number', 'minimum': 0},\n **__MESSAGE_TYPE_DICT,\n # **{x: {'type': 'object'} for x in RPCMessageType},\n # Below -> Deprecated\n 'webhookentry': {'type': 'object'},\n 'webhookentrycancel': {'type': 'object'},\n 'webhookentryfill': {'type': 'object'},\n 'webhookexit': {'type': 'object'},\n 'webhookexitcancel': {'type': 'object'},\n 'webhookexitfill': {'type': 'object'},\n 'webhookstatus': {'type': 'object'},\n },\n },\n 'discord': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'webhook_url': {'type': 'string'},\n \"exit_fill\": {\n 'type': 'array', 'items': {'type': 'object'},\n 'default': [\n {\"Trade ID\": \"{trade_id}\"},\n {\"Exchange\": \"{exchange}\"},\n {\"Pair\": \"{pair}\"},\n {\"Direction\": \"{direction}\"},\n {\"Open rate\": \"{open_rate}\"},\n {\"Close rate\": \"{close_rate}\"},\n {\"Amount\": \"{amount}\"},\n {\"Open date\": \"{open_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Close date\": \"{close_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Profit\": \"{profit_amount} {stake_currency}\"},\n {\"Profitability\": \"{profit_ratio:.2%}\"},\n {\"Enter tag\": \"{enter_tag}\"},\n {\"Exit Reason\": \"{exit_reason}\"},\n {\"Strategy\": \"{strategy}\"},\n {\"Timeframe\": \"{timeframe}\"},\n ]\n },\n \"entry_fill\": {\n 'type': 'array', 'items': {'type': 'object'},\n 'default': [\n {\"Trade ID\": \"{trade_id}\"},\n {\"Exchange\": \"{exchange}\"},\n {\"Pair\": \"{pair}\"},\n {\"Direction\": \"{direction}\"},\n {\"Open rate\": \"{open_rate}\"},\n {\"Amount\": \"{amount}\"},\n {\"Open date\": \"{open_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Enter tag\": \"{enter_tag}\"},\n {\"Strategy\": \"{strategy} {timeframe}\"},\n ]\n },\n }\n },\n 'api_server': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'listen_ip_address': {'format': 'ipv4'},\n 'listen_port': {\n 'type': 'integer',\n 'minimum': 1024,\n 'maximum': 65535\n },\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'ws_token': {'type': ['string', 'array'], 'items': {'type': 'string'}},\n 'jwt_secret_key': {'type': 'string'},\n 'CORS_origins': {'type': 'array', 'items': {'type': 'string'}},\n 'verbosity': {'type': 'string', 'enum': ['error', 'info']},\n },\n 'required': ['enabled', 'listen_ip_address', 'listen_port', 'username', 'password']\n },\n 'db_url': {'type': 'string'},\n 'export': {'type': 'string', 'enum': EXPORT_OPTIONS, 'default': 'trades'},\n 'disableparamexport': {'type': 'boolean'},\n 'initial_state': {'type': 'string', 'enum': ['running', 'stopped']},\n 'force_entry_enable': {'type': 'boolean'},\n 'disable_dataframe_checks': {'type': 'boolean'},\n 'internals': {\n 'type': 'object',\n 'default': {},\n 'properties': {\n 'process_throttle_secs': {'type': 'integer'},\n 'interval': {'type': 'integer'},\n 'sd_notify': {'type': 'boolean'},\n }\n },\n 'dataformat_ohlcv': {\n 'type': 'string',\n 'enum': AVAILABLE_DATAHANDLERS,\n 'default': 'feather'\n },\n 'dataformat_trades': {\n 'type': 'string',\n 'enum': AVAILABLE_DATAHANDLERS,\n 'default': 'feather'\n },\n 'position_adjustment_enable': {'type': 'boolean'},\n 'max_entry_position_adjustment': {'type': ['integer', 'number'], 'minimum': -1},\n },\n 'definitions': {\n 'exchange': {\n 'type': 'object',\n 'properties': {\n 'name': {'type': 'string'},\n 'key': {'type': 'string', 'default': ''},\n 'secret': {'type': 'string', 'default': ''},\n 'password': {'type': 'string', 
'default': ''},\n 'uid': {'type': 'string'},\n 'pair_whitelist': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n },\n 'uniqueItems': True\n },\n 'pair_blacklist': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n },\n 'uniqueItems': True\n },\n 'unknown_fee_rate': {'type': 'number'},\n 'outdated_offset': {'type': 'integer', 'minimum': 1},\n 'markets_refresh_interval': {'type': 'integer'},\n 'ccxt_config': {'type': 'object'},\n 'ccxt_async_config': {'type': 'object'}\n },\n 'required': ['name']\n },\n 'edge': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'process_throttle_secs': {'type': 'integer', 'minimum': 600},\n 'calculate_since_number_of_days': {'type': 'integer'},\n 'allowed_risk': {'type': 'number'},\n 'stoploss_range_min': {'type': 'number'},\n 'stoploss_range_max': {'type': 'number'},\n 'stoploss_range_step': {'type': 'number'},\n 'minimum_winrate': {'type': 'number'},\n 'minimum_expectancy': {'type': 'number'},\n 'min_trade_number': {'type': 'number'},\n 'max_trade_duration_minute': {'type': 'integer'},\n 'remove_pumps': {'type': 'boolean'}\n },\n 'required': ['process_throttle_secs', 'allowed_risk']\n },\n 'external_message_consumer': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean', 'default': False},\n 'producers': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'name': {'type': 'string'},\n 'host': {'type': 'string'},\n 'port': {\n 'type': 'integer',\n 'default': 8080,\n 'minimum': 0,\n 'maximum': 65535\n },\n 'secure': {'type': 'boolean', 'default': False},\n 'ws_token': {'type': 'string'},\n },\n 'required': ['name', 'host', 'ws_token']\n }\n },\n 'wait_timeout': {'type': 'integer', 'minimum': 0},\n 'sleep_time': {'type': 'integer', 'minimum': 0},\n 'ping_timeout': {'type': 'integer', 'minimum': 0},\n 'remove_entry_exit_signals': {'type': 'boolean', 'default': False},\n 'initial_candle_limit': {\n 'type': 'integer',\n 'minimum': 0,\n 'maximum': 1500,\n 'default': 1500\n },\n 'message_size_limit': { # In megabytes\n 'type': 'integer',\n 'minimum': 1,\n 'maxmium': 20,\n 'default': 8,\n }\n },\n 'required': ['producers']\n },\n \"freqai\": {\n \"type\": \"object\",\n \"properties\": {\n \"enabled\": {\"type\": \"boolean\", \"default\": False},\n \"keras\": {\"type\": \"boolean\", \"default\": False},\n \"write_metrics_to_disk\": {\"type\": \"boolean\", \"default\": False},\n \"purge_old_models\": {\"type\": [\"boolean\", \"number\"], \"default\": 2},\n \"conv_width\": {\"type\": \"integer\", \"default\": 1},\n \"train_period_days\": {\"type\": \"integer\", \"default\": 0},\n \"backtest_period_days\": {\"type\": \"number\", \"default\": 7},\n \"identifier\": {\"type\": \"string\", \"default\": \"example\"},\n \"feature_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"include_corr_pairlist\": {\"type\": \"array\"},\n \"include_timeframes\": {\"type\": \"array\"},\n \"label_period_candles\": {\"type\": \"integer\"},\n \"include_shifted_candles\": {\"type\": \"integer\", \"default\": 0},\n \"DI_threshold\": {\"type\": \"number\", \"default\": 0},\n \"weight_factor\": {\"type\": \"number\", \"default\": 0},\n \"principal_component_analysis\": {\"type\": \"boolean\", \"default\": False},\n \"use_SVM_to_remove_outliers\": {\"type\": \"boolean\", \"default\": False},\n \"plot_feature_importances\": {\"type\": \"integer\", \"default\": 0},\n \"svm_params\": {\"type\": \"object\",\n \"properties\": {\n \"shuffle\": {\"type\": \"boolean\", \"default\": False},\n \"nu\": {\"type\": 
\"number\", \"default\": 0.1}\n },\n },\n \"shuffle_after_split\": {\"type\": \"boolean\", \"default\": False},\n \"buffer_train_data_candles\": {\"type\": \"integer\", \"default\": 0}\n },\n \"required\": [\"include_timeframes\", \"include_corr_pairlist\", ]\n },\n \"data_split_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"test_size\": {\"type\": \"number\"},\n \"random_state\": {\"type\": \"integer\"},\n \"shuffle\": {\"type\": \"boolean\", \"default\": False}\n },\n },\n \"model_training_parameters\": {\n \"type\": \"object\"\n },\n \"rl_config\": {\n \"type\": \"object\",\n \"properties\": {\n \"drop_ohlc_from_features\": {\"type\": \"boolean\", \"default\": False},\n \"train_cycles\": {\"type\": \"integer\"},\n \"max_trade_duration_candles\": {\"type\": \"integer\"},\n \"add_state_info\": {\"type\": \"boolean\", \"default\": False},\n \"max_training_drawdown_pct\": {\"type\": \"number\", \"default\": 0.02},\n \"cpu_count\": {\"type\": \"integer\", \"default\": 1},\n \"model_type\": {\"type\": \"string\", \"default\": \"PPO\"},\n \"policy_type\": {\"type\": \"string\", \"default\": \"MlpPolicy\"},\n \"net_arch\": {\"type\": \"array\", \"default\": [128, 128]},\n \"randomize_starting_position\": {\"type\": \"boolean\", \"default\": False},\n \"progress_bar\": {\"type\": \"boolean\", \"default\": True},\n \"model_reward_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"rr\": {\"type\": \"number\", \"default\": 1},\n \"profit_aim\": {\"type\": \"number\", \"default\": 0.025}\n }\n }\n },\n },\n },\n \"required\": [\n \"enabled\",\n \"train_period_days\",\n \"backtest_period_days\",\n \"identifier\",\n \"feature_parameters\",\n \"data_split_parameters\"\n ]\n },\n },\n}\nSCHEMA_TRADE_REQUIRED = [\n 'exchange',\n 'timeframe',\n 'max_open_trades',\n 'stake_currency',\n 'stake_amount',\n 'tradable_balance_ratio',\n 'last_stake_amount_min_ratio',\n 'dry_run',\n 'dry_run_wallet',\n 'exit_pricing',\n 'entry_pricing',\n 'stoploss',\n 'minimal_roi',\n 'internals',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_BACKTEST_REQUIRED = [\n 'exchange',\n 'stake_currency',\n 'stake_amount',\n 'dry_run_wallet',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_BACKTEST_REQUIRED_FINAL = SCHEMA_BACKTEST_REQUIRED + [\n 'stoploss',\n 'minimal_roi',\n 'max_open_trades'\n]\nSCHEMA_MINIMAL_REQUIRED = [\n 'exchange',\n 'dry_run',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_MINIMAL_WEBSERVER = SCHEMA_MINIMAL_REQUIRED + [\n 'api_server',\n]\nCANCEL_REASON = {\n \"TIMEOUT\": \"cancelled due to timeout\",\n \"PARTIALLY_FILLED_KEEP_OPEN\": \"partially filled - keeping order open\",\n \"PARTIALLY_FILLED\": \"partially filled\",\n \"FULLY_CANCELLED\": \"fully cancelled\",\n \"ALL_CANCELLED\": \"cancelled (all unfilled and partially filled open orders cancelled)\",\n \"CANCELLED_ON_EXCHANGE\": \"cancelled on exchange\",\n \"FORCE_EXIT\": \"forcesold\",\n \"REPLACE\": \"cancelled to be replaced by new limit order\",\n \"REPLACE_FAILED\": \"failed to replace order, deleting Trade\",\n \"USER_CANCEL\": \"user requested order cancel\"\n}" }, { "identifier": "clean_ohlcv_dataframe", "path": "freqtrade/data/converter/converter.py", "snippet": "def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,\n fill_missing: bool, drop_incomplete: bool) -> DataFrame:\n \"\"\"\n Cleanse a OHLCV dataframe by\n * Grouping it by date (removes duplicate tics)\n * dropping last candles if requested\n * Filling up missing data (if requested)\n :param data: DataFrame 
containing candle (OHLCV) data.\n :param timeframe: timeframe (e.g. 5m). Used to fill up eventual missing data\n :param pair: Pair this data is for (used to warn if fillup was necessary)\n :param fill_missing: fill up missing candles with 0 candles\n (see ohlcv_fill_up_missing_data for details)\n :param drop_incomplete: Drop the last candle of the dataframe, assuming it's incomplete\n :return: DataFrame\n \"\"\"\n # group by index and aggregate results to eliminate duplicate ticks\n data = data.groupby(by='date', as_index=False, sort=True).agg({\n 'open': 'first',\n 'high': 'max',\n 'low': 'min',\n 'close': 'last',\n 'volume': 'max',\n })\n # eliminate partial candle\n if drop_incomplete:\n data.drop(data.tail(1).index, inplace=True)\n logger.debug('Dropping last candle')\n\n if fill_missing:\n return ohlcv_fill_up_missing_data(data, timeframe, pair)\n else:\n return data" }, { "identifier": "trim_dataframe", "path": "freqtrade/data/converter/converter.py", "snippet": "def trim_dataframe(df: DataFrame, timerange, *, df_date_col: str = 'date',\n startup_candles: int = 0) -> DataFrame:\n \"\"\"\n Trim dataframe based on given timerange\n :param df: Dataframe to trim\n :param timerange: timerange (use start and end date if available)\n :param df_date_col: Column in the dataframe to use as Date column\n :param startup_candles: When not 0, is used instead the timerange start date\n :return: trimmed dataframe\n \"\"\"\n if startup_candles:\n # Trim candles instead of timeframe in case of given startup_candle count\n df = df.iloc[startup_candles:, :]\n else:\n if timerange.starttype == 'date':\n df = df.loc[df[df_date_col] >= timerange.startdt, :]\n if timerange.stoptype == 'date':\n df = df.loc[df[df_date_col] <= timerange.stopdt, :]\n return df" }, { "identifier": "trades_convert_types", "path": "freqtrade/data/converter/trade_converter.py", "snippet": "def trades_convert_types(trades: DataFrame) -> DataFrame:\n \"\"\"\n Convert Trades dtypes and add 'date' column\n \"\"\"\n trades = trades.astype(TRADES_DTYPES)\n trades['date'] = to_datetime(trades['timestamp'], unit='ms', utc=True)\n return trades" }, { "identifier": "trades_df_remove_duplicates", "path": "freqtrade/data/converter/trade_converter.py", "snippet": "def trades_df_remove_duplicates(trades: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Removes duplicates from the trades DataFrame.\n Uses pandas.DataFrame.drop_duplicates to remove duplicates based on the 'timestamp' column.\n :param trades: DataFrame with the columns constants.DEFAULT_TRADES_COLUMNS\n :return: DataFrame with duplicates removed based on the 'timestamp' column\n \"\"\"\n return trades.drop_duplicates(subset=['timestamp', 'id'])" }, { "identifier": "CandleType", "path": "freqtrade/enums/candletype.py", "snippet": "class CandleType(str, Enum):\n \"\"\"Enum to distinguish candle types\"\"\"\n SPOT = \"spot\"\n FUTURES = \"futures\"\n MARK = \"mark\"\n INDEX = \"index\"\n PREMIUMINDEX = \"premiumIndex\"\n\n # TODO: Could take up less memory if these weren't a CandleType\n FUNDING_RATE = \"funding_rate\"\n # BORROW_RATE = \"borrow_rate\" # * unimplemented\n\n def __str__(self):\n return f\"{self.name.lower()}\"\n\n @staticmethod\n def from_string(value: str) -> 'CandleType':\n if not value:\n # Default to spot\n return CandleType.SPOT\n return CandleType(value)\n\n @staticmethod\n def get_default(trading_mode: str) -> 'CandleType':\n if trading_mode == 'futures':\n return CandleType.FUTURES\n return CandleType.SPOT" }, { "identifier": "TradingMode", "path": 
"freqtrade/enums/tradingmode.py", "snippet": "class TradingMode(str, Enum):\n \"\"\"\n Enum to distinguish between\n spot, margin, futures or any other trading method\n \"\"\"\n SPOT = \"spot\"\n MARGIN = \"margin\"\n FUTURES = \"futures\"" }, { "identifier": "timeframe_to_seconds", "path": "freqtrade/exchange/exchange_utils.py", "snippet": "def timeframe_to_seconds(timeframe: str) -> int:\n \"\"\"\n Translates the timeframe interval value written in the human readable\n form ('1m', '5m', '1h', '1d', '1w', etc.) to the number\n of seconds for one timeframe interval.\n \"\"\"\n return ccxt.Exchange.parse_timeframe(timeframe)" } ]
import logging import re from abc import ABC, abstractmethod from copy import deepcopy from datetime import datetime, timezone from pathlib import Path from typing import List, Optional, Tuple, Type from pandas import DataFrame from freqtrade import misc from freqtrade.configuration import TimeRange from freqtrade.constants import DEFAULT_TRADES_COLUMNS, ListPairsWithTimeframes from freqtrade.data.converter import (clean_ohlcv_dataframe, trades_convert_types, trades_df_remove_duplicates, trim_dataframe) from freqtrade.enums import CandleType, TradingMode from freqtrade.exchange import timeframe_to_seconds from .jsondatahandler import JsonDataHandler from .jsondatahandler import JsonGzDataHandler from .hdf5datahandler import HDF5DataHandler from .featherdatahandler import FeatherDataHandler from .parquetdatahandler import ParquetDataHandler
10,948
""" Abstract datahandler interface. It's subclasses handle and storing data from disk. """ logger = logging.getLogger(__name__) class IDataHandler(ABC): _OHLCV_REGEX = r'^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)' def __init__(self, datadir: Path) -> None: self._datadir = datadir @classmethod def _get_file_extension(cls) -> str: """ Get file extension for this particular datahandler """ raise NotImplementedError() @classmethod def ohlcv_get_available_data( cls, datadir: Path, trading_mode: TradingMode) -> ListPairsWithTimeframes: """ Returns a list of all pairs with ohlcv data available in this datadir :param datadir: Directory to search for ohlcv files :param trading_mode: trading-mode to be used :return: List of Tuples of (pair, timeframe, CandleType) """ if trading_mode == TradingMode.FUTURES: datadir = datadir.joinpath('futures') _tmp = [ re.search( cls._OHLCV_REGEX, p.name ) for p in datadir.glob(f"*.{cls._get_file_extension()}")] return [ ( cls.rebuild_pair_from_filename(match[1]), cls.rebuild_timeframe_from_filename(match[2]),
""" Abstract datahandler interface. It's subclasses handle and storing data from disk. """ logger = logging.getLogger(__name__) class IDataHandler(ABC): _OHLCV_REGEX = r'^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)' def __init__(self, datadir: Path) -> None: self._datadir = datadir @classmethod def _get_file_extension(cls) -> str: """ Get file extension for this particular datahandler """ raise NotImplementedError() @classmethod def ohlcv_get_available_data( cls, datadir: Path, trading_mode: TradingMode) -> ListPairsWithTimeframes: """ Returns a list of all pairs with ohlcv data available in this datadir :param datadir: Directory to search for ohlcv files :param trading_mode: trading-mode to be used :return: List of Tuples of (pair, timeframe, CandleType) """ if trading_mode == TradingMode.FUTURES: datadir = datadir.joinpath('futures') _tmp = [ re.search( cls._OHLCV_REGEX, p.name ) for p in datadir.glob(f"*.{cls._get_file_extension()}")] return [ ( cls.rebuild_pair_from_filename(match[1]), cls.rebuild_timeframe_from_filename(match[2]),
CandleType.from_string(match[3])
7
2023-10-21 10:02:05+00:00
16k
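The freqtrade record's gold next_line turns the third regex group into a CandleType; the sketch below runs the _OHLCV_REGEX from the cropped_code over two made-up data filenames to show what the three groups capture (the filenames are hypothetical, and the pair/timeframe rebuilding helpers from the class are not reproduced here).

import re

# Same pattern as IDataHandler._OHLCV_REGEX in the record above.
OHLCV_REGEX = r'^([a-zA-Z_\d-]+)\-(\d+[a-zA-Z]{1,2})\-?([a-zA-Z_]*)?(?=\.)'

for name in ('XRP_USDT-5m-futures.feather', 'BTC_USDT-1h.json'):
    match = re.search(OHLCV_REGEX, name)
    if match:
        pair, timeframe, candle_type = match[1], match[2], match[3]
        # The third group is empty for spot data and e.g. 'futures' or 'mark' otherwise,
        # which is why CandleType.from_string falls back to SPOT on an empty string.
        print(pair, timeframe, candle_type or 'spot')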
yanzhh/HGERE
transformers/src/transformers/modeling_roberta.py
[ { "identifier": "RobertaConfig", "path": "transformers/src/transformers/configuration_roberta.py", "snippet": "class RobertaConfig(BertConfig):\n r\"\"\"\n This is the configuration class to store the configuration of an :class:`~transformers.RobertaModel`.\n It is used to instantiate an RoBERTa model according to the specified arguments, defining the model\n architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of\n the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture.\n\n Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used\n to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`\n for more information.\n\n The :class:`~transformers.RobertaConfig` class directly inherits :class:`~transformers.BertConfig`.\n It reuses the same defaults. Please check the parent class for more information.\n\n Example::\n\n from transformers import RobertaConfig, RobertaModel\n\n # Initializing a RoBERTa configuration\n configuration = RobertaConfig()\n\n # Initializing a model from the configuration\n model = RobertaModel(configuration)\n\n # Accessing the model configuration\n configuration = model.config\n\n Attributes:\n pretrained_config_archive_map (Dict[str, str]):\n A dictionary containing all the available pre-trained checkpoints.\n \"\"\"\n pretrained_config_archive_map = ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP\n model_type = \"roberta\"" }, { "identifier": "add_start_docstrings", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "add_start_docstrings_to_callable", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings_to_callable(*docstr):\n def docstring_decorator(fn):\n class_name = \":class:`~transformers.{}`\".format(fn.__qualname__.split(\".\")[0])\n intro = \" The {} forward method, overrides the :func:`__call__` special method.\".format(class_name)\n note = r\"\"\"\n\n .. 
note::\n Although the recipe for forward pass needs to be defined within\n this function, one should call the :class:`Module` instance afterwards\n instead of this since the former takes care of running the\n pre and post processing steps while the latter silently ignores them.\n \"\"\"\n fn.__doc__ = intro + note + \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "BertEmbeddings", "path": "transformers/src/transformers/modeling_bert.py", "snippet": "BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {\n\t\"bert-base-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin\",\n\t\"bert-large-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin\",\n\t\"bert-base-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin\",\n\t\"bert-large-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin\",\n\t\"bert-base-multilingual-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin\",\n\t\"bert-base-multilingual-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin\",\n\t\"bert-base-chinese\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin\",\n\t\"bert-base-german-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin\",\n\t\"bert-large-uncased-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin\",\n\t\"bert-large-cased-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin\",\n\t\"bert-large-uncased-whole-word-masking-finetuned-squad\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n\t\"bert-large-cased-whole-word-masking-finetuned-squad\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin\",\n\t\"bert-base-cased-finetuned-mrpc\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin\",\n\t\"bert-base-german-dbmdz-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin\",\n\t\"bert-base-german-dbmdz-uncased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin\",\n\t\"bert-base-japanese\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-pytorch_model.bin\",\n\t\"bert-base-japanese-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-whole-word-masking-pytorch_model.bin\",\n\t\"bert-base-japanese-char\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-pytorch_model.bin\",\n\t\"bert-base-japanese-char-whole-word-masking\": \"https://s3.amazonaws.com/models.huggingface.co/bert/cl-tohoku/bert-base-japanese-char-whole-word-masking-pytorch_model.bin\",\n\t\"bert-base-finnish-cased-v1\": \"https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-cased-v1/pytorch_model.bin\",\n\t\"bert-base-finnish-uncased-v1\": 
\"https://s3.amazonaws.com/models.huggingface.co/bert/TurkuNLP/bert-base-finnish-uncased-v1/pytorch_model.bin\",\n\t\"bert-base-dutch-cased\": \"https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/pytorch_model.bin\",\n}\nACT2FN = {\"gelu\": gelu, \"relu\": torch.nn.functional.relu, \"swish\": swish, \"gelu_new\": gelu_new, \"mish\": mish}\nBERT_START_DOCSTRING = r\"\"\"\n\tThis model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.\n\tUse it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general\n\tusage and behavior.\n\n\tParameters:\n\t\tconfig (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.\n\t\t\tInitializing with a config file does not load the weights associated with the model, only the configuration.\n\t\t\tCheck out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.\n\"\"\"\nBERT_INPUTS_DOCSTRING = r\"\"\"\n\tArgs:\n\t\tinput_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):\n\t\t\tIndices of input sequence tokens in the vocabulary.\n\n\t\t\tIndices can be obtained using :class:`transformers.BertTokenizer`.\n\t\t\tSee :func:`transformers.PreTrainedTokenizer.encode` and\n\t\t\t:func:`transformers.PreTrainedTokenizer.encode_plus` for details.\n\n\t\t\t`What are input IDs? <../glossary.html#input-ids>`__\n\t\tattention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n\t\t\tMask to avoid performing attention on padding token indices.\n\t\t\tMask values selected in ``[0, 1]``:\n\t\t\t``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\n\t\t\t`What are attention masks? <../glossary.html#attention-mask>`__\n\t\ttoken_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n\t\t\tSegment token indices to indicate first and second portions of the inputs.\n\t\t\tIndices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``\n\t\t\tcorresponds to a `sentence B` token\n\n\t\t\t`What are token type IDs? <../glossary.html#token-type-ids>`_\n\t\tposition_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n\t\t\tIndices of positions of each input sequence tokens in the position embeddings.\n\t\t\tSelected in the range ``[0, config.max_position_embeddings - 1]``.\n\n\t\t\t`What are position IDs? 
<../glossary.html#position-ids>`_\n\t\thead_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):\n\t\t\tMask to nullify selected heads of the self-attention modules.\n\t\t\tMask values selected in ``[0, 1]``:\n\t\t\t:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.\n\t\tinputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n\t\t\tOptionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.\n\t\t\tThis is useful if you want more control over how to convert `input_ids` indices into associated vectors\n\t\t\tthan the model's internal embedding lookup matrix.\n\t\tencoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):\n\t\t\tSequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention\n\t\t\tif the model is configured as a decoder.\n\t\tencoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):\n\t\t\tMask to avoid performing attention on the padding token indices of the encoder input. This mask\n\t\t\tis used in the cross-attention if the model is configured as a decoder.\n\t\t\tMask values selected in ``[0, 1]``:\n\t\t\t``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n\"\"\"\ndef load_tf_weights_in_bert(model, config, tf_checkpoint_path):\ndef mish(x):\n\tdef __init__(self, config):\n\tdef forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):\n\tdef __init__(self, config):\n\tdef transpose_for_scores(self, x):\n\tdef forward(\n\t\tself,\n\t\thidden_states,\n\t\tattention_mask=None,\n\t\thead_mask=None,\n\t\tencoder_hidden_states=None,\n\t\tencoder_attention_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(self, hidden_states, input_tensor):\n\tdef __init__(self, config):\n\tdef prune_heads(self, heads):\n\tdef forward(\n\t\tself,\n\t\thidden_states,\n\t\tattention_mask=None,\n\t\thead_mask=None,\n\t\tencoder_hidden_states=None,\n\t\tencoder_attention_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(self, hidden_states):\n\tdef __init__(self, config):\n\tdef forward(self, hidden_states, input_tensor):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\thidden_states,\n\t\tattention_mask=None,\n\t\thead_mask=None,\n\t\tencoder_hidden_states=None,\n\t\tencoder_attention_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\thidden_states,\n\t\tattention_mask=None,\n\t\thead_mask=None,\n\t\tencoder_hidden_states=None,\n\t\tencoder_attention_mask=None,\n\t\tfull_attention_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(self, hidden_states):\n\tdef __init__(self, config):\n\tdef forward(self, hidden_states):\n\tdef __init__(self, config):\n\tdef forward(self, hidden_states):\n\tdef __init__(self, config):\n\tdef forward(self, sequence_output):\n\tdef __init__(self, config):\n\tdef forward(self, pooled_output):\n\tdef __init__(self, config):\n\tdef forward(self, sequence_output, pooled_output):\n\tdef _init_weights(self, module):\n\tdef __init__(self, config):\n\tdef get_input_embeddings(self):\n\tdef set_input_embeddings(self, value):\n\tdef _prune_heads(self, heads_to_prune):\n\tdef 
forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tencoder_hidden_states=None,\n\t\tencoder_attention_mask=None,\n\t\tfull_attention_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef get_output_embeddings(self):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tmasked_lm_labels=None,\n\t\tnext_sentence_label=None,\n\t):\n\tdef __init__(self, config):\n\tdef get_output_embeddings(self):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tmasked_lm_labels=None,\n\t\tencoder_hidden_states=None,\n\t\tencoder_attention_mask=None,\n\t\tlm_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tnext_sentence_label=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tstart_positions=None,\n\t\tend_positions=None,\n\t):\n\tdef __init__(self, config):\n\tdef or_softmax_cross_entropy_loss_one_doc(self, logits, target, ignore_index=-1, dim=-1):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tstart_positions=None,\n\t\tend_positions=None,\n\t\tanswer_masks=None,\n\t):\n\tdef __init__(self, config):\n\tdef or_softmax_cross_entropy_loss_one_doc(self, logits, target, ignore_index=-1, dim=-1):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tstart_positions=None,\n\t\tend_positions=None,\n\t\tanswer_masks=None,\n\t\tsent_start_mapping=None,\n\t\tsent_end_mapping=None,\n\t\tsent_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tcand_positions=None,\n\t\tanswer_index=None,\n\t\tsent_start_mapping=None,\n\t\tsent_end_mapping=None,\n\t\tsent_labels=None,\n\n\t):\n\tdef __init__(self, config):\n\tdef 
forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tcand_positions=None,\n\t\tanswer_index=None,\n\t\tinstance_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tstart_positions=None,\n\t\tend_positions=None,\n\t\tswitch_labels=None,\n\t\tsent_start_mapping=None,\n\t\tsent_end_mapping=None,\n\t\tsent_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t\tmention_pos=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tm1_ner_labels=None,\n\t\tm2_ner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t\tsub_ner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef 
__init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,\n\t\t\t\tstart_positions=None, end_positions=None, answer_masks=None, answer_nums=None):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\texample_L=None,\n\t\t# mention_pos=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\tmention_pos=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\tmention_pos=None,\n\t\tfull_attention_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\tmention_pos=None,\n\t\tfull_attention_mask=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(self, hidden_states):\n\tdef __init__(self, config):\n\tdef forward(self, sequence_output):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tmasked_lm_labels=None,\n\t\tencoder_hidden_states=None,\n\t\tencoder_attention_mask=None,\n\t\tlm_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\texample_L=None,\n\t\t# mention_pos=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\t# mention_pos=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\t# labels=None,\n\t\t# mention_pos=None,\n\t\tner_labels=None,\n\t\ttrigger_labels=None,\n\t\t# argument_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef 
forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\t# labels=None,\n\t\t# mention_pos=None,\n\t\tsub_positions=None,\n\t\tner_labels=None,\n\t\ttrigger_labels=None,\n\t\targument_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\tstart_positions=None,\n\t\tend_positions=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t):\n\tdef __init__(self, config, args=None):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tlabels=None,\n\t\tmention_pos=None,\n\t\tfull_attention_mask=None,\n\t):\n\tdef __init__(self, config, args=None):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\tlabels=None,\n\t\tner_labels=None,\n\t\tsub_ner_labels=None,\n\t):\n\tdef __init__(self, config, args=None):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\t# obj_positions=None,\n\t\trel_labels=None,\n\t\tner_labels=None,\n\t\tent_numbers=None,\n\t\t# sub_ner_labels=None,\n\t):\n\tdef __init__(self, config, args=None):\n\tdef forward(\n\t\tself,\n\t\tinput_ids=None,\n\t\tattention_mask=None,\n\t\tmentions=None,\n\t\ttoken_type_ids=None,\n\t\tposition_ids=None,\n\t\thead_mask=None,\n\t\tinputs_embeds=None,\n\t\tsub_positions=None,\n\t\t# obj_positions=None,\n\t\trel_labels=None, # bsz * max_ent_num * max_ent_num\n\t\tner_labels=None, # bsz * max_ent_num\n\t\tent_numbers=None,\n\t\t# sub_ner_labels=None,\n\t):\nclass BertEmbeddings(nn.Module):\nclass BertSelfAttention(nn.Module):\nclass BertSelfOutput(nn.Module):\nclass BertAttention(nn.Module):\nclass BertIntermediate(nn.Module):\nclass BertOutput(nn.Module):\nclass BertLayer(nn.Module):\nclass BertEncoder(nn.Module):\nclass BertPooler(nn.Module):\nclass BertPredictionHeadTransform(nn.Module):\nclass BertLMPredictionHead(nn.Module):\nclass BertOnlyMLMHead(nn.Module):\nclass BertOnlyNSPHead(nn.Module):\nclass BertPreTrainingHeads(nn.Module):\nclass BertPreTrainedModel(PreTrainedModel):\nclass BertModel(BertPreTrainedModel):\nclass BertForPreTraining(BertPreTrainedModel):\nclass BertForMaskedLM(BertPreTrainedModel):\nclass BertForNextSentencePrediction(BertPreTrainedModel):\nclass BertForSequenceClassification(BertPreTrainedModel):\nclass BertForSequenceClassification(BertPreTrainedModel):\nclass 
BertForMultipleChoice(BertPreTrainedModel):\nclass BertForTokenClassification(BertPreTrainedModel):\nclass BertForQuestionAnswering(BertPreTrainedModel):\nclass BertForTriviaQuestionAnswering(BertPreTrainedModel):\nclass BertForQuestionAnsweringHotpotSeg(BertPreTrainedModel):\nclass BertForWikihop(BertPreTrainedModel):\nclass BertForWikihopMulti(BertPreTrainedModel):\nclass BertForQuestionAnsweringHotpot(BertPreTrainedModel):\nclass BertForACEBothSub(BertPreTrainedModel):\nclass BertForACEBothOneDropoutSpanSub(BertPreTrainedModel):\nclass BertForACEBothOneDropoutSub(BertPreTrainedModel):\nclass BertForACEBothOneDropoutLeviPair(BertPreTrainedModel):\nclass BertForACEBothOneDropout(BertPreTrainedModel):\nclass BertForACEBothOneDropoutNERSub(BertPreTrainedModel):\nclass BertForACEBothLMSub(BertPreTrainedModel):\nclass BertForACEBothOneDropoutLMSub(BertPreTrainedModel):\nclass BertForMarkerQA(BertPreTrainedModel):\nclass BertForCorefSub(BertPreTrainedModel):\nclass BertForACEBothOneDropoutSubNoNer(BertPreTrainedModel):\nclass BertForACEBothSubNoNer(BertPreTrainedModel):\nclass BertForQuestionAnsweringMultiAnswer(BertPreTrainedModel):\nclass BertForNER(BertPreTrainedModel):\nclass BertForSpanNER(BertPreTrainedModel):\nclass BertForSpanMarkerNER(BertPreTrainedModel):\nclass BertForSpanMarkerBiNER(BertPreTrainedModel):\nclass BertLMPredictionHeadTransform(nn.Module):\nclass BertOnlyMLMHeadTransform(nn.Module):\nclass BertForMaskedLMTransform(BertPreTrainedModel):\nclass BertForLeftLMNER(BertPreTrainedModel):\nclass BertForRightLMNER(BertPreTrainedModel):\nclass BertForEvent(BertPreTrainedModel):\nclass BertForEventArg(BertPreTrainedModel):\nclass BertForMarkerSEQA(BertPreTrainedModel):\nclass BertForTACRED(BertPreTrainedModel):\nclass BertForTACREDNer(BertPreTrainedModel):\nclass BertForSpanMarkerNerPruner(BertPreTrainedModel):\nclass BertForPlmarker(BertPreTrainedModel):\nclass BertForBaselines(BertPreTrainedModel):\nclass BertForHyperGNN(BertPreTrainedModel):" }, { "identifier": "create_position_ids_from_input_ids", "path": "transformers/src/transformers/modeling_utils.py", "snippet": "def create_position_ids_from_input_ids(input_ids, padding_idx):\n \"\"\" Replace non-padding symbols with their position numbers. Position numbers begin at\n padding_idx+1. Padding symbols are ignored. This is modified from fairseq's\n `utils.make_positions`.\n\n :param torch.Tensor x:\n :return torch.Tensor:\n \"\"\"\n # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.\n mask = input_ids.ne(padding_idx).int()\n incremental_indicies = torch.cumsum(mask, dim=1).type_as(mask) * mask\n return incremental_indicies.long() + padding_idx" } ]
import logging

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss

from .configuration_roberta import RobertaConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_bert import BertEmbeddings, BertLayerNorm, BertModel, BertPreTrainedModel, gelu
from .modeling_utils import create_position_ids_from_input_ids
10831
config_class = RobertaConfig pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "roberta" def __init__(self, config): super().__init__(config) self.embeddings = RobertaEmbeddings(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """, ROBERTA_START_DOCSTRING) class RobertaForMaskedLM(BertPreTrainedModel): config_class = RobertaConfig pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "roberta" def __init__(self, config): super().__init__(config) self.roberta = RobertaModel(config) self.lm_head = RobertaLMHead(config) self.init_weights() def get_output_embeddings(self): return self.lm_head.decoder @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, ): r""" masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs: masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`) Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: from transformers import RobertaTokenizer, RobertaForMaskedLM import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForMaskedLM.from_pretrained('roberta-base') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here if masked_lm_labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) class RobertaLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features)
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch RoBERTa model. """ # from .modeling_ensemblebert import EnsembleBertModel logger = logging.getLogger(__name__) ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = { "roberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-pytorch_model.bin", "roberta-large": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-pytorch_model.bin", "roberta-large-mnli": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-pytorch_model.bin", "distilroberta-base": "https://s3.amazonaws.com/models.huggingface.co/bert/distilroberta-base-pytorch_model.bin", "roberta-base-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-openai-detector-pytorch_model.bin", "roberta-large-openai-detector": "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-openai-detector-pytorch_model.bin", } class RobertaEmbeddings(BertEmbeddings): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ def __init__(self, config): super().__init__(config) self.padding_idx = 1 self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx) self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) return super().forward( input_ids, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds ) def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. :param torch.Tensor inputs_embeds: :return torch.Tensor: """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) ROBERTA_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.RobertaConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ROBERTA_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.RobertaTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings( "The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.", ROBERTA_START_DOCSTRING, ) class RobertaModel(BertModel): """ This class overrides :class:`~transformers.BertModel`. Please check the superclass for the appropriate documentation alongside usage examples. """ config_class = RobertaConfig pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "roberta" def __init__(self, config): super().__init__(config) self.embeddings = RobertaEmbeddings(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value @add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. 
""", ROBERTA_START_DOCSTRING) class RobertaForMaskedLM(BertPreTrainedModel): config_class = RobertaConfig pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP base_model_prefix = "roberta" def __init__(self, config): super().__init__(config) self.roberta = RobertaModel(config) self.lm_head = RobertaLMHead(config) self.init_weights() def get_output_embeddings(self): return self.lm_head.decoder @add_start_docstrings_to_callable(ROBERTA_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, ): r""" masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs: masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``: Masked language modeling loss. prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`) Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
Examples:: from transformers import RobertaTokenizer, RobertaForMaskedLM import torch tokenizer = RobertaTokenizer.from_pretrained('roberta-base') model = RobertaForMaskedLM.from_pretrained('roberta-base') input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 outputs = model(input_ids, masked_lm_labels=input_ids) loss, prediction_scores = outputs[:2] """ outputs = self.roberta( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here if masked_lm_labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) outputs = (masked_lm_loss,) + outputs return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions) class RobertaLMHead(nn.Module): """Roberta Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features)
x = gelu(x)
3
2023-10-15 02:31:09+00:00
16k
generative-skill-chaining/gsc-code
generative_skill_chaining/envs/pybullet/table/primitives.py
[ { "identifier": "base", "path": "generative_skill_chaining/envs/base.py", "snippet": "class Primitive:\nclass Env(gym.Env[np.ndarray, np.ndarray]):\nclass PrimitiveEnv(Env):\n class Scope:\n def __init__(self, env: \"Env\", idx_policy: int):\n def env(self) -> \"Env\":\n def idx_policy(self) -> int:\n def set_idx_policy(self, idx_policy: int) -> None:\n def get_policy_args(self) -> Optional[Dict[str, List[int]]]:\n def scale_action(cls, action: np.ndarray) -> np.ndarray:\n def normalize_action(cls, action: np.ndarray) -> np.ndarray:\n def sample(self) -> np.ndarray:\n def __str__(self) -> str:\n def action_space(self) -> gym.spaces.Box: # type: ignore\n def action_scale(self) -> gym.spaces.Box:\n def action_skeleton(self) -> Sequence[Primitive]:\n def primitives(self) -> List[str]:\n def get_primitive(self) -> Primitive:\n def set_primitive(\n self,\n primitive: Optional[Primitive] = None,\n action_call: Optional[str] = None,\n idx_policy: Optional[int] = None,\n policy_args: Optional[Any] = None,\n ) -> \"Env\":\n def get_primitive_info(\n self,\n action_call: Optional[str] = None,\n idx_policy: Optional[int] = None,\n policy_args: Optional[Any] = None,\n ) -> Primitive:\n def create_primitive_env(self, primitive: Primitive) -> \"Env\":\n def get_state(self) -> np.ndarray:\n def set_state(self, state: np.ndarray) -> bool:\n def get_observation(self, image: Optional[bool] = None) -> np.ndarray:\n def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, bool, dict]:\n def reset(\n self, *, seed: Optional[int] = None, options: Optional[dict] = None\n ) -> Tuple[np.ndarray, dict]:\n def render(self) -> np.ndarray: # type: ignore\n def record_start(\n self,\n prepend_id: Optional[Any] = None,\n frequency: Optional[int] = None,\n mode: str = \"default\",\n ) -> bool:\n def record_stop(self, save_id: Optional[Any] = None, mode: str = \"default\") -> bool:\n def record_save(\n self,\n path: Union[str, pathlib.Path],\n reset: bool = True,\n mode: Optional[str] = None,\n ) -> bool:\n def __init__(self, primitive_env: \"PrimitiveEnv\"):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def __init__(self, env: Env, primitive: Primitive):\n def get_primitive(self) -> Primitive:\n def set_primitive(\n self,\n primitive: Optional[Primitive] = None,\n action_call: Optional[str] = None,\n idx_policy: Optional[int] = None,\n policy_args: Optional[Any] = None,\n ) -> Env:\n def get_primitive_info(\n self,\n action_call: Optional[str] = None,\n idx_policy: Optional[int] = None,\n policy_args: Optional[Any] = None,\n ) -> Primitive:\n def create_primitive_env(self, primitive: Primitive) -> Env:\n def get_state(self) -> np.ndarray:\n def set_state(self, state: np.ndarray) -> bool:\n def get_observation(self, image: Optional[bool] = None) -> np.ndarray:\n def reset(\n self, *, seed: Optional[int] = None, options: Optional[dict] = None\n ) -> Tuple[np.ndarray, dict]:\n def step(self, action: np.ndarray) -> Tuple[np.ndarray, float, bool, bool, dict]:\n def render(self) -> np.ndarray: # type: ignore\n def record_start(\n self,\n prepend_id: Optional[Any] = None,\n frequency: Optional[int] = None,\n mode: str = \"default\",\n ) -> bool:\n def record_stop(self, save_id: Optional[Any] = None, mode: str = \"default\") -> bool:\n def record_save(\n self,\n path: Union[str, pathlib.Path],\n reset: bool = True,\n mode: Optional[str] = None,\n ) -> bool:" }, { "identifier": "math", "path": "generative_skill_chaining/envs/pybullet/sim/math.py", "snippet": "PYBULLET_STEPS_PER_SEC = 
240\nPYBULLET_TIMESTEP = 1 / PYBULLET_STEPS_PER_SEC\nclass Pose:\n def from_eigen(pose: eigen.Isometry3d) -> \"Pose\":\n def to_eigen(self) -> eigen.Isometry3d:\ndef comb(n: int, r: int) -> int:" }, { "identifier": "ControlException", "path": "generative_skill_chaining/envs/pybullet/sim/robot.py", "snippet": "class ControlException(Exception):\n \"\"\"An exception raised due to a control fault (e.g. reaching singularity).\"\"\"\n\n pass" }, { "identifier": "Robot", "path": "generative_skill_chaining/envs/pybullet/sim/robot.py", "snippet": "class Robot(body.Body):\n \"\"\"User-facing robot interface.\"\"\"\n\n def __init__(\n self,\n physics_id: int,\n step_simulation_fn: Callable[[], None],\n urdf: str,\n arm_class: Union[str, Type[arm.Arm]],\n arm_kwargs: Dict[str, Any],\n gripper_class: Union[str, Type[gripper.Gripper]],\n gripper_kwargs: Dict[str, Any],\n ):\n \"\"\"Loads the robot from a urdf file.\n\n Args:\n physics_id: Pybullet physics client id.\n step_simulation_fn: Function to step simulation.\n urdf: Path to urdf.\n arm_class: In the generative_skill_chaining.envs.pybullet namespace.\n arm_kwargs: Arm kwargs from yaml config.\n gripper_class: In the generative_skill_chaining.envs.pybullet namespace.\n gripper_kwargs: Gripper kwargs from yaml config.\n \"\"\"\n body_id = p.loadURDF(\n fileName=urdf,\n useFixedBase=True,\n flags=p.URDF_USE_INERTIA_FROM_FILE\n | p.URDF_MAINTAIN_LINK_ORDER, # | p.URDF_MERGE_FIXED_LINKS\n physicsClientId=physics_id,\n )\n super().__init__(physics_id, body_id)\n\n if isinstance(arm_class, str):\n arm_class = configs.get_class(arm_class, pybullet)\n if isinstance(gripper_class, str):\n gripper_class = configs.get_class(gripper_class, pybullet)\n\n self._arm = arm_class(self.physics_id, self.body_id, **arm_kwargs)\n T_world_to_ee = dyn.cartesian_pose(self.arm.ab).inverse()\n self._gripper = gripper_class(\n self.physics_id, self.body_id, T_world_to_ee, **gripper_kwargs\n )\n\n self.step_simulation = step_simulation_fn\n\n @property\n def arm(self) -> arm.Arm:\n \"\"\"Controllable arm.\"\"\"\n return self._arm\n\n @property\n def gripper(self) -> gripper.Gripper:\n \"\"\"Controllable gripper.\"\"\"\n return self._gripper\n\n @property\n def home_pose(self) -> math.Pose:\n return self.arm.home_pose\n\n def reset(self) -> bool:\n \"\"\"Resets the robot by setting the arm to its home configuration and the gripper to the open position.\n\n This method disables torque control and bypasses simulation.\n \"\"\"\n self.gripper.reset()\n self.clear_load()\n status = self.arm.reset()\n if isinstance(self.arm, real.arm.Arm):\n status = self.goto_configuration(self.arm.q_home)\n return status\n\n def clear_load(self) -> None:\n \"\"\"Resets the end-effector load to the gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n self.arm.ab.replace_load(self.gripper.inertia)\n else:\n self.arm.ab.clear_load()\n\n def set_load(self, inertia: dyn.SpatialInertiad) -> None:\n \"\"\"Sets the end-effector load to the sum of the given inertia and gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n inertia = inertia + self.gripper.inertia\n self.arm.ab.replace_load(inertia)\n\n def get_state(self) -> Dict[str, Any]:\n return {\n \"arm\": self.arm.get_state(),\n \"gripper\": self.gripper.get_state(),\n \"load\": copy.deepcopy(self.arm.ab.inertia_load),\n }\n\n def set_state(self, state: Dict[str, Any]) -> None:\n self.arm.set_state(state[\"arm\"])\n self.gripper.set_state(state[\"gripper\"])\n idx_link, load_inertia = next(iter(state[\"load\"].items()))\n 
self.arm.ab.replace_load(load_inertia, idx_link)\n\n def goto_home(self) -> bool:\n \"\"\"Uses opspace control to go to the home position.\"\"\"\n return self.goto_pose(\n self.home_pose.pos,\n self.home_pose.quat,\n pos_gains=(64, 16),\n ori_gains=(64, 16),\n )\n\n def _is_colliding(\n self, body_id_a: int, body_id_b: int, link_id_a: Optional[int] = None\n ) -> bool:\n kwargs = {}\n if link_id_a is not None:\n kwargs[\"linkIndexA\"] = link_id_a\n contacts = p.getContactPoints(\n bodyA=body_id_a, bodyB=body_id_b, physicsClientId=self.physics_id, **kwargs\n )\n\n if not contacts:\n return False\n\n force = contacts[0][9]\n return force > 0.0\n\n def goto_pose(\n self,\n pos: Optional[np.ndarray] = None,\n quat: Optional[Union[eigen.Quaterniond, np.ndarray]] = None,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n ori_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n check_collisions: Sequence[int] = [],\n check_collision_freq: int = 10,\n ) -> bool:\n \"\"\"Uses opspace control to go to the desired pose.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n pos: Optional position. Maintains current position if None.\n quat: Optional quaternion. Maintains current orientation if None.\n pos_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n ori_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n timeout: Uses the timeout specified in the yaml arm config if None.\n check_collisions: Raise an exception if the gripper or grasped\n object collides with any of the body_ids in this list.\n check_collision_freq: Iteration interval with which to check\n collisions.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n if check_collisions:\n body_ids_a = [self.body_id] * len(self.gripper.finger_links)\n link_ids_a: List[Optional[int]] = list(self.gripper.finger_links)\n grasp_body_id = self.gripper._gripper_state.grasp_body_id\n if grasp_body_id is not None:\n body_ids_a.append(grasp_body_id)\n link_ids_a.append(None)\n\n # Set the pose goal.\n self.arm.set_pose_goal(pos, quat, pos_gains, ori_gains, timeout)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter = 0\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter += 1\n\n if isinstance(self.arm, real.arm.Arm):\n continue\n\n if not check_collisions or iter % check_collision_freq != 0:\n continue\n\n # Terminate early if there are collisions with the gripper fingers\n # or grasped object.\n for body_id_a, link_id_a in zip(body_ids_a, link_ids_a):\n for body_id_b in check_collisions:\n if self._is_colliding(body_id_a, body_id_b, link_id_a):\n raise ControlException(\n f\"Robot.goto_pose({pos}, {quat}): Collision {body_id_a}:{link_id_a}, {body_id_b}\"\n )\n # print(\"Robot.goto_pose:\", pos, quat, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.goto_pose({pos}, {quat}): Singularity\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def goto_configuration(self, q: np.ndarray) -> bool:\n \"\"\"Sets the robot to the desired joint configuration.\n\n Args:\n q: Joint configuration.\n Returns:\n 
True if the controller converges to the desired position or zero\n velocity, false if the command times out.\n \"\"\"\n # Set the configuration goal.\n self.arm.set_configuration_goal(q)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp(\n self,\n command: float,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n ) -> bool:\n \"\"\"Sets the gripper to the desired grasp (0.0 open, 1.0 closed).\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Any existing grasp constraints will be cleared and no new ones will be\n created. Use `Robot.grasp_object()` to create a grasp constraint.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n # Clear any existing grasp constraints.\n self.gripper.remove_grasp_constraint()\n self.clear_load()\n\n # Set the new grasp command.\n self.gripper.set_grasp(command, pos_gains, timeout)\n\n # Simulate until the grasp command finishes.\n status = self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.arm.update_torques()\n self.step_simulation()\n status = self.gripper.update_torques()\n # print(\"Robot.grasp:\", command, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.grasp({command})\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp_object(\n self,\n obj: body.Body,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n realistic: bool = True,\n ) -> bool:\n \"\"\"Attempts to grasp an object and attaches the object to the gripper via a pose constraint.\n\n This method blocks until the command finishes or times out. 
A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n realistic: If false, creates a pose constraint regardless of whether\n the object is in a secure grasp.\n Returns:\n True if the object is successfully grasped, false otherwise.\n \"\"\"\n if realistic:\n self.grasp(1, pos_gains, timeout)\n\n # Wait for grasped object to settle.\n status = self.gripper.update_torques()\n while (\n status\n in (\n articulated_body.ControlStatus.VEL_CONVERGED,\n articulated_body.ControlStatus.IN_PROGRESS,\n )\n and self.gripper._gripper_state.iter_timeout >= 0\n and (obj.twist() > 0.001).any()\n ):\n self.arm.update_torques()\n status = self.gripper.update_torques()\n self.step_simulation()\n\n # Make sure fingers aren't fully closed.\n if status == articulated_body.ControlStatus.POS_CONVERGED:\n return False\n\n # Lock the object in place with a grasp constraint.\n if not self.gripper.create_grasp_constraint(obj.body_id, realistic):\n return False\n\n # Add object load.\n T_obj_to_world = obj.pose().to_eigen()\n T_ee_to_world = dyn.cartesian_pose(self.arm.ab)\n T_obj_to_ee = T_ee_to_world.inverse() * T_obj_to_world\n self.set_load(obj.inertia * T_obj_to_ee)\n\n return True" }, { "identifier": "Box", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Box(Object):\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 0.1,\n ):\n box = shapes.Box(size=np.array(size), mass=mass, color=np.array(color))\n body_id = shapes.create_body(box, physics_id=physics_id)\n self._shape = box\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = box.size\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return [self._shape]" }, { "identifier": "Hook", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Hook(Object):\n @staticmethod\n def compute_link_positions(\n head_length: float, handle_length: float, handle_y: float, radius: float\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n dy = (\n 0.5\n * np.sign(handle_y)\n * max(0, (abs(handle_y) - 1.0) * head_length / 2 + radius)\n )\n pos_handle = np.array([-radius / 2, handle_y * head_length / 2 - dy, 0.0])\n pos_head = np.array([(handle_length - radius) / 2, -dy, 0.0])\n pos_joint = np.array(\n [(handle_length - radius) / 2, handle_y * head_length / 2 - dy, 0.0]\n )\n\n return pos_handle, pos_head, pos_joint\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n head_length: float,\n handle_length: float,\n handle_y: float,\n color: Union[List[float], np.ndarray],\n radius: float = 0.02,\n mass: float = 0.1,\n ):\n if not isinstance(color, np.ndarray):\n color = np.array(color)\n\n pos_handle, pos_head, pos_joint = Hook.compute_link_positions(\n head_length=head_length,\n handle_length=handle_length,\n handle_y=handle_y,\n radius=radius,\n )\n handle = shapes.Cylinder(\n radius=radius,\n length=handle_length,\n mass=(handle_length / (head_length + handle_length + radius)) * mass,\n color=color,\n 
pose=math.Pose(\n pos=pos_handle,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([0.0, 1.0, 0.0]))\n ).coeffs,\n ),\n )\n head = shapes.Cylinder(\n radius=radius,\n length=head_length,\n mass=(head_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_head,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([1.0, 0.0, 0.0]))\n ).coeffs,\n ),\n )\n joint = shapes.Sphere(\n radius=radius,\n mass=(radius / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(pos=pos_joint),\n )\n self._shapes = [joint, handle, head]\n body_id = shapes.create_body(\n self.shapes, link_parents=[0, 0], physics_id=physics_id\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.head_length = head_length\n self._state.handle_length = handle_length\n self._state.handle_y = handle_y\n self._radius = radius\n\n self._size = np.array(\n [handle_length + radius, head_length + 2 * abs(pos_head[1]), 2 * radius]\n )\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def head_length(self) -> float:\n return self._state.head_length # type: ignore\n\n @property\n def handle_length(self) -> float:\n return self._state.handle_length # type: ignore\n\n @property\n def handle_y(self) -> float:\n return self._state.handle_y # type: ignore\n\n @property\n def radius(self) -> float:\n return self._radius\n\n @property\n def size(self) -> np.ndarray:\n return self._size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the convex hulls of the handle and head links.\"\"\"\n handle_pose = self.shapes[1].pose\n head_pose = self.shapes[2].pose\n assert handle_pose is not None and head_pose is not None\n\n positions = np.array(\n [\n [0.0, handle_pose.pos[1], 0.0],\n [head_pose.pos[0], 0.0, 0.0],\n ]\n )\n sizes = np.array(\n [\n [self.size[0], 2 * self.radius, 2 * self.radius],\n [2 * self.radius, self.size[1], 2 * self.radius],\n ]\n )\n bboxes = np.array([positions - 0.5 * sizes, positions + 0.5 * sizes]).swapaxes(\n 0, 1\n )\n\n pose = self.pose() if world_frame else None\n vertices = [compute_bbox_vertices(bbox, pose, project_2d) for bbox in bboxes]\n\n return vertices\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes\n\n # def aabb(self) -> np.ndarray:\n # raise NotImplementedError" }, { "identifier": "Rack", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Rack(Object):\n TOP_THICKNESS = 0.01\n LEG_THICKNESS = 0.01\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 1.0,\n ):\n mass /= 7 # Divide mass among all 7 parts.\n top = shapes.Box(\n size=np.array([*size[:2], Rack.TOP_THICKNESS]),\n mass=mass,\n color=np.array(color),\n pose=math.Pose(\n pos=np.array([0.0, 0.0, -Rack.TOP_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n xy_legs = np.array([(x, y) for x in (-1, 1) for y in (-1, 1)]) * (\n (np.array(size[:2])[None, :] - Rack.LEG_THICKNESS) / 2\n )\n legs = [\n shapes.Box(\n size=np.array(\n [\n Rack.LEG_THICKNESS,\n Rack.LEG_THICKNESS,\n size[2] - Rack.TOP_THICKNESS - Rack.LEG_THICKNESS,\n ]\n ),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n 
pos=np.array(\n [\n *xy_leg,\n -(size[2] + Rack.TOP_THICKNESS - Rack.LEG_THICKNESS) / 2,\n ]\n ),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for xy_leg in xy_legs\n ]\n stabilizers = [\n shapes.Box(\n size=np.array([size[0], Rack.LEG_THICKNESS, Rack.LEG_THICKNESS]),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array([0.0, y_leg, -size[2] + Rack.LEG_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for y_leg in xy_legs[:2, 1]\n ]\n self._shapes = [top, *legs, *stabilizers]\n body_id = shapes.create_body(\n self.shapes,\n link_parents=[0] * (len(legs) + len(stabilizers)),\n physics_id=physics_id,\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = np.array(size)\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n self._bbox[0, 2] = -size[2]\n self._bbox[1, 2] = 0\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes" }, { "identifier": "Null", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Null(Object):\n def __init__(self, physics_id: int, name: str):\n sphere = shapes.Sphere(radius=0.001)\n body_id = shapes.create_body(sphere, physics_id=physics_id)\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=True\n )\n\n def state(self) -> object_state.ObjectState:\n # Null object state is a zero vector.\n return self._state\n\n def enable_collisions(self) -> None:\n pass\n\n def unfreeze(self) -> bool:\n return False" }, { "identifier": "Object", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Object(body.Body):\n name: str\n is_static: bool = False\n\n def __init__(\n self, physics_id: int, body_id: int, name: str, is_static: bool = False\n ):\n super().__init__(physics_id, body_id)\n\n self.name = name\n self.is_static = is_static\n\n T_pybullet_to_obj = super().pose().to_eigen()\n self._modified_axes = not T_pybullet_to_obj.is_approx(\n eigen.Isometry3d.identity()\n )\n if self._modified_axes:\n self._T_pybullet_to_obj = T_pybullet_to_obj\n self._T_obj_to_pybullet = T_pybullet_to_obj.inverse()\n\n self._state = object_state.ObjectState()\n\n def pose(self) -> math.Pose:\n if not self._modified_axes:\n return super().pose()\n\n return math.Pose.from_eigen(super().pose().to_eigen() * self._T_obj_to_pybullet)\n\n def set_pose(self, pose: math.Pose) -> None:\n if not self._modified_axes:\n return super().set_pose(pose)\n\n return super().set_pose(\n math.Pose.from_eigen(pose.to_eigen() * self._T_pybullet_to_obj)\n )\n\n def disable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 0, 0, physicsClientId=self.physics_id\n )\n\n def enable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id\n )\n\n @property\n def inertia(self) -> dyn.SpatialInertiad:\n try:\n return self._obj_inertia # type: ignore\n except AttributeError:\n pass\n\n self._obj_inertia = super().inertia\n if self._modified_axes:\n self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj\n\n T_world_to_obj = self.pose().to_eigen().inverse()\n for link_id in range(self.dof):\n link = body.Link(self.physics_id, self.body_id, 
link_id)\n T_link_to_obj = T_world_to_obj * link.pose().to_eigen()\n self._obj_inertia += link.inertia * T_link_to_obj\n\n return self._obj_inertia\n\n def state(self) -> object_state.ObjectState:\n pose = self.pose()\n aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))\n self._state.pos = pose.pos\n self._state.aa = aa.angle * aa.axis\n\n return self._state\n\n def set_state(self, state: object_state.ObjectState) -> None:\n self.set_pose(state.pose())\n\n def reset(self, action_skeleton: List) -> None:\n pass\n\n @classmethod\n def create(\n cls,\n physics_id: int,\n object_type: Optional[str],\n object_kwargs: Dict[str, Any] = {},\n object_groups: Dict[str, \"ObjectGroup\"] = {},\n **kwargs,\n ) -> \"Object\":\n object_class = Null if object_type is None else globals()[object_type]\n if issubclass(object_class, Variant):\n kwargs[\"object_groups\"] = object_groups\n object_kwargs = object_kwargs.copy()\n object_kwargs.update(kwargs)\n return object_class(physics_id=physics_id, **object_kwargs)\n\n def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:\n return isinstance(self, class_or_tuple)\n\n def type(self) -> Type[\"Object\"]:\n return type(self)\n\n @property\n def size(self) -> np.ndarray:\n raise NotImplementedError\n\n @property\n def bbox(self) -> np.ndarray:\n \"\"\"Returns the bounding box in the object frame.\n\n If the origin of the object is at its geometric center, this will be\n equivalent to `(-0.5 * self.size, 0.5 * self.size)`.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n raise NotImplementedError\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the object's convex hull.\n\n These hulls will be used for rough collision checking. By default,\n the vertices will be the 6 corners of the object's bounding box\n (`Object.bbox`).\n\n Args:\n world_frame: Whether to transform the vertices in world frame or\n leave them in object frame.\n project_2d: Whether to return the 2d convex hull.\n\n Returns:\n List of arrays of shape [_, 3] or [_, 2], where each array is a\n convex hull.\n \"\"\"\n pose = self.pose() if world_frame else None\n vertices = compute_bbox_vertices(self.bbox, pose, project_2d)\n\n return [vertices]\n\n def aabb(self) -> np.ndarray:\n \"\"\"Computes the axis-aligned bounding box from the object pose and size.\n\n This should be more accurate than `super().aabb()`, which gets the aabb\n from Pybullet. 
Pybullet returns an *enlarged* aabb for the object *base*\n link, while this returns the exact aabb for the entire object.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)\n xyz_min = vertices.min(axis=0)\n xyz_max = vertices.max(axis=0)\n\n return np.array([xyz_min, xyz_max])\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return []\n\n def __str__(self) -> str:\n return self.name\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other) -> bool:\n return str(self) == str(other)" }, { "identifier": "object_state", "path": "generative_skill_chaining/envs/pybullet/table/object_state.py", "snippet": "class ObjectState:\n RANGES = {\n \"x\": (-0.3, 0.9),\n \"y\": (-0.5, 0.5),\n \"z\": (-0.1, 0.5),\n \"wx\": (-np.pi, np.pi),\n \"wy\": (-np.pi, np.pi),\n \"wz\": (-np.pi, np.pi),\n \"box_size_x\": (0.0, 0.1),\n \"box_size_y\": (0.0, 0.1),\n \"box_size_z\": (0.0, 0.2),\n \"head_length\": (0.0, 0.3),\n \"handle_length\": (0.0, 0.5),\n \"handle_y\": (-1.0, 1.0),\n }\n def __init__(self, vector: Optional[np.ndarray] = None):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def aa(self) -> np.ndarray:\n def aa(self, aa: np.ndarray) -> None:\n def box_size(self) -> np.ndarray:\n def box_size(self, box_size: np.ndarray) -> None:\n def head_length(self) -> Union[float, np.ndarray]:\n def head_length(self, head_length: Union[float, np.ndarray]) -> None:\n def handle_length(self) -> Union[float, np.ndarray]:\n def handle_length(self, handle_length: Union[float, np.ndarray]) -> None:\n def handle_y(self) -> Union[float, np.ndarray]:\n def handle_y(self, handle_y: Union[float, np.ndarray]) -> None:\n def range(cls) -> np.ndarray:\n def pose(self) -> math.Pose:\n def set_pose(self, pose: math.Pose) -> None:\n def __repr__(self) -> str:" }, { "identifier": "utils", "path": "generative_skill_chaining/envs/pybullet/table/utils.py", "snippet": "TABLE_CONSTRAINTS = {\n \"table_z_max\": 0.00,\n \"table_x_min\": 0.28,\n \"table_y_min\": -0.45,\n \"table_y_max\": 0.45,\n \"workspace_x_min\": 0.40,\n \"operational_x_min\": 0.50,\n \"operational_x_max\": 0.60,\n \"obstruction_x_min\": 0.575,\n \"workspace_radius\": 0.7,\n}\nEPSILONS = {\"aabb\": 0.01, \"align\": 0.99, \"twist\": 0.001, \"tipping\": 0.1}\nTWIST_HISTORY: Dict[str, Dict[Object, np.ndarray]] = collections.defaultdict(dict)\ndef compute_margins(obj: Object) -> np.ndarray:\ndef compute_object_pose(obj: Object, theta: float) -> math.Pose:\ndef is_above(obj_a: Object, obj_b: Object) -> bool:\ndef is_upright(obj: Object) -> bool:\ndef is_within_distance(\n obj_a: Object, obj_b: Object, distance: float, physics_id: int\n) -> bool:\ndef is_moving(obj: Object, use_history: Optional[str] = None) -> bool:\ndef is_below_table(obj: Object) -> bool:\ndef is_touching(\n body_a: body.Body,\n body_b: body.Body,\n link_id_a: Optional[int] = None,\n link_id_b: Optional[int] = None,\n) -> bool:\ndef is_intersecting(obj_a: Object, obj_b: Object) -> bool:\ndef is_under(obj_a: Object, obj_b: Object) -> bool:\ndef is_inworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef is_beyondworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef load_config(config: Union[str, Any]) -> Any:" }, { "identifier": "primitive_actions", "path": 
"generative_skill_chaining/envs/pybullet/table/primitive_actions.py", "snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANGES: Dict[str, Tuple[float, float]]\n RANGES = {\n \"x\": (-0.2, 0.2),\n \"y\": (-0.1, 0.1),\n \"z\": (-0.05, 0.05),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"x\": (-1.0, 1.0),\n \"y\": (-1.0, 1.0),\n \"z\": (0.0, 0.1),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.2, 0.0),\n \"r_pull\": (-0.4, -0.1),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.4, -0.2),\n \"r_push\": (0.1, 0.4),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n def __init__(self, vector: Optional[np.ndarray] = None):\n def range(cls) -> np.ndarray:\n def random(cls):\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_pull: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_pull(self) -> np.ndarray:\n def r_pull(self, r_pull: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_push: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_push(self) -> np.ndarray:\n def r_push(self, r_push: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:" } ]
import abc import random import gym import numpy as np import symbolic from typing import Callable, Dict, List, Optional, NamedTuple, Type from ctrlutils import eigen from generative_skill_chaining.envs import base as envs from generative_skill_chaining.envs.pybullet.sim import math from generative_skill_chaining.envs.pybullet.sim.robot import ControlException, Robot from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Rack, Null, Object from generative_skill_chaining.envs.pybullet.table import ( object_state, utils, primitive_actions, ) from generative_skill_chaining.envs.pybullet.table_env import TableEnv from generative_skill_chaining.envs.pybullet.table_env import TableEnv from generative_skill_chaining.envs.pybullet.table_env import TableEnv from generative_skill_chaining.envs.pybullet.table_env import TableEnv from generative_skill_chaining.envs.pybullet.table_env import TableEnv from generative_skill_chaining.envs.pybullet.table_env import TableEnv
11174
dbprint = lambda *args: None # noqa # dbprint = print ACTION_CONSTRAINTS = {"max_lift_height": 0.4, "max_lift_radius": 0.7} def compute_top_down_orientation( theta: float, quat_obj: eigen.Quaterniond = eigen.Quaterniond.identity() ) -> eigen.Quaterniond: """Computes the top-down orientation of the end-effector with respect to a target object. Args: theta: Angle of the gripper about the world z-axis wrt the target object. quat_obj: Orientation of the target object. """ command_aa = eigen.AngleAxisd(theta, np.array([0.0, 0.0, 1.0])) command_quat = quat_obj * eigen.Quaterniond(command_aa) return command_quat def did_object_move( obj: Object, old_pose: math.Pose, max_delta_xyz: float = 0.05, max_delta_theta: float = 5.0 * np.pi / 180, ) -> bool: """Checks if the object has moved significantly from its old pose.""" new_pose = obj.pose() T_old_to_world = old_pose.to_eigen() T_new_to_world = new_pose.to_eigen() T_new_to_old = T_old_to_world.inverse() * T_new_to_world delta_xyz = float(np.linalg.norm(T_new_to_old.translation)) delta_theta = eigen.AngleAxisd(eigen.Quaterniond(T_new_to_old.linear)).angle return delta_xyz >= max_delta_xyz or delta_theta >= max_delta_theta def initialize_robot_pose(robot: Robot) -> bool: x_min, x_max = ( utils.TABLE_CONSTRAINTS["table_x_min"], ACTION_CONSTRAINTS["max_lift_radius"], ) y_min = utils.TABLE_CONSTRAINTS["table_y_min"] y_max = utils.TABLE_CONSTRAINTS["table_y_max"] xy_min = np.array([x_min, y_min]) xy_max = np.array([x_max, y_max]) while True: xy = np.random.uniform(xy_min, xy_max) if np.linalg.norm(xy) < ACTION_CONSTRAINTS["max_lift_radius"]: break theta = np.random.uniform(*object_state.ObjectState.RANGES["wz"]) pos = np.append(xy, ACTION_CONSTRAINTS["max_lift_height"]) aa = eigen.AngleAxisd(theta, np.array([0.0, 0.0, 1.0])) quat = eigen.Quaterniond(aa) try: robot.goto_pose(pos, quat)
dbprint = lambda *args: None # noqa # dbprint = print ACTION_CONSTRAINTS = {"max_lift_height": 0.4, "max_lift_radius": 0.7} def compute_top_down_orientation( theta: float, quat_obj: eigen.Quaterniond = eigen.Quaterniond.identity() ) -> eigen.Quaterniond: """Computes the top-down orientation of the end-effector with respect to a target object. Args: theta: Angle of the gripper about the world z-axis wrt the target object. quat_obj: Orientation of the target object. """ command_aa = eigen.AngleAxisd(theta, np.array([0.0, 0.0, 1.0])) command_quat = quat_obj * eigen.Quaterniond(command_aa) return command_quat def did_object_move( obj: Object, old_pose: math.Pose, max_delta_xyz: float = 0.05, max_delta_theta: float = 5.0 * np.pi / 180, ) -> bool: """Checks if the object has moved significantly from its old pose.""" new_pose = obj.pose() T_old_to_world = old_pose.to_eigen() T_new_to_world = new_pose.to_eigen() T_new_to_old = T_old_to_world.inverse() * T_new_to_world delta_xyz = float(np.linalg.norm(T_new_to_old.translation)) delta_theta = eigen.AngleAxisd(eigen.Quaterniond(T_new_to_old.linear)).angle return delta_xyz >= max_delta_xyz or delta_theta >= max_delta_theta def initialize_robot_pose(robot: Robot) -> bool: x_min, x_max = ( utils.TABLE_CONSTRAINTS["table_x_min"], ACTION_CONSTRAINTS["max_lift_radius"], ) y_min = utils.TABLE_CONSTRAINTS["table_y_min"] y_max = utils.TABLE_CONSTRAINTS["table_y_max"] xy_min = np.array([x_min, y_min]) xy_max = np.array([x_max, y_max]) while True: xy = np.random.uniform(xy_min, xy_max) if np.linalg.norm(xy) < ACTION_CONSTRAINTS["max_lift_radius"]: break theta = np.random.uniform(*object_state.ObjectState.RANGES["wz"]) pos = np.append(xy, ACTION_CONSTRAINTS["max_lift_height"]) aa = eigen.AngleAxisd(theta, np.array([0.0, 0.0, 1.0])) quat = eigen.Quaterniond(aa) try: robot.goto_pose(pos, quat)
except ControlException as e:
2
2023-10-16 00:22:40+00:00
16k
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/pydantic/dataclasses.py
[ { "identifier": "_config", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_config.py", "snippet": "DEPRECATION_MESSAGE = 'Support for class-based `config` is deprecated, use ConfigDict instead.'\nV2_REMOVED_KEYS = {\n 'allow_mutation',\n 'error_msg_templates',\n 'fields',\n 'getter_dict',\n 'smart_union',\n 'underscore_attrs_are_private',\n 'json_loads',\n 'json_dumps',\n 'copy_on_model_validation',\n 'post_init_call',\n}\nV2_RENAMED_KEYS = {\n 'allow_population_by_field_name': 'populate_by_name',\n 'anystr_lower': 'str_to_lower',\n 'anystr_strip_whitespace': 'str_strip_whitespace',\n 'anystr_upper': 'str_to_upper',\n 'keep_untouched': 'ignored_types',\n 'max_anystr_length': 'str_max_length',\n 'min_anystr_length': 'str_min_length',\n 'orm_mode': 'from_attributes',\n 'schema_extra': 'json_schema_extra',\n 'validate_all': 'validate_default',\n}\nclass ConfigWrapper:\nclass ConfigWrapperStack:\n def __init__(self, config: ConfigDict | dict[str, Any] | type[Any] | None, *, check: bool = True):\n def for_model(cls, bases: tuple[type[Any], ...], namespace: dict[str, Any], kwargs: dict[str, Any]) -> Self:\n def __getattr__(self, name: str) -> Any:\n def core_config(self, obj: Any) -> core_schema.CoreConfig:\n def dict_not_none(**kwargs: Any) -> Any:\n def __repr__(self):\n def __init__(self, config_wrapper: ConfigWrapper):\n def tail(self) -> ConfigWrapper:\n def push(self, config_wrapper: ConfigWrapper | ConfigDict | None) -> ContextManager[None]:\n def _context_manager() -> Iterator[None]:\ndef prepare_config(config: ConfigDict | dict[str, Any] | type[Any] | None) -> ConfigDict:\ndef check_deprecated(config_dict: ConfigDict) -> None:" }, { "identifier": "_decorators", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_decorators.py", "snippet": "class ValidatorDecoratorInfo:\nclass FieldValidatorDecoratorInfo:\nclass RootValidatorDecoratorInfo:\nclass FieldSerializerDecoratorInfo:\nclass ModelSerializerDecoratorInfo:\nclass ModelValidatorDecoratorInfo:\nclass PydanticDescriptorProxy(Generic[ReturnType]):\nclass Decorator(Generic[DecoratorInfoType]):\nclass DecoratorInfos:\n def __post_init__(self):\n def _call_wrapped_attr(self, func: Callable[[Any], None], *, name: str) -> PydanticDescriptorProxy[ReturnType]:\n def __get__(self, obj: object | None, obj_type: type[object] | None = None) -> PydanticDescriptorProxy[ReturnType]:\n def __set_name__(self, instance: Any, name: str) -> None:\n def __getattr__(self, __name: str) -> Any:\n def build(\n cls_: Any,\n *,\n cls_var_name: str,\n shim: Callable[[Any], Any] | None,\n info: DecoratorInfoType,\n ) -> Decorator[DecoratorInfoType]:\n def bind_to_cls(self, cls: Any) -> Decorator[DecoratorInfoType]:\ndef get_bases(tp: type[Any]) -> tuple[type[Any], ...]:\ndef mro(tp: type[Any]) -> tuple[type[Any], ...]:\ndef mro_for_bases(bases: tuple[type[Any], ...]) -> tuple[type[Any], ...]:\n def merge_seqs(seqs: list[deque[type[Any]]]) -> Iterable[type[Any]]:\ndef get_attribute_from_bases(tp: type[Any] | tuple[type[Any], ...], name: str) -> Any:\ndef get_attribute_from_base_dicts(tp: type[Any], name: str) -> Any:\n def build(model_dc: type[Any]) -> DecoratorInfos: # noqa: C901 (ignore complexity)\ndef inspect_validator(validator: Callable[..., Any], mode: FieldValidatorModes) -> bool:\ndef inspect_field_serializer(\n serializer: Callable[..., Any], mode: Literal['plain', 'wrap'], computed_field: bool = False\n) -> tuple[bool, bool]:\ndef inspect_annotated_serializer(serializer: Callable[..., Any], mode: 
Literal['plain', 'wrap']) -> bool:\ndef inspect_model_serializer(serializer: Callable[..., Any], mode: Literal['plain', 'wrap']) -> bool:\ndef _serializer_info_arg(mode: Literal['plain', 'wrap'], n_positional: int) -> bool | None:\ndef is_instance_method_from_sig(function: AnyDecoratorCallable) -> bool:\ndef ensure_classmethod_based_on_signature(function: AnyDecoratorCallable) -> Any:\ndef _is_classmethod_from_sig(function: AnyDecoratorCallable) -> bool:\ndef unwrap_wrapped_function(\n func: Any,\n *,\n unwrap_partial: bool = True,\n unwrap_class_static_method: bool = True,\n) -> Any:\ndef get_function_return_type(\n func: Any, explicit_return_type: Any, types_namespace: dict[str, Any] | None = None\n) -> Any:\ndef count_positional_params(sig: Signature) -> int:\ndef can_be_positional(param: Parameter) -> bool:\ndef ensure_property(f: Any) -> Any:" }, { "identifier": "_typing_extra", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_typing_extra.py", "snippet": " def origin_is_union(tp: type[Any] | None) -> bool:\n def origin_is_union(tp: type[Any] | None) -> bool:\ndef is_none_type(type_: Any) -> bool:\ndef is_callable_type(type_: type[Any]) -> bool:\ndef is_literal_type(type_: type[Any]) -> bool:\ndef literal_values(type_: type[Any]) -> tuple[Any, ...]:\ndef all_literal_values(type_: type[Any]) -> list[Any]:\ndef is_annotated(ann_type: Any) -> bool:\ndef is_namedtuple(type_: type[Any]) -> bool:\ndef is_new_type(type_: type[Any]) -> bool:\ndef _check_classvar(v: type[Any] | None) -> bool:\ndef is_classvar(ann_type: type[Any]) -> bool:\ndef _check_finalvar(v: type[Any] | None) -> bool:\ndef is_finalvar(ann_type: Any) -> bool:\ndef parent_frame_namespace(*, parent_depth: int = 2) -> dict[str, Any] | None:\ndef add_module_globals(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_types_namespace(cls: type[Any], parent_namespace: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_type_hints_lenient(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef eval_type_lenient(value: Any, globalns: dict[str, Any] | None, localns: dict[str, Any] | None) -> Any:\ndef get_function_type_hints(\n function: Callable[..., Any], *, include_keys: set[str] | None = None, types_namespace: dict[str, Any] | None = None\n) -> dict[str, Any]:\n def _make_forward_ref(\n arg: Any,\n is_argument: bool = True,\n *,\n is_class: bool = False,\n ) -> typing.ForwardRef:\n def get_type_hints( # noqa: C901\n obj: Any,\n globalns: dict[str, Any] | None = None,\n localns: dict[str, Any] | None = None,\n include_extras: bool = False,\n ) -> dict[str, Any]: # pragma: no cover\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\ndef is_dataclass(_cls: type[Any]) -> TypeGuard[type[StandardDataclass]]:\ndef origin_is_type_alias_type(origin: Any) -> TypeGuard[TypeAliasType]:\nLITERAL_TYPES: set[Any] = {Literal}\nNONE_TYPES: tuple[Any, ...] 
= (None, NoneType, *(tp[None] for tp in LITERAL_TYPES))" }, { "identifier": "_dataclasses", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_dataclasses.py", "snippet": " class StandardDataclass(typing.Protocol):\n class PydanticDataclass(StandardDataclass, typing.Protocol):\n def __init__(self, *args: object, **kwargs: object) -> None:\ndef set_dataclass_fields(cls: type[StandardDataclass], types_namespace: dict[str, Any] | None = None) -> None:\ndef complete_dataclass(\n cls: type[Any],\n config_wrapper: _config.ConfigWrapper,\n *,\n raise_errors: bool = True,\n types_namespace: dict[str, Any] | None,\n) -> bool:\n def __init__(__dataclass_self__: PydanticDataclass, *args: Any, **kwargs: Any) -> None:\n def validated_setattr(instance: Any, __field: str, __value: str) -> None:\ndef generate_dataclass_signature(cls: type[StandardDataclass]) -> Signature:\ndef is_builtin_dataclass(_cls: type[Any]) -> TypeGuard[type[StandardDataclass]]:" }, { "identifier": "getattr_migration", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_migration.py", "snippet": "def getattr_migration(module: str) -> Callable[[str], Any]:\n \"\"\"Implement PEP 562 for objects that were either moved or removed on the migration\n to V2.\n\n Args:\n module: The module name.\n\n Returns:\n A callable that will raise an error if the object is not found.\n \"\"\"\n # This avoids circular import with errors.py.\n from .errors import PydanticImportError\n\n def wrapper(name: str) -> object:\n \"\"\"Raise an error if the object is not found, or warn if it was moved.\n\n In case it was moved, it still returns the object.\n\n Args:\n name: The object name.\n\n Returns:\n The object.\n \"\"\"\n if name == '__path__':\n raise AttributeError(f'module {__name__!r} has no attribute {name!r}')\n\n import_path = f'{module}:{name}'\n if import_path in MOVED_IN_V2.keys():\n new_location = MOVED_IN_V2[import_path]\n warnings.warn(f'`{import_path}` has been moved to `{new_location}`.')\n return import_string(MOVED_IN_V2[import_path])\n if import_path in DEPRECATED_MOVED_IN_V2:\n # skip the warning here because a deprecation warning will be raised elsewhere\n return import_string(DEPRECATED_MOVED_IN_V2[import_path])\n if import_path in REDIRECT_TO_V1:\n new_location = REDIRECT_TO_V1[import_path]\n warnings.warn(\n f'`{import_path}` has been removed. We are importing from `{new_location}` instead.'\n 'See the migration guide for more details: https://docs.pydantic.dev/latest/migration/'\n )\n return import_string(REDIRECT_TO_V1[import_path])\n if import_path == 'pydantic:BaseSettings':\n raise PydanticImportError(\n '`BaseSettings` has been moved to the `pydantic-settings` package. 
'\n f'See https://docs.pydantic.dev/{version_short()}/migration/#basesettings-has-moved-to-pydantic-settings '\n 'for more details.'\n )\n if import_path in REMOVED_IN_V2:\n raise PydanticImportError(f'`{import_path}` has been removed in V2.')\n globals: Dict[str, Any] = sys.modules[module].__dict__\n if name in globals:\n return globals[name]\n raise AttributeError(f'module {__name__!r} has no attribute {name!r}')\n\n return wrapper" }, { "identifier": "ConfigDict", "path": "backend/venv/lib/python3.10/site-packages/pydantic/config.py", "snippet": "class ConfigDict(TypedDict, total=False):\n \"\"\"A TypedDict for configuring Pydantic behaviour.\"\"\"\n\n title: str | None\n \"\"\"The title for the generated JSON schema, defaults to the model's name\"\"\"\n\n str_to_lower: bool\n \"\"\"Whether to convert all characters to lowercase for str types. Defaults to `False`.\"\"\"\n\n str_to_upper: bool\n \"\"\"Whether to convert all characters to uppercase for str types. Defaults to `False`.\"\"\"\n str_strip_whitespace: bool\n \"\"\"Whether to strip leading and trailing whitespace for str types.\"\"\"\n\n str_min_length: int\n \"\"\"The minimum length for str types. Defaults to `None`.\"\"\"\n\n str_max_length: int | None\n \"\"\"The maximum length for str types. Defaults to `None`.\"\"\"\n\n extra: ExtraValues | None\n \"\"\"\n Whether to ignore, allow, or forbid extra attributes during model initialization. Defaults to `'ignore'`.\n\n You can configure how pydantic handles the attributes that are not defined in the model:\n\n * `allow` - Allow any extra attributes.\n * `forbid` - Forbid any extra attributes.\n * `ignore` - Ignore any extra attributes.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='ignore') # (1)!\n\n name: str\n\n\n user = User(name='John Doe', age=20) # (2)!\n print(user)\n #> name='John Doe'\n ```\n\n 1. This is the default behaviour.\n 2. The `age` argument is ignored.\n\n Instead, with `extra='allow'`, the `age` argument is included:\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='allow')\n\n name: str\n\n\n user = User(name='John Doe', age=20) # (1)!\n print(user)\n #> name='John Doe' age=20\n ```\n\n 1. The `age` argument is included.\n\n With `extra='forbid'`, an error is raised:\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='forbid')\n\n name: str\n\n\n try:\n User(name='John Doe', age=20)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for User\n age\n Extra inputs are not permitted [type=extra_forbidden, input_value=20, input_type=int]\n '''\n ```\n \"\"\"\n\n frozen: bool\n \"\"\"\n Whether or not models are faux-immutable, i.e. whether `__setattr__` is allowed, and also generates\n a `__hash__()` method for the model. This makes instances of the model potentially hashable if all the\n attributes are hashable. Defaults to `False`.\n\n Note:\n On V1, this setting was called `allow_mutation`, and was `True` by default.\n \"\"\"\n\n populate_by_name: bool\n \"\"\"\n Whether an aliased field may be populated by its name as given by the model\n attribute, as well as the alias. 
Defaults to `False`.\n\n Note:\n The name of this configuration setting was changed in **v2.0** from\n `allow_population_by_alias` to `populate_by_name`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, Field\n\n\n class User(BaseModel):\n model_config = ConfigDict(populate_by_name=True)\n\n name: str = Field(alias='full_name') # (1)!\n age: int\n\n\n user = User(full_name='John Doe', age=20) # (2)!\n print(user)\n #> name='John Doe' age=20\n user = User(name='John Doe', age=20) # (3)!\n print(user)\n #> name='John Doe' age=20\n ```\n\n 1. The field `'name'` has an alias `'full_name'`.\n 2. The model is populated by the alias `'full_name'`.\n 3. The model is populated by the field name `'name'`.\n \"\"\"\n\n use_enum_values: bool\n \"\"\"\n Whether to populate models with the `value` property of enums, rather than the raw enum.\n This may be useful if you want to serialize `model.model_dump()` later. Defaults to `False`.\n \"\"\"\n\n validate_assignment: bool\n \"\"\"\n Whether to validate the data when the model is changed. Defaults to `False`.\n\n The default behavior of Pydantic is to validate the data when the model is created.\n\n In case the user changes the data after the model is created, the model is _not_ revalidated.\n\n ```py\n from pydantic import BaseModel\n\n class User(BaseModel):\n name: str\n\n user = User(name='John Doe') # (1)!\n print(user)\n #> name='John Doe'\n user.name = 123 # (1)!\n print(user)\n #> name=123\n ```\n\n 1. The validation happens only when the model is created.\n 2. The validation does not happen when the data is changed.\n\n In case you want to revalidate the model when the data is changed, you can use `validate_assignment=True`:\n\n ```py\n from pydantic import BaseModel, ValidationError\n\n class User(BaseModel, validate_assignment=True): # (1)!\n name: str\n\n user = User(name='John Doe') # (2)!\n print(user)\n #> name='John Doe'\n try:\n user.name = 123 # (3)!\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for User\n name\n Input should be a valid string [type=string_type, input_value=123, input_type=int]\n '''\n ```\n\n 1. You can either use class keyword arguments, or `model_config` to set `validate_assignment=True`.\n 2. The validation happens when the model is created.\n 3. The validation _also_ happens when the data is changed.\n \"\"\"\n\n arbitrary_types_allowed: bool\n \"\"\"\n Whether arbitrary types are allowed for field types. 
Defaults to `False`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n # This is not a pydantic model, it's an arbitrary class\n class Pet:\n def __init__(self, name: str):\n self.name = name\n\n class Model(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n pet: Pet\n owner: str\n\n pet = Pet(name='Hedwig')\n # A simple check of instance type is used to validate the data\n model = Model(owner='Harry', pet=pet)\n print(model)\n #> pet=<__main__.Pet object at 0x0123456789ab> owner='Harry'\n print(model.pet)\n #> <__main__.Pet object at 0x0123456789ab>\n print(model.pet.name)\n #> Hedwig\n print(type(model.pet))\n #> <class '__main__.Pet'>\n try:\n # If the value is not an instance of the type, it's invalid\n Model(owner='Harry', pet='Hedwig')\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n pet\n Input should be an instance of Pet [type=is_instance_of, input_value='Hedwig', input_type=str]\n '''\n\n # Nothing in the instance of the arbitrary type is checked\n # Here name probably should have been a str, but it's not validated\n pet2 = Pet(name=42)\n model2 = Model(owner='Harry', pet=pet2)\n print(model2)\n #> pet=<__main__.Pet object at 0x0123456789ab> owner='Harry'\n print(model2.pet)\n #> <__main__.Pet object at 0x0123456789ab>\n print(model2.pet.name)\n #> 42\n print(type(model2.pet))\n #> <class '__main__.Pet'>\n ```\n \"\"\"\n\n from_attributes: bool\n \"\"\"\n Whether to build models and look up discriminators of tagged unions using python object attributes.\n \"\"\"\n\n loc_by_alias: bool\n \"\"\"Whether to use the actual key provided in the data (e.g. alias) for error `loc`s rather than the field's name. Defaults to `True`.\"\"\"\n\n alias_generator: Callable[[str], str] | None\n \"\"\"\n A callable that takes a field name and returns an alias for it.\n\n If data source field names do not match your code style (e. g. CamelCase fields),\n you can automatically generate aliases using `alias_generator`:\n\n ```py\n from pydantic import BaseModel, ConfigDict\n from pydantic.alias_generators import to_pascal\n\n class Voice(BaseModel):\n model_config = ConfigDict(alias_generator=to_pascal)\n\n name: str\n language_code: str\n\n voice = Voice(Name='Filiz', LanguageCode='tr-TR')\n print(voice.language_code)\n #> tr-TR\n print(voice.model_dump(by_alias=True))\n #> {'Name': 'Filiz', 'LanguageCode': 'tr-TR'}\n ```\n\n Note:\n Pydantic offers three built-in alias generators: [`to_pascal`][pydantic.alias_generators.to_pascal],\n [`to_camel`][pydantic.alias_generators.to_camel], and [`to_snake`][pydantic.alias_generators.to_snake].\n \"\"\"\n\n ignored_types: tuple[type, ...]\n \"\"\"A tuple of types that may occur as values of class attributes without annotations. This is\n typically used for custom descriptors (classes that behave like `property`). If an attribute is set on a\n class without an annotation and has a type that is not in this tuple (or otherwise recognized by\n _pydantic_), an error will be raised. Defaults to `()`.\n \"\"\"\n\n allow_inf_nan: bool\n \"\"\"Whether to allow infinity (`+inf` an `-inf`) and NaN values to float fields. Defaults to `True`.\"\"\"\n\n json_schema_extra: dict[str, object] | JsonSchemaExtraCallable | None\n \"\"\"A dict or callable to provide extra JSON schema properties. Defaults to `None`.\"\"\"\n\n json_encoders: dict[type[object], JsonEncoder] | None\n \"\"\"\n A `dict` of custom JSON encoders for specific types. Defaults to `None`.\n\n !!! 
warning \"Deprecated\"\n This config option is a carryover from v1.\n We originally planned to remove it in v2 but didn't have a 1:1 replacement so we are keeping it for now.\n It is still deprecated and will likely be removed in the future.\n \"\"\"\n\n # new in V2\n strict: bool\n \"\"\"\n _(new in V2)_ If `True`, strict validation is applied to all fields on the model.\n\n By default, Pydantic attempts to coerce values to the correct type, when possible.\n\n There are situations in which you may want to disable this behavior, and instead raise an error if a value's type\n does not match the field's type annotation.\n\n To configure strict mode for all fields on a model, you can set `strict=True` on the model.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n class Model(BaseModel):\n model_config = ConfigDict(strict=True)\n\n name: str\n age: int\n ```\n\n See [Strict Mode](../concepts/strict_mode.md) for more details.\n\n See the [Conversion Table](../concepts/conversion_table.md) for more details on how Pydantic converts data in both\n strict and lax modes.\n \"\"\"\n # whether instances of models and dataclasses (including subclass instances) should re-validate, default 'never'\n revalidate_instances: Literal['always', 'never', 'subclass-instances']\n \"\"\"\n When and how to revalidate models and dataclasses during validation. Accepts the string\n values of `'never'`, `'always'` and `'subclass-instances'`. Defaults to `'never'`.\n\n - `'never'` will not revalidate models and dataclasses during validation\n - `'always'` will revalidate models and dataclasses during validation\n - `'subclass-instances'` will revalidate models and dataclasses during validation if the instance is a\n subclass of the model or dataclass\n\n By default, model and dataclass instances are not revalidated during validation.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel\n\n class User(BaseModel, revalidate_instances='never'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1] # (2)!\n t = Transaction(user=my_user) # (3)!\n print(t)\n #> user=User(hobbies=[1])\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t)\n #> user=SubUser(hobbies=['scuba diving'], sins=['lying'])\n ```\n\n 1. `revalidate_instances` is set to `'never'` by **default.\n 2. The assignment is not validated, unless you set `validate_assignment` to `True` in the model's config.\n 3. 
Since `revalidate_instances` is set to `never`, this is not revalidated.\n\n If you want to revalidate instances during validation, you can set `revalidate_instances` to `'always'`\n in the model's config.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel, ValidationError\n\n class User(BaseModel, revalidate_instances='always'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1]\n try:\n t = Transaction(user=my_user) # (2)!\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Transaction\n user.hobbies.0\n Input should be a valid string [type=string_type, input_value=1, input_type=int]\n '''\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t) # (3)!\n #> user=User(hobbies=['scuba diving'])\n ```\n\n 1. `revalidate_instances` is set to `'always'`.\n 2. The model is revalidated, since `revalidate_instances` is set to `'always'`.\n 3. Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`.\n\n It's also possible to set `revalidate_instances` to `'subclass-instances'` to only revalidate instances\n of subclasses of the model.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel\n\n class User(BaseModel, revalidate_instances='subclass-instances'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1]\n t = Transaction(user=my_user) # (2)!\n print(t)\n #> user=User(hobbies=[1])\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t) # (3)!\n #> user=User(hobbies=['scuba diving'])\n ```\n\n 1. `revalidate_instances` is set to `'subclass-instances'`.\n 2. This is not revalidated, since `my_user` is not a subclass of `User`.\n 3. Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`.\n \"\"\"\n\n ser_json_timedelta: Literal['iso8601', 'float']\n \"\"\"\n The format of JSON serialized timedeltas. Accepts the string values of `'iso8601'` and\n `'float'`. Defaults to `'iso8601'`.\n\n - `'iso8601'` will serialize timedeltas to ISO 8601 durations.\n - `'float'` will serialize timedeltas to the total number of seconds.\n \"\"\"\n\n ser_json_bytes: Literal['utf8', 'base64']\n \"\"\"\n The encoding of JSON serialized bytes. Accepts the string values of `'utf8'` and `'base64'`.\n Defaults to `'utf8'`.\n\n - `'utf8'` will serialize bytes to UTF-8 strings.\n - `'base64'` will serialize bytes to URL safe base64 strings.\n \"\"\"\n\n # whether to validate default values during validation, default False\n validate_default: bool\n \"\"\"Whether to validate default values during validation. Defaults to `False`.\"\"\"\n\n validate_return: bool\n \"\"\"whether to validate the return value from call validators. 
Defaults to `False`.\"\"\"\n\n protected_namespaces: tuple[str, ...]\n \"\"\"\n A `tuple` of strings that prevent model to have field which conflict with them.\n Defaults to `('model_', )`).\n\n Pydantic prevents collisions between model attributes and `BaseModel`'s own methods by\n namespacing them with the prefix `model_`.\n\n ```py\n import warnings\n\n from pydantic import BaseModel\n\n warnings.filterwarnings('error') # Raise warnings as errors\n\n try:\n\n class Model(BaseModel):\n model_prefixed_field: str\n\n except UserWarning as e:\n print(e)\n '''\n Field \"model_prefixed_field\" has conflict with protected namespace \"model_\".\n\n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n '''\n ```\n\n You can customize this behavior using the `protected_namespaces` setting:\n\n ```py\n import warnings\n\n from pydantic import BaseModel, ConfigDict\n\n warnings.filterwarnings('error') # Raise warnings as errors\n\n try:\n\n class Model(BaseModel):\n model_prefixed_field: str\n also_protect_field: str\n\n model_config = ConfigDict(\n protected_namespaces=('protect_me_', 'also_protect_')\n )\n\n except UserWarning as e:\n print(e)\n '''\n Field \"also_protect_field\" has conflict with protected namespace \"also_protect_\".\n\n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ('protect_me_',)`.\n '''\n ```\n\n While Pydantic will only emit a warning when an item is in a protected namespace but does not actually have a collision,\n an error _is_ raised if there is an actual collision with an existing attribute:\n\n ```py\n from pydantic import BaseModel\n\n try:\n\n class Model(BaseModel):\n model_validate: str\n\n except NameError as e:\n print(e)\n '''\n Field \"model_validate\" conflicts with member <bound method BaseModel.model_validate of <class 'pydantic.main.BaseModel'>> of protected namespace \"model_\".\n '''\n ```\n \"\"\"\n\n hide_input_in_errors: bool\n \"\"\"\n Whether to hide inputs when printing errors. Defaults to `False`.\n\n Pydantic shows the input value and type when it raises `ValidationError` during the validation.\n\n ```py\n from pydantic import BaseModel, ValidationError\n\n class Model(BaseModel):\n a: str\n\n try:\n Model(a=123)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n a\n Input should be a valid string [type=string_type, input_value=123, input_type=int]\n '''\n ```\n\n You can hide the input value and type by setting the `hide_input_in_errors` config to `True`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n class Model(BaseModel):\n a: str\n model_config = ConfigDict(hide_input_in_errors=True)\n\n try:\n Model(a=123)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n a\n Input should be a valid string [type=string_type]\n '''\n ```\n \"\"\"\n\n defer_build: bool\n \"\"\"\n Whether to defer model validator and serializer construction until the first model validation.\n\n This can be useful to avoid the overhead of building models which are only\n used nested within other models, or when you want to manually define type namespace via\n [`Model.model_rebuild(_types_namespace=...)`][pydantic.BaseModel.model_rebuild]. Defaults to False.\n \"\"\"\n\n plugin_settings: dict[str, object] | None\n \"\"\"A `dict` of settings for plugins. 
Defaults to `None`.\n\n See [Pydantic Plugins](../concepts/plugins.md) for details.\n \"\"\"\n\n schema_generator: type[_GenerateSchema] | None\n \"\"\"\n A custom core schema generator class to use when generating JSON schemas.\n Useful if you want to change the way types are validated across an entire model/schema. Defaults to `None`.\n\n The `GenerateSchema` interface is subject to change, currently only the `string_schema` method is public.\n\n See [#6737](https://github.com/pydantic/pydantic/pull/6737) for details.\n \"\"\"\n\n json_schema_serialization_defaults_required: bool\n \"\"\"\n Whether fields with default values should be marked as required in the serialization schema. Defaults to `False`.\n\n This ensures that the serialization schema will reflect the fact a field with a default will always be present\n when serializing the model, even though it is not required for validation.\n\n However, there are scenarios where this may be undesirable — in particular, if you want to share the schema\n between validation and serialization, and don't mind fields with defaults being marked as not required during\n serialization. See [#7209](https://github.com/pydantic/pydantic/issues/7209) for more details.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n class Model(BaseModel):\n a: str = 'a'\n\n model_config = ConfigDict(json_schema_serialization_defaults_required=True)\n\n print(Model.model_json_schema(mode='validation'))\n '''\n {\n 'properties': {'a': {'default': 'a', 'title': 'A', 'type': 'string'}},\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n print(Model.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {'a': {'default': 'a', 'title': 'A', 'type': 'string'}},\n 'required': ['a'],\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n ```\n \"\"\"\n\n json_schema_mode_override: Literal['validation', 'serialization', None]\n \"\"\"\n If not `None`, the specified mode will be used to generate the JSON schema regardless of what `mode` was passed to\n the function call. Defaults to `None`.\n\n This provides a way to force the JSON schema generation to reflect a specific mode, e.g., to always use the\n validation schema.\n\n It can be useful when using frameworks (such as FastAPI) that may generate different schemas for validation\n and serialization that must both be referenced from the same schema; when this happens, we automatically append\n `-Input` to the definition reference for the validation schema and `-Output` to the definition reference for the\n serialization schema. 
By specifying a `json_schema_mode_override` though, this prevents the conflict between\n the validation and serialization schemas (since both will use the specified schema), and so prevents the suffixes\n from being added to the definition references.\n\n ```py\n from pydantic import BaseModel, ConfigDict, Json\n\n class Model(BaseModel):\n a: Json[int] # requires a string to validate, but will dump an int\n\n print(Model.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {'a': {'title': 'A', 'type': 'integer'}},\n 'required': ['a'],\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n\n class ForceInputModel(Model):\n # the following ensures that even with mode='serialization', we\n # will get the schema that would be generated for validation.\n model_config = ConfigDict(json_schema_mode_override='validation')\n\n print(ForceInputModel.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {\n 'a': {\n 'contentMediaType': 'application/json',\n 'contentSchema': {'type': 'integer'},\n 'title': 'A',\n 'type': 'string',\n }\n },\n 'required': ['a'],\n 'title': 'ForceInputModel',\n 'type': 'object',\n }\n '''\n ```\n \"\"\"\n\n coerce_numbers_to_str: bool\n \"\"\"\n If `True`, enables automatic coercion of any `Number` type to `str` in \"lax\" (non-strict) mode. Defaults to `False`.\n\n Pydantic doesn't allow number types (`int`, `float`, `Decimal`) to be coerced as type `str` by default.\n\n ```py\n from decimal import Decimal\n\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n class Model(BaseModel):\n value: str\n\n try:\n print(Model(value=42))\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n value\n Input should be a valid string [type=string_type, input_value=42, input_type=int]\n '''\n\n class Model(BaseModel):\n model_config = ConfigDict(coerce_numbers_to_str=True)\n\n value: str\n\n repr(Model(value=42).value)\n #> \"42\"\n repr(Model(value=42.13).value)\n #> \"42.13\"\n repr(Model(value=Decimal('42.13')).value)\n #> \"42.13\"\n ```\n \"\"\"" }, { "identifier": "Field", "path": "backend/venv/lib/python3.10/site-packages/pydantic/fields.py", "snippet": "def Field( # noqa: C901\n default: Any = PydanticUndefined,\n *,\n default_factory: typing.Callable[[], Any] | None = _Unset,\n alias: str | None = _Unset,\n alias_priority: int | None = _Unset,\n validation_alias: str | AliasPath | AliasChoices | None = _Unset,\n serialization_alias: str | None = _Unset,\n title: str | None = _Unset,\n description: str | None = _Unset,\n examples: list[Any] | None = _Unset,\n exclude: bool | None = _Unset,\n discriminator: str | None = _Unset,\n json_schema_extra: dict[str, Any] | typing.Callable[[dict[str, Any]], None] | None = _Unset,\n frozen: bool | None = _Unset,\n validate_default: bool | None = _Unset,\n repr: bool = _Unset,\n init_var: bool | None = _Unset,\n kw_only: bool | None = _Unset,\n pattern: str | None = _Unset,\n strict: bool | None = _Unset,\n gt: float | None = _Unset,\n ge: float | None = _Unset,\n lt: float | None = _Unset,\n le: float | None = _Unset,\n multiple_of: float | None = _Unset,\n allow_inf_nan: bool | None = _Unset,\n max_digits: int | None = _Unset,\n decimal_places: int | None = _Unset,\n min_length: int | None = _Unset,\n max_length: int | None = _Unset,\n union_mode: Literal['smart', 'left_to_right'] = _Unset,\n **extra: Unpack[_EmptyKwargs],\n) -> Any:\n \"\"\"Usage docs: https://docs.pydantic.dev/2.4/concepts/fields\n\n Create a field for objects that can be configured.\n\n Used to provide 
extra information about a field, either for the model schema or complex validation. Some arguments\n apply only to number fields (`int`, `float`, `Decimal`) and some apply only to `str`.\n\n Note:\n - Any `_Unset` objects will be replaced by the corresponding value defined in the `_DefaultValues` dictionary. If a key for the `_Unset` object is not found in the `_DefaultValues` dictionary, it will default to `None`\n\n Args:\n default: Default value if the field is not set.\n default_factory: A callable to generate the default value, such as :func:`~datetime.utcnow`.\n alias: An alternative name for the attribute.\n alias_priority: Priority of the alias. This affects whether an alias generator is used.\n validation_alias: 'Whitelist' validation step. The field will be the single one allowed by the alias or set of\n aliases defined.\n serialization_alias: 'Blacklist' validation step. The vanilla field will be the single one of the alias' or set\n of aliases' fields and all the other fields will be ignored at serialization time.\n title: Human-readable title.\n description: Human-readable description.\n examples: Example values for this field.\n exclude: Whether to exclude the field from the model serialization.\n discriminator: Field name for discriminating the type in a tagged union.\n json_schema_extra: Any additional JSON schema data for the schema property.\n frozen: Whether the field is frozen.\n validate_default: Run validation that isn't only checking existence of defaults. This can be set to `True` or `False`. If not set, it defaults to `None`.\n repr: A boolean indicating whether to include the field in the `__repr__` output.\n init_var: Whether the field should be included in the constructor of the dataclass.\n kw_only: Whether the field should be a keyword-only argument in the constructor of the dataclass.\n strict: If `True`, strict validation is applied to the field.\n See [Strict Mode](../concepts/strict_mode.md) for details.\n gt: Greater than. If set, value must be greater than this. Only applicable to numbers.\n ge: Greater than or equal. If set, value must be greater than or equal to this. Only applicable to numbers.\n lt: Less than. If set, value must be less than this. Only applicable to numbers.\n le: Less than or equal. If set, value must be less than or equal to this. Only applicable to numbers.\n multiple_of: Value must be a multiple of this. Only applicable to numbers.\n min_length: Minimum length for strings.\n max_length: Maximum length for strings.\n pattern: Pattern for strings.\n allow_inf_nan: Allow `inf`, `-inf`, `nan`. Only applicable to numbers.\n max_digits: Maximum number of allow digits for strings.\n decimal_places: Maximum number of decimal places allowed for numbers.\n union_mode: The strategy to apply when validating a union. Can be `smart` (the default), or `left_to_right`.\n See [Union Mode](standard_library_types.md#union-mode) for details.\n extra: Include extra fields used by the JSON schema.\n\n !!! warning Deprecated\n The `extra` kwargs is deprecated. Use `json_schema_extra` instead.\n\n Returns:\n A new [`FieldInfo`][pydantic.fields.FieldInfo], the return annotation is `Any` so `Field` can be used on\n type annotated fields without causing a typing error.\n \"\"\"\n # Check deprecated and removed params from V1. 
This logic should eventually be removed.\n const = extra.pop('const', None) # type: ignore\n if const is not None:\n raise PydanticUserError('`const` is removed, use `Literal` instead', code='removed-kwargs')\n\n min_items = extra.pop('min_items', None) # type: ignore\n if min_items is not None:\n warn('`min_items` is deprecated and will be removed, use `min_length` instead', DeprecationWarning)\n if min_length in (None, _Unset):\n min_length = min_items # type: ignore\n\n max_items = extra.pop('max_items', None) # type: ignore\n if max_items is not None:\n warn('`max_items` is deprecated and will be removed, use `max_length` instead', DeprecationWarning)\n if max_length in (None, _Unset):\n max_length = max_items # type: ignore\n\n unique_items = extra.pop('unique_items', None) # type: ignore\n if unique_items is not None:\n raise PydanticUserError(\n (\n '`unique_items` is removed, use `Set` instead'\n '(this feature is discussed in https://github.com/pydantic/pydantic-core/issues/296)'\n ),\n code='removed-kwargs',\n )\n\n allow_mutation = extra.pop('allow_mutation', None) # type: ignore\n if allow_mutation is not None:\n warn('`allow_mutation` is deprecated and will be removed. use `frozen` instead', DeprecationWarning)\n if allow_mutation is False:\n frozen = True\n\n regex = extra.pop('regex', None) # type: ignore\n if regex is not None:\n raise PydanticUserError('`regex` is removed. use `pattern` instead', code='removed-kwargs')\n\n if extra:\n warn(\n 'Using extra keyword arguments on `Field` is deprecated and will be removed.'\n ' Use `json_schema_extra` instead.'\n f' (Extra keys: {\", \".join(k.__repr__() for k in extra.keys())})',\n DeprecationWarning,\n )\n if not json_schema_extra or json_schema_extra is _Unset:\n json_schema_extra = extra # type: ignore\n\n if (\n validation_alias\n and validation_alias is not _Unset\n and not isinstance(validation_alias, (str, AliasChoices, AliasPath))\n ):\n raise TypeError('Invalid `validation_alias` type. it should be `str`, `AliasChoices`, or `AliasPath`')\n\n if serialization_alias in (_Unset, None) and isinstance(alias, str):\n serialization_alias = alias\n\n if validation_alias in (_Unset, None):\n validation_alias = alias\n\n include = extra.pop('include', None) # type: ignore\n if include is not None:\n warn('`include` is deprecated and does nothing. It will be removed, use `exclude` instead', DeprecationWarning)\n\n return FieldInfo.from_field(\n default,\n default_factory=default_factory,\n alias=alias,\n alias_priority=alias_priority,\n validation_alias=validation_alias,\n serialization_alias=serialization_alias,\n title=title,\n description=description,\n examples=examples,\n exclude=exclude,\n discriminator=discriminator,\n json_schema_extra=json_schema_extra,\n frozen=frozen,\n pattern=pattern,\n validate_default=validate_default,\n repr=repr,\n init_var=init_var,\n kw_only=kw_only,\n strict=strict,\n gt=gt,\n ge=ge,\n lt=lt,\n le=le,\n multiple_of=multiple_of,\n min_length=min_length,\n max_length=max_length,\n allow_inf_nan=allow_inf_nan,\n max_digits=max_digits,\n decimal_places=decimal_places,\n union_mode=union_mode,\n )" } ]
import dataclasses import sys import types from typing import TYPE_CHECKING, Any, Callable, Generic, NoReturn, TypeVar, overload from typing_extensions import Literal, TypeGuard, dataclass_transform from ._internal import _config, _decorators, _typing_extra from ._internal import _dataclasses as _pydantic_dataclasses from ._migration import getattr_migration from .config import ConfigDict from .fields import Field from ._internal._dataclasses import PydanticDataclass
11981
"""Provide an enhanced dataclass that performs validation.""" from __future__ import annotations as _annotations if TYPE_CHECKING: __all__ = 'dataclass', 'rebuild_dataclass' _T = TypeVar('_T') if sys.version_info >= (3, 10):
"""Provide an enhanced dataclass that performs validation.""" from __future__ import annotations as _annotations if TYPE_CHECKING: __all__ = 'dataclass', 'rebuild_dataclass' _T = TypeVar('_T') if sys.version_info >= (3, 10):
@dataclass_transform(field_specifiers=(dataclasses.field, Field))
6
2023-10-23 18:09:28+00:00
16k
zju3dv/nr_in_a_room
data_gen/batch_real_scene_neural_render.py
[ { "identifier": "read_json", "path": "utils/util.py", "snippet": "def read_json(fname):\n fname = Path(fname)\n with fname.open(\"rt\") as handle:\n return json.load(handle, object_hook=OrderedDict)" }, { "identifier": "read_yaml", "path": "utils/util.py", "snippet": "def read_yaml(fname):\n with open(fname, \"r\") as stream:\n return yaml.safe_load(stream)" }, { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n chunk: int,\n model_ckpt_path_dict: Dict[str, Any],\n config=None,\n scale_factor_dict: Dict[str, Any] = {},\n scene_info_path: str = None,\n scene_info_json_path: str = None,\n model_type=\"NeuS\",\n N_samples: int = 64,\n N_importance: int = 128,\n relation_info: Dict[str, Any] = {},\n output_path: str = None,\n prefix: str = \"\",\n active_instance_id: list = [46, 4, 9, 102],\n virtual_instance_id: list = [], # specific for edit (insert virtual to real) mode\n filter_door_and_window: bool = True,\n lr: float = 1e-2,\n N_optim_step: int = 500,\n adjust_lr_per_step: int = 150,\n optim_batch_size: int = 1024,\n use_amp: bool = False,\n extract_obj_bbox_from_neural_model: bool = False,\n ig_data_base_dir: str = \"data/ig_dataset_v1.0.1/\",\n mask_per_object: bool = False,\n bbox_ray_intersect: bool = True,\n bbox_enlarge: float = 0.1,\n optimize_light_env: bool = True,\n optimize_appearance_code: bool = False,\n use_light_from_image_attr: bool = False,\n use_appearance_from_image_attr: bool = False,\n optimize_option: list = [\n \"photometric_loss\",\n \"perceptual_loss\",\n \"z_axis_align_loss\",\n \"object_room_wall_attach\",\n \"object_room_floor_attach\",\n \"physical_violation\",\n \"object_object_attach\",\n ],\n ):\n # load config\n self.scene_info_path = scene_info_path\n self.scale_factor = scale_factor\n self.scale_factor_dict = scale_factor_dict\n self.bg_scale_factor = bg_scale_factor\n self.bg_scene_center = np.array(bg_scene_center)\n self.ig_data_base_dir = ig_data_base_dir\n self.mask_per_object = mask_per_object\n self.bbox_ray_intersect = bbox_ray_intersect\n self.bbox_enlarge = bbox_enlarge\n self.virtual_instance_id = virtual_instance_id\n\n self.img_wh = img_wh\n self.w = img_wh[0]\n self.h = img_wh[1]\n self.near = near\n self.far = far\n self.N_importance = N_importance\n self.N_samples = N_samples\n self.chunk = chunk\n self.lr = lr\n self.N_optim_step = N_optim_step\n self.adjust_lr_per_step = adjust_lr_per_step\n self.optim_batch_size = optim_batch_size\n self.use_amp = use_amp\n self.optimize_light_env = optimize_light_env\n self.optimize_appearance_code = optimize_appearance_code\n self.optimize_option = optimize_option\n self.config = config\n\n self.use_light_from_image_attr = use_light_from_image_attr\n if self.use_light_from_image_attr:\n print(\n \"WARNING: self.use_light_from_image_attr = True, using hard coded light env.\"\n )\n self.hard_coded_light_id = 0 # just for compatibility\n # self.hard_coded_light_id = 9 # probe_03 in 10 HDR multi_light training\n\n self.use_appearance_from_image_attr = use_appearance_from_image_attr\n if self.use_appearance_from_image_attr:\n print(\n \"WARNING: self.use_appearance_from_image_attr = True, using first frame appearance code.\"\n )\n self.hard_coded_appearance_frame_id = 0\n\n self.optimize_exposure = \"optimize_exposure\" in self.optimize_option\n\n # laod scene info\n if scene_info_json_path is None:\n 
scene_info_json_path = os.path.join(scene_info_path, \"data.json\")\n self.scene_meta = read_json(scene_info_json_path)\n\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n self.relation_info = relation_info\n\n self.model_type = model_type\n # self.load_model(\n # model_type, model_ckpt_path_dict[\"obj\"], model_ckpt_path_dict[\"bg\"]\n # )\n self.load_model_from_dict_path(model_type, model_ckpt_path_dict)\n\n self.reset_optimizable_parameters()\n\n if extract_obj_bbox_from_neural_model:\n self.extract_bounding_boxes_from_neural_model()\n\n if self.bbox_ray_intersect:\n self.prepare_bbox_ray_helper()\n\n self.set_output_path(output_path, prefix)\n\n print(\"RoomOptimizer initialize finished.\")\n\n def load_model_from_dict_path(self, model_type, model_ckpt_path_dict):\n assert model_type == \"NeuS\"\n self.models = {}\n self.image_attrs = {}\n\n # avoid duplicate loading\n self.models_cache = {}\n self.image_attrs_cache = {}\n\n print(\"loading model with instance_id\", self.active_instance_id)\n\n # print(model_ckpt_path_dict)\n for obj_id in self.active_instance_id:\n # identify ckpt_path\n if str(obj_id) in model_ckpt_path_dict:\n ckpt_info = model_ckpt_path_dict[str(obj_id)]\n elif obj_id == 0:\n assert (\n \"bg\" in model_ckpt_path_dict or \"0\" in model_ckpt_path_dict\n ), \"model_ckpt_path_dict missing background 'bg' or '0' ckpt\"\n ckpt_info = model_ckpt_path_dict.get(\"bg\", model_ckpt_path_dict[\"0\"])\n else:\n print(\n f\"Cannot find specific model for obj_id = {obj_id}, \\\n maybe config file is not compatible with given active_instance_id.\"\n )\n ckpt_info = model_ckpt_path_dict[\"obj\"]\n # load with cache\n ckpt_path, neus_conf = ckpt_info[\"path\"], ckpt_info[\"neus_conf\"]\n if ckpt_info not in self.models_cache:\n (\n self.models_cache[ckpt_path],\n self.image_attrs_cache[ckpt_path],\n ) = self.load_model_neus(ckpt_path, obj_id, neus_conf)\n self.models[f\"neus_{obj_id}\"] = self.models_cache[ckpt_path]\n self.image_attrs[str(obj_id)] = self.image_attrs_cache[ckpt_path]\n\n def load_model_nerf(self, ckpt_path):\n # TODO(ybbbbt): fix hard coding\n conf = {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n }\n nerf_coarse = NeRF_Object(conf)\n nerf_fine = NeRF_Object(conf)\n image_attributes = ImageAttributes(conf)\n load_ckpt(nerf_coarse, ckpt_path, model_name=\"nerf_coarse\")\n load_ckpt(nerf_fine, ckpt_path, model_name=\"nerf_fine\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n nerf_coarse = nerf_coarse.cuda().eval()\n nerf_fine = nerf_fine.cuda().eval()\n image_attributes = image_attributes.cuda().eval()\n\n models = {\n \"coarse\": nerf_coarse,\n \"fine\": nerf_fine,\n }\n\n embedding_xyz = Embedding(3, 10)\n embedding_dir = Embedding(3, 4)\n embeddings = {\n \"xyz\": embedding_xyz,\n \"dir\": embedding_dir,\n }\n return models, embeddings, image_attributes\n\n def load_model_neus(self, ckpt_path, obj_id, config_path=\"config/neus.yaml\"):\n conf = {\n \"model\": {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n },\n }\n if self.optimize_light_env:\n # conf[\"model\"].update({\"N_max_lights\": 128, \"N_light_embedding\": 16})\n conf[\"model\"].update({\"N_max_lights\": 1024, \"N_light_embedding\": 16})\n\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n conf[\"model\"].update(\n {\"N_max_appearance_frames\": 10000, \"N_appearance_embedding\": 16}\n )\n\n neus, render_kwargs_train, render_kwargs_test = get_model_neus(\n 
config_path=config_path, need_trainer=False, extra_conf=conf\n )\n self.render_kwargs_neus = render_kwargs_test\n image_attributes = ImageAttributes(conf[\"model\"])\n\n print(ckpt_path)\n load_ckpt(neus, ckpt_path, model_name=\"neus\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n if self.config is not None and (\n str(obj_id) in self.config.get(\"map_virtual_to_local\", {})\n ):\n # image_attributes.embedding_instance\n real_id_in_ckpt = self.config.map_virtual_to_local[str(obj_id)]\n image_attributes.embedding_instance.weight.requires_grad = False\n image_attributes.embedding_instance.weight[\n obj_id\n ] = image_attributes.embedding_instance.weight[real_id_in_ckpt]\n # ipdb.set_trace()\n\n neus.cuda().eval()\n image_attributes.cuda().eval()\n return neus, image_attributes\n\n def reset_optimizable_parameters(self):\n self.params = []\n self.relation_info = {}\n if self.optimize_light_env:\n self.initialize_light_code()\n\n if self.optimize_appearance_code:\n self.initialize_appearance_code()\n\n if self.optimize_exposure:\n self.initialize_autoexposure()\n\n def save_optimizable_parameters(self, path):\n all_param_dict = {}\n # all_param_dict[\"params\"] = self.params\n all_param_dict[\"relation_info\"] = self.relation_info\n all_param_dict[\"object_pose_dict\"] = copy.deepcopy(self.object_pose_dict)\n all_param_dict[\"active_instance_id\"] = copy.deepcopy(self.active_instance_id)\n if self.optimize_light_env:\n all_param_dict[\"light_code\"] = copy.deepcopy(self.light_code_dict)\n if self.optimize_appearance_code:\n all_param_dict[\"appearance_code\"] = copy.deepcopy(self.appearance_code_dict)\n if self.optimize_exposure:\n all_param_dict[\"exposure\"] = copy.deepcopy(self.autoexposure_param)\n torch.save(all_param_dict, path)\n\n def load_optimizable_parameters(self, path):\n all_param_dict = torch.load(path)\n # self.params = all_param_dict[\"params\"]\n self.relation_info = all_param_dict[\"relation_info\"]\n if len(self.virtual_instance_id) == 0: # not overwrite in edit mode\n self.active_instance_id = all_param_dict[\"active_instance_id\"]\n\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if len(self.virtual_instance_id) == 0: # not modify edit mode pose\n if hasattr(self, \"object_pose_dict\"):\n self.object_pose_dict.update(all_param_dict[\"object_pose_dict\"])\n else:\n self.object_pose_dict = all_param_dict[\"object_pose_dict\"]\n if self.optimize_light_env:\n self.light_code_dict = all_param_dict[\"light_code\"]\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n self.appearance_code_dict = all_param_dict[\"appearance_code\"]\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure and \"exposure\" in all_param_dict:\n self.autoexposure_param = all_param_dict[\"exposure\"]\n to_gpu(self.autoexposure_param)\n # ipdb.set_trace()\n\n def interpolate_light_env_from_states(self, path1, path2, interp):\n all_param_dict_1 = torch.load(path1)\n all_param_dict_2 = torch.load(path2)\n\n # self.params = all_param_dict[\"params\"]\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if self.optimize_light_env:\n light_code_dict_1 = 
all_param_dict_1[\"light_code\"]\n light_code_dict_2 = all_param_dict_2[\"light_code\"]\n for k, v in self.light_code_dict.items():\n self.light_code_dict[k] = light_code_dict_1[\n k\n ] * interp + light_code_dict_2[k] * (1 - interp)\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n appearance_code_dict_1 = all_param_dict_1[\"appearance_code\"]\n appearance_code_dict_2 = all_param_dict_2[\"appearance_code\"]\n for k, v in self.appearance_code_dict.items():\n self.appearance_code_dict[k] = appearance_code_dict_1[\n k\n ] * interp + appearance_code_dict_2[k] * (1 - interp)\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure:\n autoexposure_param_1 = all_param_dict_1[\"exposure\"]\n autoexposure_param_2 = all_param_dict_2[\"exposure\"]\n for k, v in self.autoexposure_param.items():\n self.autoexposure_param[k] = autoexposure_param_1[\n k\n ] * interp + autoexposure_param_2[k] * (1 - interp)\n to_gpu(self.autoexposure_param)\n\n def reset_active_instance_id(self, active_instance_id, filter_door_and_window=True):\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n def set_output_path(self, output_path: str, prefix: str, with_timestamp=True):\n if output_path is not None:\n if with_timestamp:\n self.output_path = os.path.join(\n output_path, f\"rendered_{get_timestamp()}_{prefix}\"\n )\n else:\n self.output_path = os.path.join(output_path, f\"{prefix}\")\n os.makedirs(self.output_path, exist_ok=True)\n\n def filter_door_and_window(self):\n print(\"Filtering door and window objects.\")\n filtered_active_instance_id = []\n for obj_id in self.active_instance_id:\n if self.get_type_of_instance(obj_id) not in [\"door\", \"window\"]:\n filtered_active_instance_id += [obj_id]\n self.active_instance_id = filtered_active_instance_id\n\n def initialize_light_code(self):\n self.light_code_dict = {}\n for obj_id in self.active_instance_id:\n # light_code = torch.randn((16)).cuda()\n light_code = torch.zeros((16)).cuda()\n light_code.requires_grad = True\n self.params += [\n {\"params\": light_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.light_code_dict[str(obj_id)] = light_code\n\n def initialize_appearance_code(self):\n self.appearance_code_dict = {}\n for obj_id in self.active_instance_id:\n # appearance_code = torch.randn((16)).cuda()\n appearance_code = torch.zeros((16)).cuda()\n appearance_code.requires_grad = True\n self.params += [\n {\"params\": appearance_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.appearance_code_dict[str(obj_id)] = appearance_code\n\n def initialize_autoexposure(self):\n self.autoexposure_param = {}\n for obj_id in self.active_instance_id:\n # scale and shift\n autoexposure_param = torch.Tensor([1, 1, 1, 0, 0, 0]).cuda()\n autoexposure_param.requires_grad = True\n self.params += [\n {\"params\": autoexposure_param, \"lr\": self.lr * 0.1}\n ] # light code can be optimized with larger lr\n self.autoexposure_param[str(obj_id)] = autoexposure_param\n\n def get_scale_factor(self, obj_id):\n if obj_id == 0:\n return self.bg_scale_factor\n elif str(obj_id) in self.scale_factor_dict:\n return self.scale_factor_dict[str(obj_id)]\n else:\n return self.scale_factor\n\n def extract_bounding_boxes_from_neural_model(self):\n print(\"Extracting object bounding boxes from neural model...\")\n assert self.model_type == \"NeuS\"\n for obj_id in tqdm(self.active_instance_id):\n mesh = extract_mesh_from_neus(\n 
self.models[f\"neus_{obj_id}\"],\n self.image_attrs[str(obj_id)],\n obj_id,\n )\n bbox = mesh.get_axis_aligned_bounding_box()\n bound = np.array([bbox.min_bound, bbox.max_bound])\n size = (bound[1] - bound[0]) * self.get_scale_factor(obj_id)\n # update scene_meta\n for idx, obj_info in enumerate(self.scene_meta[\"objs\"]):\n if obj_info[\"id\"] == obj_id:\n self.scene_meta[\"objs\"][idx][\"bdb3d\"][\"size\"] = size.tolist()\n\n def prepare_bbox_ray_helper(self):\n # bbox ray helper dict\n self.bbox_ray_helper_dict = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n length = np.array(obj_meta_info[\"bbox3d\"][\"size\"])\n self.bbox_ray_helper_dict[str(obj_id)] = BBoxRayHelper(np.zeros(3), length)\n\n def generate_object_rays(\n self, rays_o_obj, rays_d_obj, obj_id, near=None, far=None, select_ind=None\n ):\n \"\"\"\n Generate object rays given rays_o, rays_d and obj_id\n Input:\n select_ind: only for masked rendering\n \"\"\"\n if obj_id == 0: # background\n return self.generate_bg_rays(rays_o_obj, rays_d_obj, near=near, far=far)\n if self.bbox_ray_intersect:\n # for object, rays_o and rays_d should lie in world scale (unscaled)\n bbox_mask, bbox_batch_near, bbox_batch_far = self.bbox_ray_helper_dict[\n str(obj_id)\n ].get_ray_bbox_intersections(\n rays_o_obj,\n rays_d_obj,\n self.get_scale_factor(obj_id),\n # bbox_enlarge=self.bbox_enlarge / self.get_scale_factor(obj_id),\n bbox_enlarge=self.bbox_enlarge, # in physical world\n )\n # for area which hits bbox, we use bbox hit near far\n # bbox_ray_helper has scale for us, do no need to rescale\n batch_near_obj, batch_far_obj = bbox_batch_near, bbox_batch_far\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n # for the invalid part, we use 0 as near far, which assume that (0, 0, 0) is empty\n batch_near_obj[~bbox_mask] = torch.zeros_like(batch_near_obj[~bbox_mask])\n batch_far_obj[~bbox_mask] = torch.zeros_like(batch_far_obj[~bbox_mask])\n else:\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_obj = (\n near\n / self.get_scale_factor(obj_id)\n * torch.ones_like(rays_o_obj[:, :1])\n )\n batch_far_obj = (\n far / self.get_scale_factor(obj_id) * torch.ones_like(rays_d_obj[:, :1])\n )\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n\n if self.mask_per_object:\n # mask out of bound rendering\n obj_mask = torch.from_numpy(self.instance_mask == obj_id).view(-1)\n obj_mask = obj_mask[select_ind]\n batch_near_obj[~obj_mask] = 0\n batch_far_obj[~obj_mask] = 0\n\n rays_obj = torch.cat(\n [rays_o_obj, rays_d_obj, batch_near_obj, batch_far_obj], 1\n ) # (H*W, 8)\n rays_obj = rays_obj.cuda()\n return rays_obj\n\n def generate_bg_rays(self, rays_o_bg, rays_d_bg, near=None, far=None):\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_bg = near / self.bg_scale_factor * torch.ones_like(rays_o_bg[:, :1])\n batch_far_bg = far / self.bg_scale_factor * torch.ones_like(rays_d_bg[:, :1])\n rays_o_bg = rays_o_bg / self.bg_scale_factor\n rays_bg = torch.cat(\n [rays_o_bg, rays_d_bg, batch_near_bg, batch_far_bg], 1\n ) # (H*W, 8)\n rays_bg = rays_bg.cuda()\n return rays_bg\n\n def batched_inference_multi(\n self,\n rays_list,\n obj_id_list,\n to_cpu=True,\n hit_test_only=False,\n need_normal=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=True,\n refine_edge=False,\n refine_edge_obj_ids=[],\n render_mask=False,\n # 
use_sphere_tracing=False,\n show_progress=False,\n **kwargs,\n ):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays_list[0].shape[0]\n results = defaultdict(list)\n for i in tqdm(range(0, B, self.chunk), disable=not show_progress):\n extra_chunk = dict()\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor) and \"autoexposure_\" not in k:\n extra_chunk[k] = v[i : i + self.chunk]\n else:\n extra_chunk[k] = v\n if self.model_type == \"NeRF\":\n rendered_ray_chunks = render_rays_multi(\n self.models,\n self.embeddings,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n self.N_samples,\n use_disp=False,\n perturb=0.001,\n # perturb=0.00,\n noise_std=0,\n N_importance=self.N_importance,\n chunk=self.chunk,\n white_back=True,\n individual_weight_for_coarse=True,\n obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n **extra_chunk,\n )\n elif self.model_type == \"NeuS\":\n rendered_ray_chunks = render_rays_multi_neus(\n self,\n self.models,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n noise_std=0,\n white_back=True,\n # white_back=False,\n # obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n hit_test_only=hit_test_only,\n need_normal=need_normal,\n use_sphere_tracing=use_sphere_tracing,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n refine_edge_obj_ids=refine_edge_obj_ids,\n render_mask=render_mask,\n extra_dict=extra_chunk,\n render_kwargs=self.render_kwargs_neus,\n )\n\n for k, v in rendered_ray_chunks.items():\n if to_cpu:\n results[k] += [v.cpu()]\n else:\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def render_full_scene(\n self,\n pose: np.ndarray,\n idx: int,\n h: int,\n w: int,\n write_idx_on_image=True,\n return_raw_image=False,\n render_mask=False,\n refine_edge=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=False,\n show_progress=False,\n refine_edge_obj_ids=[],\n fovx_deg=0,\n ):\n extra_dict = dict()\n extra_dict[\"compute_3d_mask\"] = False\n extra_dict[\"is_eval\"] = True\n\n rays_list = []\n object_id_list = []\n\n if fovx_deg > 0:\n focal = (w / 2) / np.tan((fovx_deg / 2) / (180 / np.pi))\n print(\"focal =\", focal)\n directions = get_ray_directions(h, w, focal).cuda() # (h, w, 3)\n else:\n directions = get_ray_directions_equirectangular(h, w).cuda() # (h, w, 3)\n\n for obj_id in self.active_instance_id:\n # get object location\n # Two: object to world pose\n if obj_id == 0: # 0 denotes background\n Two = np.eye(4)\n Two[:3, 3] = self.bg_scene_center\n else: # other objects\n Two = torch.eye(4).cuda()\n Two[:3, :3] = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n Two[:3, 3] = self.object_pose_dict[str(obj_id)][\"trans\"]\n Two = Two.detach().cpu().numpy()\n # pose: Twc\n # we need: Toc\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n\n Toc = np.linalg.inv(Two) @ Twc\n\n Toc = torch.from_numpy(Toc).float().cuda()[:3, :4]\n rays_o, rays_d = get_rays(directions, Toc)\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id)\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr or obj_id in self.virtual_instance_id:\n if not hasattr(self, \"hard_code_light_id\"):\n 
self.hard_coded_light_id = 0\n extra_dict[\"embedding_light_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n\n # optimize exposure\n if self.optimize_exposure and obj_id not in self.virtual_instance_id:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n with torch.cuda.amp.autocast(enabled=True):\n with torch.no_grad():\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n use_sphere_tracing=use_sphere_tracing,\n # use_sphere_tracing=True,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n render_mask=render_mask,\n show_progress=show_progress,\n **extra_dict,\n )\n img = results[f\"rgb_fine\"]\n img_pred = np.clip(img.view(h, w, 3).cpu().numpy(), 0, 1)\n img_pred_ = (img_pred * 255).astype(np.uint8)\n\n if return_raw_image:\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0]\n .cpu()\n .numpy()\n .round()\n .astype(np.uint16)\n )\n return img_pred_, img_mask\n return img_pred_ # raw image in [h, w, 3] np.uint8\n\n if write_idx_on_image:\n img_pred_ = cv2.putText(\n img_pred_,\n \"Iter: {:03d}\".format(idx),\n (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n (255, 0, 0),\n 2,\n )\n\n imageio.imwrite(\n os.path.join(self.output_path, f\"{idx:06d}.multi_obj.png\"), img_pred_\n )\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0].cpu().numpy().round().astype(np.uint16)\n )\n cv2.imwrite(os.path.join(self.output_path, f\"{idx:06d}.seg.png\"), img_mask)\n\n def set_initial_object_poses_from_scene_meta(self, add_noise=True):\n self.object_pose_dict = {}\n\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n if \"gt_T_wo\" in obj_meta_info:\n Two = obj_meta_info[\"gt_T_wo\"]\n else:\n print(\n f\"Cannot find object pose for obj_id = {obj_id}, use custom pose with minor offset.\"\n )\n Two = np.eye(4)\n from scipy.spatial.transform import Rotation as R\n\n rot_fix = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)\n # TODO: update initial pose for real-world scenes\n # if obj_id == 31:\n # blender_xyz = np.array([-1.44, 1.18, 0.1])\n # blender_rot = R.from_quat([0.5, -0.5, 0.5, 0.5]).as_matrix()\n # elif obj_id == 32:\n # blender_xyz = np.array([0.76, 0.54, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n # elif obj_id == 33:\n # blender_xyz = np.array([-0.06, 1.01, -0.9])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 34:\n # blender_xyz = np.array([-0.05, 1.14, -0.15])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif 
obj_id == 35:\n # blender_xyz = np.array([-0.35, 1.1, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n\n # Two[:3, :3] = blender_rot @ rot_fix\n # Two[:3, :3] = rot_fix @ blender_rot\n # Two[:3, 3] = rot_fix @ blender_xyz\n\n # Two[1, 3] += 0.75\n # Two[2, 3] -= 0.7\n\n # add noise\n if add_noise:\n Two[:3, 3] += 0.1\n from scipy.spatial.transform import Rotation as R\n\n rot_noise = R.from_euler(\"z\", 20, degrees=True).as_matrix()\n Two[:3, :3] = Two[:3, :3] @ rot_noise\n Two = torch.from_numpy(Two).float().cuda()\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n if \"fix_object_pose\" not in self.optimize_option:\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_from_prediction(self, pred_json_path):\n print(\"Initial pose from\", pred_json_path)\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n pred_info = read_json(pred_json_path)\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.array(pred_info[str(obj_id)][\"Two\"])\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n\n if not \"fix_object_pose\" in self.optimize_option:\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_as_identity(self):\n print(\"Initial pose as identity.\")\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.eye(4)\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_sampling_mask_from_seg(\n self,\n seg_mask=None,\n seg_mask_path=None,\n add_noise_to_seg=0,\n convert_seg_mask_to_box_mask=False,\n ):\n if seg_mask_path is not None:\n print(\"Read segmentation from gt mask\")\n # read mask\n self.instance_mask = get_instance_mask(seg_mask_path, img_wh=self.img_wh)\n elif seg_mask is not None:\n self.instance_mask = seg_mask\n else:\n print(\"Warning: empty mask\")\n self.merged_mask = (\n np.ones((self.img_wh[1], self.img_wh[0])).reshape(-1).astype(bool)\n )\n return\n\n # merge active object masks\n merged_mask = np.zeros_like(self.instance_mask)\n for i_obj, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue # do not accumulate background obj_id\n instance_mask_obj = self.instance_mask == obj_id\n # use tightly fit bbox instead of segmentation mask\n if convert_seg_mask_to_box_mask:\n instance_mask_obj = seg_mask_to_box_mask(instance_mask_obj)\n merged_mask = np.logical_or(merged_mask, instance_mask_obj)\n\n # if add noise to gt segmentation\n if add_noise_to_seg 
!= 0:\n is_dilate = add_noise_to_seg > 0\n add_noise_to_seg = abs(add_noise_to_seg)\n kernel = np.ones((add_noise_to_seg, add_noise_to_seg), np.uint8)\n if is_dilate:\n merged_mask = cv2.dilate(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n else:\n merged_mask = cv2.erode(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n cv2.imwrite(\n f\"{self.output_path}/merged_mask.png\", merged_mask.astype(np.uint8) * 255\n )\n self.merged_mask = merged_mask.reshape(-1)\n\n def get_type_of_instance(self, instance_id):\n for obj_info in self.scene_meta[\"objs\"]:\n if obj_info[\"id\"] == instance_id:\n return obj_info[\"classname\"]\n return \"unknown\"\n\n def generate_relation(\n self,\n obj_to_room_distance_th: float = 0.5,\n top_down_dist_th: float = 0.3,\n top_down_xy_close_factor: float = 0.8,\n ):\n \"\"\"\n Generate relationship : object-wall, object-floor, object-object\n \"\"\"\n print(\"Start to generate relation from initial poses and neural models...\")\n all_obj_info = {}\n for i, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue\n Rwo = rotation_6d_to_matrix(self.object_pose_dict[str(obj_id)][\"rot6d\"])\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n all_obj_info[str(obj_id)] = optimized_meta\n with torch.no_grad():\n generate_relation_for_all(\n room_optimizer=self,\n all_obj_info=all_obj_info,\n obj_to_room_distance_th=obj_to_room_distance_th,\n top_down_dist_th=top_down_dist_th,\n top_down_xy_close_factor=top_down_xy_close_factor,\n )\n # print(\"Relation:\\n\", self.relation_info)\n for k, v in self.relation_info.items():\n print(k, v)\n\n def optimize(self, input_rgb: torch.Tensor, pose=None):\n \"\"\"\n Inputs:\n input_rgb: torch.Tensor [h, w, 3] normalized in 0...1\n \"\"\"\n if pose is None:\n pose = np.array(self.scene_meta[\"camera\"][\"cam3d2world\"]).reshape(4, 4)\n # Original poses has rotation in form \"right down forward\", change to NDC \"right up back\"\n fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n pose[:3, :3] = pose[:3, :3] @ fix_rot\n\n # camera to world pose\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n Twc = torch.from_numpy(Twc).float().cuda()\n\n if \"keypoint_mask\" in self.optimize_option:\n # detect keypoint for interest region\n keypoint_mask = detect_keypoints(input_rgb.numpy(), circle_radius=5)\n self.merged_mask = np.logical_and(\n keypoint_mask, self.merged_mask.reshape(keypoint_mask.shape)\n )\n cv2.imwrite(\n f\"{self.output_path}/merged_mask_keypoint.png\",\n self.merged_mask.astype(np.uint8) * 255,\n )\n self.merged_mask = self.merged_mask.reshape(-1)\n\n input_rgb = input_rgb.view(-1, 3) # (H*W, 3) RGB\n\n directions = get_ray_directions_equirectangular(\n self.h, self.w\n ).cuda() # (h, w, 3)\n\n mse_loss = nn.MSELoss(reduction=\"none\")\n\n assert hasattr(\n self, \"params\"\n ), \"Please set initial pose params before optimization.\"\n optimizer = torch.optim.Adam(self.params)\n\n scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)\n perceptual_net = perceptual_model.VGG16_for_Perceptual().cuda()\n\n sample_prob = pano_sample_probability(self.h, self.w).reshape(-1)\n\n t = trange(self.N_optim_step, desc=\"Opt.\", leave=True)\n for i_step in t:\n if 
\"regenerate_relation_during_test\" in self.optimize_option:\n if i_step != 0 and i_step % 50 == 0:\n self.generate_relation()\n if self.adjust_lr_per_step > 0:\n adjust_learning_rate(\n self.lr,\n optimizer,\n i_step,\n base=0.5,\n adjust_lr_every=self.adjust_lr_per_step,\n )\n extra_dict = dict()\n rays_list = []\n object_id_list = []\n # sample according to batch size limitation\n select_ind = np.arange(self.merged_mask.shape[0])[self.merged_mask]\n if (\n \"perceptual_loss\" not in self.optimize_option\n ): # we only sample some points in this case\n # sample according to pano distribution\n select_sample_prob = sample_prob[self.merged_mask]\n select_sample_prob /= select_sample_prob.sum()\n # assert select_ind.shape[0] > self.optim_batch_size\n sample_size = min(select_ind.shape[0], self.optim_batch_size)\n select_ind = np.random.choice(\n select_ind,\n size=sample_size,\n replace=False,\n p=select_sample_prob,\n )\n\n # add some sampling on the background for bg light code\n if self.optimize_light_env:\n bg_sample_ratio = 0.2\n bg_sample_prob = sample_prob[~self.merged_mask]\n bg_sample_prob /= bg_sample_prob.sum()\n bg_sample_ind = np.arange(self.merged_mask.shape[0])[~self.merged_mask]\n # assert bg_sample_ind.shape[0] > self.optim_batch_size\n bg_sample_size = min(\n bg_sample_ind.shape[0], int(bg_sample_ratio * self.optim_batch_size)\n )\n if bg_sample_size > 0:\n bg_sample_ind = np.random.choice(\n bg_sample_ind,\n size=bg_sample_size,\n replace=False,\n p=bg_sample_prob,\n )\n select_ind = np.concatenate([select_ind, bg_sample_ind], axis=-1)\n\n select_ind = np.unique(select_ind)\n if i_step == 0:\n print(\"Actual optimization rays\", select_ind.shape[0])\n select_input_rgb = input_rgb[select_ind].float().cuda()\n\n loss_dict = {}\n all_obj_info = {} # prepare for violation loss\n\n for i, obj_id in enumerate(self.active_instance_id):\n # object to world pose\n if obj_id == 0:\n Rwo = torch.eye(3).cuda()\n two = torch.from_numpy(self.bg_scene_center).float().cuda()\n else:\n Rwo = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n\n # camera to object pose\n Toc = torch.eye(4).cuda()\n Toc[:3, :3] = Rwo.T @ Twc[:3, :3]\n Toc[:3, 3] = Rwo.T @ (Twc[:3, 3] - two)\n\n # generate object rays\n rays_o, rays_d = get_rays(directions, Toc[:3, :4])\n\n rays_o = rays_o[select_ind]\n rays_d = rays_d[select_ind]\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id\n )\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr:\n extra_dict[\n \"embedding_light_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n 
.view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # autoexposure\n if self.optimize_exposure:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n # we do not need to add relation constraints to bg\n if obj_id == 0:\n continue\n\n # enforce optimising on yaw\n if \"z_axis_align_loss\" in self.optimize_option:\n loss_dict[\"z_axis_loss_{}\".format(obj_id)] = (\n z_axis_loss(Rwo, 1.0) * 1e2\n )\n\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n obj_id_key = str(obj_id)\n\n if obj_id_key not in self.relation_info:\n continue\n\n # get obj_relation from input\n obj_relation = self.relation_info[obj_id_key]\n # supplement obj_type\n obj_type = self.get_type_of_instance(obj_id)\n optimized_meta[\"obj_type\"] = obj_type\n\n all_obj_info[str(obj_id)] = optimized_meta\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n \"\"\"attach wall loss\"\"\"\n if (\n \"object_room_wall_attach\" in self.optimize_option\n and obj_relation.get(\"attach_wall\", False)\n ):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n # \"face_direction\": torch.Tensor([0, 1, 0]),\n # \"face_direction\": obj_relation.get(\n # \"attach_wall_face_dir\", torch.Tensor([0, 1, 0])\n # ),\n \"face_direction\": obj_relation[\"attach_wall_face_dir\"],\n \"ray_grid_size\": 10,\n }\n # for door object, we slightly stretch the size to ensure successive hit-test\n if obj_type == \"door\" or obj_type == \"window\":\n kwargs.update(\n {\n \"ray_grid_stretch\": torch.Tensor([1.2, 1.2, 1]),\n \"use_bbox_surface_as_in_detect\": True,\n }\n )\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n \"\"\"attach floor loss\"\"\"\n if (\n \"object_room_floor_attach\" in self.optimize_option\n and obj_relation.get(\"attach_floor\", False)\n ):\n # # TODO(ybbbbt): hard code floor\n # loss_dict.update(\n # obj_attach_floor_loss(optimized_meta, floor=0.0)\n # )\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n \"face_direction\": torch.Tensor([0, 0, -1]),\n \"ray_grid_stretch\": torch.Tensor(\n [0.8, 0.8, 1.0]\n ), # avoid too close to wall\n \"use_bbox_surface_as_in_detect\": True,\n \"ray_grid_size\": 3,\n }\n if obj_type == \"door\":\n # kwargs[\"ray_grid_offset\"] = torch.Tensor(\n # [0, -0.3, 0]\n # ) # to avoid to close to wall\n assert (\n \"attach_wall_face_dir\" in obj_relation\n ), f\"door {obj_id} relation prediction failed.\"\n kwargs[\"ray_grid_offset\"] = (\n obj_relation[\"attach_wall_face_dir\"] * -0.3\n ) # to avoid to close to wall\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n # use_sphere_tracing=True,\n use_sphere_tracing=False,\n **extra_dict,\n )\n pred_rgb = results[\"rgb_fine\"]\n\n if \"photometric_loss\" in self.optimize_option:\n loss_dict[\"mse_loss\"] = mse_loss(pred_rgb, select_input_rgb).mean()\n\n if \"visualize_pred\" in self.optimize_option: # dump image for debug\n # pred_rgb_full = input_rgb.cuda()\n pred_rgb_full = torch.zeros_like(input_rgb.cuda())\n pred_rgb_full[select_ind] = pred_rgb\n\n imageio.imwrite(\n f\"debug/pred_rgb_full.png\",\n (pred_rgb_full * 255)\n .view(self.img_wh[1], self.img_wh[0], 3)\n .detach()\n .cpu()\n .numpy()\n 
.astype(np.uint8),\n )\n\n if \"perceptual_loss\" in self.optimize_option:\n pred_rgb_full = input_rgb.cuda()\n pred_rgb_full[select_ind] = pred_rgb\n loss_dict.update(\n patch_perceptual_loss(\n perceptual_net,\n pred_rgb_full,\n input_rgb,\n all_obj_info,\n self.instance_mask,\n self.img_wh,\n )\n )\n\n \"\"\"attach bottom to other object loss\"\"\"\n if \"object_object_attach\" in self.optimize_option:\n for obj_id_str, obj_relation in self.relation_info.items():\n if obj_relation.get(\"attach_bottom_to_object\", False):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info_src\": all_obj_info[obj_id_str],\n \"obj_info_tgt\": all_obj_info[\n str(obj_relation[\"attach_tgt_obj_id\"])\n ],\n \"face_direction\": torch.Tensor([0, 0, -1]),\n }\n loss_dict.update(object_object_attach_loss(**kwargs))\n\n # physical violation loss\n if \"physical_violation\" in self.optimize_option:\n if (\n not \"physical_violation_delayed_start\" in self.optimize_option\n or i_step >= 100\n ):\n loss_dict.update(\n physical_violation_loss(\n self,\n all_obj_info,\n N_nearest_obj=3,\n check_background_violation=True,\n # N_sample_points=1000,\n N_sample_points=2000,\n # N_sample_points=300,\n )\n )\n\n if \"viewing_constraint\" in self.optimize_option:\n loss_dict.update(viewing_constraint_loss(self, Twc, all_obj_info))\n\n if \"print_loss_dict\" in self.optimize_option:\n for k, v in loss_dict.items():\n # if \"_62\" not in k:\n # continue\n print(k, \"=\", float(v))\n loss = sum(list(loss_dict.values()))\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n t.set_description(\"Loss: %f\" % float(loss))\n t.refresh()\n # dump image\n if i_step % 20 == 0:\n self.save_optimizable_parameters(\n f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n )\n # self.load_optimizable_parameters(\n # f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n # )\n if i_step >= self.N_optim_step - 20:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n write_idx_on_image=False,\n render_mask=True,\n h=512,\n w=1280,\n )\n else:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n render_mask=False,\n h=self.h,\n w=self.w,\n )\n dump_optimization_meta_to_file(\n filepath=f\"{self.output_path}/{i_step:06d}.optim.json\",\n obj_pose_dict=self.object_pose_dict,\n )" }, { "identifier": "read_real_scene_localization", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization(pose_path: str, transform_info_json_path: str):\n pose_dict = {}\n transform_info = read_json(transform_info_json_path)\n trans_colmap_to_arkit = np.array(transform_info[\"transform_colmap_to_arkit_sRT\"])\n trans_align = np.array(transform_info[\"transform_alignment\"])\n with open(pose_path) as file:\n lines = file.readlines()\n lines = lines[1:]\n for line in lines:\n fname, tx, ty, tz, qx, qy, qz, qw, _, _ = line.strip().split(\" \")\n fname += \".png\"\n pose = np.eye(4)\n pose[0, 3] = tx\n pose[1, 3] = ty\n pose[2, 3] = tz\n # Twc\n pose[:3, :3] = Rotation.from_quat([qx, qy, qz, qw]).as_matrix()\n # pose = np.linalg.inv(pose)\n # pose_ndc = np.linalg.inv(pose_ndc)\n\n # convert to ndc\n # pose_ndc = pose\n # fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n # pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot\n\n # transform to arkit pose\n s, R, t = decompose_to_sRT(trans_colmap_to_arkit)\n # pose_ndc = transform_colmap_to_arkit @ pose_ndc\n # print(s, R, t)\n pose[:3, 3] = R @ (pose[:3, 3] * s) + t\n pose[:3, :3] = R @ pose[:3, :3]\n\n # apply alignment to poses\n pose = trans_align 
@ pose\n\n pose_dict[fname] = {\"pose_slam_Twc\": pose}\n # print(fname, pose)\n return pose_dict" }, { "identifier": "read_testing_config", "path": "optim/misc_utils.py", "snippet": "def read_testing_config():\n conf_cli = OmegaConf.from_cli()\n conf_test_file = OmegaConf.load(conf_cli.config)\n # read dataset config\n conf_test_file[\"dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"dataset_config_path\"]\n )\n conf_test_file[\"bg_dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"bg_dataset_config_path\"]\n )\n\n # processing ckpt\n ckpt_path_dict = {}\n for item in conf_test_file[\"ckpt_lists\"]:\n path = item[\"path\"]\n obj_ids = item[\"obj_ids\"]\n neus_conf = item.get(\"neus_conf\", \"config/neus.yaml\")\n for obj_id in obj_ids:\n ckpt_path_dict[str(obj_id)] = {\"path\": path, \"neus_conf\": neus_conf}\n conf_test_file[\"ckpt_path_dict\"] = ckpt_path_dict\n\n conf_merged = OmegaConf.merge(conf_test_file, conf_cli)\n return conf_merged" } ]
import sys
import os
import torch
import numpy as np
import imageio
import time
import cv2
from tqdm import tqdm
from argparse import ArgumentParser
from utils.util import read_json, read_yaml
from optim.room_optimizer import RoomOptimizer
from optim.misc_utils import read_real_scene_localization, read_testing_config
from scipy.spatial.transform import Rotation
14,238
os.environ["OMP_NUM_THREADS"] = "1"  # noqa
os.environ["MKL_NUM_THREADS"] = "1"  # noqa
sys.path.append(".")  # noqa


def render_frame(config, target_dir):
    # or load from config
    active_instance_id = config.active_instance_id
    dataset_config = config.dataset_config["dataset"]
    scene_info_json_path = config.scene_info_json
    active_instance_id = [0]
    for obj_info in read_json(scene_info_json_path)["objs"]:
        active_instance_id += [obj_info["id"]]
    bg_scale_factor = 1
    bg_scene_center = [0, 0, 0]
    if config.bg_dataset_config != "":
        bg_dataset_config = config.bg_dataset_config["dataset"]
        bg_scale_factor = bg_dataset_config["scale_factor"]
        bg_scene_center = bg_dataset_config["scene_center"]

    # initialize room optimizer
os.environ["OMP_NUM_THREADS"] = "1"  # noqa
os.environ["MKL_NUM_THREADS"] = "1"  # noqa
sys.path.append(".")  # noqa


def render_frame(config, target_dir):
    # or load from config
    active_instance_id = config.active_instance_id
    dataset_config = config.dataset_config["dataset"]
    scene_info_json_path = config.scene_info_json
    active_instance_id = [0]
    for obj_info in read_json(scene_info_json_path)["objs"]:
        active_instance_id += [obj_info["id"]]
    bg_scale_factor = 1
    bg_scene_center = [0, 0, 0]
    if config.bg_dataset_config != "":
        bg_dataset_config = config.bg_dataset_config["dataset"]
        bg_scale_factor = bg_dataset_config["scale_factor"]
        bg_scene_center = bg_dataset_config["scene_center"]

    # initialize room optimizer
room_optimizer = RoomOptimizer(
2
2023-10-15 08:41:29+00:00
16k
WenzhengZhang/Seq2seqCoref
trainer.py
[ { "identifier": "CorefAllMetrics", "path": "metrics.py", "snippet": "class CorefAllMetrics(object):\n \"\"\"\n Wrapper for coreference resolution metrics.\n \"\"\"\n\n @staticmethod\n def _get_mention_to_x(clusters: List[list]) -> dict:\n mention_to_x = {}\n for cluster in clusters:\n for m in cluster:\n mention_to_x[m] = tuple(cluster)\n return mention_to_x\n\n def _compute_mention_detect_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]):\n # mention detection evaluation\n mention_evaluator = MentionEvaluator()\n results = {}\n predicted_mentions = list(self._get_mention_to_x(\n predicted_clusters).keys())\n gold_mentions = list(self._get_mention_to_x(gold_clusters).keys())\n mention_evaluator.update(predicted_mentions, gold_mentions)\n mention_precision, mention_recall, mention_f1 = \\\n mention_evaluator.get_prf()\n results['precision'] = mention_precision\n results['recall'] = mention_recall\n results['f1'] = mention_f1\n return results\n\n def _compute_coref_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]) \\\n -> Dict[str, Dict[str, float]]:\n \"\"\"\n Compute all coreference metrics given a list of gold cluster and a list of predicted clusters.\n \"\"\"\n mention_to_predicted = self._get_mention_to_x(predicted_clusters)\n mention_to_gold = self._get_mention_to_x(gold_clusters)\n result = {}\n metric_name_evals = [('muc', Evaluator(muc)),\n ('b_cubed', Evaluator(b_cubed)),\n ('ceaf', Evaluator(ceafe))]\n\n for name, evaluator in metric_name_evals:\n evaluator.update(predicted_clusters, gold_clusters,\n mention_to_predicted, mention_to_gold)\n result[name] = {\n 'precision': evaluator.get_precision(),\n 'recall': evaluator.get_recall(),\n 'f1': evaluator.get_f1()\n }\n\n result['average'] = {\n 'precision': sum(\n [result[k]['precision'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'recall': sum(\n [result[k]['recall'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'f1': sum([result[k]['f1'] for k, _ in metric_name_evals]) / len(\n metric_name_evals)\n }\n\n return result\n\n @staticmethod\n def _average_nested_dict(\n list_nested_dict: List[Dict[str, Dict[str, float]]]) -> Dict[\n str, Dict[str, float]]:\n \"\"\"\n Given a list of 2-level nested dict, compute the average.\n \"\"\"\n result_dict = {}\n\n # sum up all values\n for outer_dict in list_nested_dict:\n for key_outer, value_outer in outer_dict.items():\n if key_outer not in result_dict:\n result_dict[key_outer] = {}\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[\n key_outer].get(\n key_inner, 0.0) + value_inner\n\n # take the average\n for key_outer, value_outer in result_dict.items():\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[key_outer][\n key_inner] / len(\n list_nested_dict)\n\n return result_dict\n\n def get_all_metrics(self, labels: List[List[List[Tuple[int, int]]]],\n preds: List[List[List[Tuple[int, int]]]]) \\\n -> Dict[str, Dict[str, Dict[str, float]]]:\n \"\"\"\n Compute all metrics for coreference resolution.\n In input are given two list of mention groups, for example:\n [ # this is the corpus level, with a list of documents\n [ # this is the document level, with a list of mention clusters\n [ # this is the cluster level, with a list of spans\n (5, 7),\n (11, 19),\n ...\n ],\n ...\n ]\n ]\n \"\"\"\n assert len(labels) == len(preds)\n result = {}\n\n # compute micro-averaged scores (treat all clusters from all 
docs as a single list of clusters)\n gold_clusters = [\n [(i,) + span for span in cluster] for i, clusters in\n enumerate(labels) for cluster in clusters\n ]\n predicted_clusters = [\n [(i,) + span for span in cluster] for i, clusters in\n enumerate(preds) for cluster in clusters\n ]\n coref_ment_results = self._compute_coref_metrics(gold_clusters,\n predicted_clusters)\n ment_results = self._compute_mention_detect_metrics(gold_clusters,\n predicted_clusters)\n coref_ment_results['mention_detect'] = ment_results\n result['micro'] = coref_ment_results\n\n # compute macro-averaged scores (compute p/r/f1 for each doc first, then take average per doc)\n doc_metrics = []\n for gold_clusters, predicted_clusters in zip(labels, preds):\n doc_metrics.append(self._compute_coref_metrics(\n gold_clusters, predicted_clusters\n ))\n result['macro'] = self._average_nested_dict(doc_metrics)\n\n return result" }, { "identifier": "get_document_predicts", "path": "data.py", "snippet": "def get_document_predicts(doc_preds: List[List]) -> List[\n List[Tuple[int, int]]]:\n \"\"\"\n Aggregate predictions for each chunk into document-level predictions.\n \"\"\"\n if len(doc_preds) == 0:\n return []\n graph = nx.compose_all([nx.complete_graph(p) for p in doc_preds])\n\n processed_groups = []\n for component in nx.connected_components(graph):\n processed_group = []\n for start, end in sorted(component, key=lambda x: (x[0], -x[1])):\n # add this entity if it does not overlap with the previous one\n condition = not any(\n [s < start < e < end for (s, e) in processed_group])\n # if len(processed_group) == 0 or start >= processed_group[-1][1]:\n # processed_group.append((start, end))\n if len(processed_group) == 0 or condition:\n processed_group.append((start, end))\n\n processed_groups.append(processed_group)\n\n return [[(start, end) for start, end in group] for group in\n processed_groups]" }, { "identifier": "parse_int_output_tokens", "path": "data.py", "snippet": "def parse_int_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n thred, is_tagging):\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_output_ids = []\n if is_tagging:\n new_input_ids = [special_ids['copy'] for t in input_ids if\n t != tokenizer.pad_token_id and t != special_ids[\n 'eos']]\n new_input_ids.append(special_ids['eos'])\n else:\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n token_mentions = []\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n new_id += 1\n ment_start_stack.append([new_id, 'name', []])\n if is_tagging:\n new_output_ids.append(output_ids[i])\n elif output_ids[i] == special_ids['mention_end']:\n new_id += 0\n if is_tagging:\n new_output_ids.append(output_ids[i])\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n if item[1] == \"ent\":\n unmatched_clusters[tuple(item[-1])].append(\n (item[0], new_id))\n else:\n # a normal token\n # if output_ids[i] == special_ids['sep']:\n # status = \"ent\"\n if len(ment_start_stack) > 0:\n # inside some entities\n if output_ids[i] == special_ids['sep']:\n ment_start_stack[-1][1] = \"ent\"\n if is_tagging:\n new_output_ids.append(output_ids[i])\n else:\n if ment_start_stack[-1][1] == 'ent':\n ment_start_stack[-1][2].append(output_ids[i])\n if is_tagging:\n new_output_ids.append(output_ids[i])\n elif ment_start_stack[-1][1] == 'name':\n new_id += 1\n rec_ids.append(output_ids[i])\n if 
is_tagging:\n new_output_ids.append(input_ids[new_id])\n else:\n raise ValueError('wrong status')\n else:\n # outside\n new_id += 1\n rec_ids.append(output_ids[i])\n if is_tagging:\n new_output_ids.append(input_ids[new_id])\n if output_ids[i] == special_ids['mention_start']:\n new_id -= 1\n # thred = 1 if allow_singletons else 2\n # Needleman-Wunsch text alignment algorithm\n wrong_reconstruction = (rec_ids != new_input_ids)\n if wrong_reconstruction:\n print(f'new input ids {new_input_ids}')\n print(f'reconstructed ids {rec_ids}')\n print(f'out ids {output_ids}')\n print('wrong reconstruction! please debug')\n matching = global_align(new_input_ids, rec_ids)\n\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n token_mentions.append((new_start, new_end))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n token_mentions = list(set(token_mentions))\n else:\n clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v\n in\n unmatched_clusters.values()]\n predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=\n thred]\n token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()\n for m in v]\n token_mentions = list(set(token_mentions))\n if not is_tagging:\n new_output_ids = output_ids\n return predict_clusters, token_mentions, new_output_ids" }, { "identifier": "parse_short_target_tokens", "path": "data.py", "snippet": "def parse_short_target_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n align_mode, thred, split_sentence):\n # support mark sentence, align sentence by sentence\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n ment_start_stack.append([new_id + 1, 'name', []])\n elif output_ids[i] == special_ids['mention_end']:\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n if item[1] == \"ent\":\n unmatched_clusters[tuple(item[-1])].append(\n (item[0], new_id))\n else:\n # a normal token\n if len(ment_start_stack) > 0:\n # inside some entities\n if output_ids[i] == special_ids['sep']:\n ment_start_stack[-1][1] = \"ent\"\n else:\n if ment_start_stack[-1][1] == 'ent':\n ment_start_stack[-1][2].append(output_ids[i])\n elif ment_start_stack[-1][1] == 'name':\n new_id += 1\n rec_ids.append(output_ids[i])\n else:\n raise ValueError('wrong status')\n\n else:\n # outside\n new_id += 1\n rec_ids.append(output_ids[i])\n # mapping.append(new_id)\n # thred = 1 if allow_singletons else 2\n # Affine global text alignment algorithm\n if split_sentence:\n input_sents = split_list(\n new_input_ids, special_ids['sentence_start'], True)\n out_sents = split_list(rec_ids, special_ids['sentence_start'], True)\n try:\n assert len(input_sents) == len(out_sents)\n aligned_input_ids, aligned_rec_ids, matching = [], [], {}\n input_offset, out_offset = 0, 0\n for 
input_sent, out_sent in zip(input_sents, out_sents):\n aligned_input_sent, aligned_out_sent, sent_match = \\\n affine_global_align(input_sent, out_sent,\n special_ids['copy'],\n align_mode)\n aligned_input_ids.extend(aligned_input_sent)\n aligned_rec_ids.extend(aligned_out_sent)\n matching.update(\n {k + out_offset: v + input_offset for k, v in\n sent_match.items()})\n input_offset += len(input_sent)\n out_offset += len(out_sent)\n except AssertionError:\n print(f'input sents and out sents different length '\n f'{len(input_sents)} vs {len(out_sents)}, have to use '\n f'global alignment')\n aligned_input_ids, aligned_rec_ids, matching = affine_global_align(\n new_input_ids, rec_ids, special_ids['copy'], align_mode)\n else:\n aligned_input_ids, aligned_rec_ids, matching = affine_global_align(\n new_input_ids, rec_ids, special_ids['copy'], align_mode)\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n return predict_clusters, aligned_input_ids, aligned_rec_ids" }, { "identifier": "parse_nonint_output_tokens", "path": "data.py", "snippet": "def parse_nonint_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map,\n tokenizer,\n add_mention_end,\n thred):\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n token_mentions = []\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n new_id += 1\n ment_start_stack.append(new_id)\n elif add_mention_end and output_ids[i] == special_ids['mention_end']:\n assert output_ids[i + 1] in special_ids['cluster_ids_to_num']\n cid = special_ids['cluster_ids_to_num'][output_ids[i + 1]]\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n unmatched_clusters[cid].append((item, new_id))\n elif output_ids[i] in special_ids['cluster_ids_to_num']:\n if not add_mention_end:\n cid = special_ids['cluster_ids_to_num'][output_ids[i]]\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n unmatched_clusters[cid].append((item, new_id))\n else:\n new_id += 1\n rec_ids.append(output_ids[i])\n if output_ids[i] == special_ids['mention_start']:\n new_id -= 1\n # Needleman-Wunsch text alignment algorithm\n wrong_reconstruction = (rec_ids != new_input_ids)\n # thred = 1 if allow_singletons else 2\n if wrong_reconstruction:\n print(f'new input ids {new_input_ids}')\n print(f'reconstructed ids {rec_ids}')\n print(f'out ids {output_ids}')\n print('wrong reconstruction! 
please debug')\n matching = global_align(new_input_ids, rec_ids)\n\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n token_mentions.append((new_start, new_end))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n token_mentions = list(set(token_mentions))\n else:\n clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v\n in\n unmatched_clusters.values()]\n predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=\n thred]\n token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()\n for m in v]\n token_mentions = list(set(token_mentions))\n return predict_clusters, token_mentions, output_ids" }, { "identifier": "SPECIAL_IDS", "path": "constants.py", "snippet": "SPECIAL_IDS = {\n 'speaker_start': int_tokenizer.encode(SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end': int_tokenizer.encode(SPEAKER_END, add_special_tokens=False)[\n 0],\n 'mention_start': int_tokenizer.encode(MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': int_tokenizer.encode(MENTION_END, add_special_tokens=False)[\n 0],\n 'sep': int_tokenizer.encode(SEP_TOKEN, add_special_tokens=False)[0],\n 'copy': int_tokenizer.encode(COPY, add_special_tokens=False)[0],\n 'eos': int_tokenizer.eos_token_id\n}" }, { "identifier": "MARK_SPECIAL_IDS", "path": "constants.py", "snippet": "MARK_SPECIAL_IDS = deepcopy(SPECIAL_IDS)" }, { "identifier": "NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "NON_INT_SPECIAL_IDS = {\n 'speaker_start': non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'cluster_ids': MENTION_ENDS_IDS,\n 'cluster_ids_to_num': END_IDS_TO_NUM,\n 'cluster_new': non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MENTION_END_NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "MENTION_END_NON_INT_SPECIAL_IDS = {\n 'speaker_start': mention_end_non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n mention_end_non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': mention_end_non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': mention_end_non_int_tokenizer.encode(\n MENTION_END,\n add_special_tokens=False)[0],\n 'cluster_ids': CLUSTER_IDS,\n 'cluster_ids_to_num': CLUSTER_IDS_TO_NUM,\n 'cluster_new': mention_end_non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': mention_end_non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': mention_end_non_int_tokenizer.eos_token_id\n}" }, { "identifier": "ShortSeqProcessor", "path": "logits_processor.py", "snippet": "class ShortSeqProcessor(LogitsProcessor):\n\n def 
__init__(self, orig_inputs, special_ids):\n self.orig_inputs = orig_inputs\n self.sentence_start = special_ids['sentence_start']\n self.sentence_end = special_ids['sentence_end']\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids['mention_end']\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + [special_ids['mention_end']]\n self.eos_id = special_ids['eos']\n self.sentence_mask = self.get_sentence_mask(orig_inputs)\n\n def get_sentence_mask(self, orig_inputs: torch.Tensor):\n # index from 1 instead of 0\n return (orig_inputs == self.sentence_start).cumsum(-1)\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n is_sent_start = (input_ids == self.sentence_start)\n is_sent_end = (input_ids == self.sentence_end)\n sent_idx = is_sent_start.sum(-1, keepdim=True)\n unclose_sent = (sent_idx.sum(-1) - is_sent_end.sum(-1)) > 0\n close_sent = (~unclose_sent)\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n unclose_ent = (is_ent[:, -1] & unclose_sent)\n unclose_ment = (is_start.sum(-1) - is_sep.sum(-1)) > 0\n close_ent = (~unclose_ent)\n unclose_ment = (close_ent & unclose_ment & unclose_sent)\n masks = torch.ones_like(scores, dtype=torch.bool)\n masks[unclose_sent, self.sentence_end] = False\n masks[close_sent, self.sentence_start] = False\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n sent_mask = self.sentence_mask.repeat_interleave(num_beams, 0)\n cur_sent_mask = (sent_mask != sent_idx)\n sent_ids = orig_ids.masked_fill(cur_sent_mask, self.sentence_end)\n masks[unclose_sent] = masks[unclose_sent].scatter(1, sent_ids[\n unclose_sent], False)\n masks[unclose_sent, self.sentence_start] = True\n masks[unclose_ent, torch.tensor(self.ent_ids).unsqueeze(1)] = False\n masks[close_ent, self.mention_start] = False\n masks[unclose_ment, self.sep] = False\n is_eos = (close_sent & (sent_idx.sum(-1) == sent_mask[:, -1]))\n masks[is_eos] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" }, { "identifier": "IntProcessor", "path": "logits_processor.py", "snippet": "class IntProcessor(LogitsProcessor):\n\n def __init__(self, orig_inputs, special_ids, seq2seq_type):\n \"\"\"\n\n :param orig_inputs: original input_ids\n :param special_ids: dict with keys:[mention_start, mention_end, sep,\n integers]\n \"\"\"\n self.orig_inputs = orig_inputs\n self.seq2seq_type = seq2seq_type\n self.special_ids = special_ids\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids['mention_end']\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + [special_ids['mention_end']]\n self.specials = [self.mention_start, self.sep] + self.ent_ids\n if self.seq2seq_type == 'action' or self.seq2seq_type == 'tagging' or \\\n self.seq2seq_type == 'input_feed':\n self.copy_id = special_ids['copy']\n self.specials.append(self.copy_id)\n self.eos_id = special_ids['eos']\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n\n :param input_ids: BC x l\n :param scores: BC x V\n :return:\n \"\"\"\n # input_ids : B x L\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n 
is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n is_copy = ((~is_start) & (~is_ent) & (~is_end))\n unclose_ent = is_ent[:, -1]\n unclose_ment = (is_start.sum(-1) - is_sep.sum(-1)) > 0\n unclose_ment = ((~unclose_ent) & unclose_ment)\n # -1 for <pad> at begining\n num_copied = is_copy.sum(-1) - 1\n masks = torch.ones_like(scores, dtype=torch.bool)\n close_ent = (~unclose_ent)\n num_copied = num_copied.clamp(max=self.orig_inputs.size(1) - 1)\n # unclosed ent only allows to generate cluster ids or end mention id\n masks[unclose_ent, torch.tensor(self.ent_ids).unsqueeze(1)] = False\n masks[close_ent, self.mention_start] = False\n masks[unclose_ment, self.sep] = False\n # get next copy id\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n next_ids = orig_ids[torch.arange(scores.size(0)), num_copied]\n if self.seq2seq_type == 'tagging':\n masks[close_ent, self.copy_id] = False\n else:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[close_ent, next_ids[close_ent]] = scores[close_ent,\n self.copy_id]\n masks[close_ent, next_ids[close_ent]] = False\n is_eos = (close_ent & (next_ids == self.eos_id))\n masks[is_eos, torch.tensor(self.specials).unsqueeze(1)] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" }, { "identifier": "NonIntProcessor", "path": "logits_processor.py", "snippet": "class NonIntProcessor(LogitsProcessor):\n\n def __init__(self, orig_inputs, special_ids,\n seq2seq_type,\n add_mention_end):\n \"\"\"\n\n :param orig_inputs: original input_ids\n :param special_ids: dict with keys:[mention_start, mention_end, sep,\n integers]\n :param add_mention_end: whether predict mention end before predict\n cluster ids\n \"\"\"\n self.orig_inputs = orig_inputs\n self.special_ids = special_ids\n self.seq2seq_type = seq2seq_type\n self.mention_start = special_ids['mention_start']\n if add_mention_end:\n self.mention_end = special_ids['mention_end']\n else:\n self.mention_end = None\n self.cluster_ids = torch.tensor(special_ids['cluster_ids'],\n dtype=torch.long)\n self.cluster_new = special_ids['cluster_new']\n self.copy_id = special_ids['copy']\n self.eos_id = special_ids['eos']\n self.first_cluster_id = special_ids['cluster_ids'][0]\n self.last_cluster_id = special_ids['cluster_ids'][-1]\n self.add_mention_end = add_mention_end\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n\n :param input_ids: BC x l\n :param scores: BC x V\n :return:\n \"\"\"\n # input_ids : B x L\n cluster_ids = self.cluster_ids.to(input_ids.device)\n range_indices = torch.arange(scores.size(0))\n is_not_cid = torch.isin(input_ids, cluster_ids, invert=True)\n is_not_start = (input_ids != self.mention_start)\n if self.add_mention_end:\n is_not_end = (input_ids != self.mention_end)\n unclosed_ent = (input_ids[:, -1] == self.mention_end)\n close_ent = (~unclosed_ent)\n is_copy = (is_not_start & is_not_end & is_not_cid)\n else:\n is_not_end = is_not_cid\n is_copy = (is_not_start & is_not_end)\n unclosed_ment = (is_not_start.sum(-1) - is_not_end.sum(-1)) < 0\n if self.add_mention_end:\n unclosed_ment = (close_ent & unclosed_ment)\n # -1 for <pad> at begining\n num_copied = is_copy.sum(-1) - 1\n masks = torch.ones_like(scores, dtype=torch.bool)\n num_copied = 
num_copied.clamp(max=self.orig_inputs.size(1) - 1)\n # unclosed ent only allows to generate cluster ids or end mention id\n # masks[:, self.specials] = False\n if self.add_mention_end:\n masks[close_ent, self.mention_start] = False\n masks[unclosed_ment, self.mention_end] = False\n else:\n masks[:, self.mention_start] = False\n # notice: make sure </mk> and </mk+1> are next to each other in vocab\n cluster_input_ids = input_ids.masked_fill(\n is_not_cid,\n self.first_cluster_id - 1)\n next_cids = cluster_input_ids.amax(-1) + 1\n if self.add_mention_end:\n has_prev_ends = (unclosed_ent & (next_cids > self.first_cluster_id))\n masks[unclosed_ent, next_cids[unclosed_ent]] = False\n else:\n has_prev_ends = (unclosed_ment & (next_cids >\n self.first_cluster_id))\n masks[unclosed_ment, next_cids[unclosed_ment]] = False\n\n masks[has_prev_ends] = masks[has_prev_ends].scatter(\n 1, cluster_input_ids[has_prev_ends], False)\n masks[has_prev_ends, self.first_cluster_id - 1] = True\n # get next copy id\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n next_ids = orig_ids[range_indices, num_copied]\n if self.add_mention_end:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[close_ent, next_ids[close_ent]] = scores[close_ent,\n self.copy_id]\n scores[unclosed_ent, next_cids[unclosed_ent]] = scores[\n unclosed_ent, self.cluster_new]\n masks[close_ent, next_ids[close_ent]] = False\n else:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[range_indices, next_ids] = scores[:, self.copy_id]\n scores[unclosed_ment, next_cids[unclosed_ment]] = scores[\n unclosed_ment,\n self.cluster_new]\n masks[range_indices, next_ids] = False\n is_eos = (next_ids == self.eos_id)\n masks[is_eos] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" } ]
import time import torch.distributed as dist import sys import numpy as np import os import json import re import torch.nn as nn import torch import shutil import math import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.distributed.parallel_loader as pl import smdistributed.modelparallel.torch as smp import safetensors.torch from tqdm.auto import tqdm from transformers.trainer_utils import HPSearchBackend, speed_metrics, \ TrainOutput from pathlib import Path from torch.utils.data import RandomSampler from torch.utils.data.distributed import DistributedSampler from transformers.trainer_callback import TrainerState from transformers.trainer import TRAINER_STATE_NAME, OptimizerNames from transformers.utils import is_apex_available from transformers.integrations import hp_params from transformers import Seq2SeqTrainer from packaging import version from collections import defaultdict from metrics import CorefAllMetrics from typing import Dict, Union, Any, Optional, Tuple, List from transformers.debug_utils import DebugOption, DebugUnderflowOverflow from transformers.pytorch_utils import is_torch_less_than_1_11 from torch.utils.data import DataLoader from transformers.trainer_utils import EvalLoopOutput, has_length, \ denumpify_detensorize, ShardedDDPOption from data import get_document_predicts, parse_int_output_tokens, \ parse_short_target_tokens, parse_nonint_output_tokens from constants import SPECIAL_IDS, MARK_SPECIAL_IDS, NON_INT_SPECIAL_IDS, \ MENTION_END_NON_INT_SPECIAL_IDS from transformers.deepspeed import deepspeed_init from transformers.trainer_pt_utils import find_batch_size, nested_concat, \ nested_numpify, IterableDatasetShard, nested_truncate, get_parameter_names from transformers.modeling_utils import PreTrainedModel, unwrap_model, \ load_sharded_checkpoint from transformers.utils import logging, is_torch_tpu_available, \ is_sagemaker_mp_enabled, is_safetensors_available, SAFE_WEIGHTS_NAME, \ WEIGHTS_NAME, WEIGHTS_INDEX_NAME from transformers.integrations import is_fairscale_available from transformers.dependency_versions_check import dep_version_check from smdistributed.modelparallel import __version__ as SMP_VERSION from apex import amp from transformers import LogitsProcessorList from logits_processor import ShortSeqProcessor, IntProcessor, NonIntProcessor from transformers.trainer_seq2seq import is_deepspeed_zero3_enabled
10939
logger.info( "\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sur the model has been saved by process 0. if is_torch_tpu_available(): xm.rendezvous("load_best_model_at_end") elif args.local_rank != -1: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = 
MENTION_END_NON_INT_SPECIAL_IDS if \
if is_torch_tpu_available(check_device=False): if is_fairscale_available(): dep_version_check("fairscale") if is_sagemaker_mp_enabled(): IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse( "1.10") else: IS_SAGEMAKER_MP_POST_1_10 = False if is_safetensors_available(): if is_apex_available(): logger = logging.get_logger(__name__) TRAINING_ARGS_NAME = "training_args.bin" TRAINER_STATE_NAME = "trainer_state.json" OPTIMIZER_NAME = "optimizer.pt" SCHEDULER_NAME = "scheduler.pt" SCALER_NAME = "scaler.pt" class CorefTrainer(Seq2SeqTrainer): def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if self.args.val_after_train and self.args.eval_delay < \ self.state.global_step: for checkpoint in checkpoints_sorted[:-1]: states_dir = [str(x) for x in Path( checkpoint).glob(f'global_step*') if os.path.isdir(x)] for state_dir in states_dir: logger.info(f"Deleting optimizer states of saved " f"checkpoint {checkpoint}") if os.path.exists(state_dir) and os.path.isdir( state_dir): shutil.rmtree(state_dir) else: if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[ -1] != self.state.best_model_checkpoint ): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len( checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[ :number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel) and not hasattr( self.model, 'save_pretrained'): if state_dict is None: state_dict = self.model.state_dict() if isinstance(unwrap_model(self.model), PreTrainedModel): unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) else: logger.info( "Trainer.model is not a `PreTrainedModel`, only saving its state dict.") # if self.args.save_safetensors: # safetensors.torch.save_file(state_dict, # os.path.join(output_dir, # SAFE_WEIGHTS_NAME)) # else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def _inner_training_loop( self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None ): self._train_batch_size = batch_size # Data loader and number of training steps train_dataloader = self.get_train_dataloader() # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( args.max_steps % num_update_steps_per_epoch > 0 ) # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's # the best we can do. num_train_samples = args.max_steps * total_train_batch_size else: max_steps = math.ceil( args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples( train_dataloader) * args.num_train_epochs elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size max_steps = args.max_steps # Setting a very large number of epochs so we go as many times as necessary over the iterator. num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not have a length, was" f" {args.max_steps}" ) if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: # nn.DataParallel(model) replicates the model, creating new variables and module # references registered here no longer work on other gpus, breaking the module raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" " (torch.distributed.launch)." 
) else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa delay_optimizer_creation = ( self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE or is_sagemaker_mp_enabled() or self.fsdp is not None ) if args.deepspeed: deepspeed_engine, optimizer, lr_scheduler = deepspeed_init( self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint ) self.model = deepspeed_engine.module self.model_wrapped = deepspeed_engine self.deepspeed = deepspeed_engine self.optimizer = optimizer self.lr_scheduler = lr_scheduler elif not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None # Activate gradient checkpointing if needed if args.gradient_checkpointing: self.model.gradient_checkpointing_enable() model = self._wrap_model(self.model_wrapped) if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None: self._load_from_checkpoint(resume_from_checkpoint, model) # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model if delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(resume_from_checkpoint) # important: at this point: # self.model is the Transformers Model # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc. # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Num Epochs = {num_train_epochs}") logger.info( f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") logger.info( f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps}") logger.info( f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}" ) self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None # Check if continuing training from a checkpoint if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) epochs_trained = self.state.global_step // num_update_steps_per_epoch if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % ( num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info( " Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info( f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` " "flag to your launch command, but you will resume the training on data already seen by your model." 
) if self.is_local_process_zero() and not args.disable_tqdm: steps_trained_progress_bar = tqdm( total=steps_trained_in_current_epoch) steps_trained_progress_bar.set_description( "Skipping the first batches") # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial # parameter to Train when using DDP. self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() # tr_loss is a tensor to avoid synchronization of TPUs through .item() tr_loss = torch.tensor(0.0).to(args.device) # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() self.control = self.callback_handler.on_train_begin(args, self.state, self.control) # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point. if not args.ignore_data_skip: for epoch in range(epochs_trained): is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance( train_dataloader.sampler, RandomSampler ) if is_torch_less_than_1_11 or not is_random_sampler: # We just need to begin an iteration to create the randomization of the sampler. # That was before PyTorch 1.11 however... if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) for _ in train_dataloader: break else: # Otherwise we need to call the whooooole sampler cause there is some random operation added # AT THE VERY END! _ = list(train_dataloader.sampler) if args.manual_empty_cache: torch.cuda.empty_cache() for epoch in range(epochs_trained, num_train_epochs): if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) if isinstance(train_dataloader, DataLoader) and isinstance( train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) elif hasattr(train_dataloader, "dataset") and isinstance( train_dataloader.dataset, IterableDatasetShard): train_dataloader.dataset.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [ args.device]).per_device_loader(args.device) epoch_iterator = parallel_loader else: epoch_iterator = train_dataloader # Reset the past mems state at the beginning of each epoch if necessary. 
if args.past_index >= 0: self._past = None steps_in_epoch = ( len(epoch_iterator) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps ) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) step = -1 if args.manual_empty_cache: torch.cuda.empty_cache() for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if args.manual_empty_cache: torch.cuda.empty_cache() if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) # if args.manual_empty_cache: # torch.cuda.empty_cache() if ( ((step + 1) % args.gradient_accumulation_steps != 0) and args.local_rank != -1 and args._no_sync_in_gradient_accumulation ): # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. with model.no_sync(): tr_loss_step = self.training_step(model, inputs) else: tr_loss_step = self.training_step(model, inputs) if ( args.logging_nan_inf_filter and not is_torch_tpu_available() and ( torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): # if loss is nan or inf simply add the average of previous logged losses tr_loss += tr_loss / ( 1 + self.state.global_step - self._globalstep_last_logged) else: tr_loss += tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps if self.deepspeed: if args.manual_empty_cache: torch.cuda.empty_cache() self.deepspeed.step() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ): # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed: # deepspeed does its own clipping if self.do_grad_scaling: # Reduce gradients first for XLA if is_torch_tpu_available(): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) # AMP: gradients need unscaling self.scaler.unscale_(self.optimizer) if is_sagemaker_mp_enabled() and args.fp16: self.optimizer.clip_master_grads(args.max_grad_norm) elif hasattr(self.optimizer, "clip_grad_norm"): # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping self.optimizer.clip_grad_norm(args.max_grad_norm) elif hasattr(model, "clip_grad_norm_"): # Some models (like FullyShardedDDP) have a specific way to do gradient clipping model.clip_grad_norm_(args.max_grad_norm) else: # Revert to normal clipping otherwise, handling Apex or full precision nn.utils.clip_grad_norm_( amp.master_params( self.optimizer) if self.use_apex else model.parameters(), args.max_grad_norm, ) # Optimizer step optimizer_was_run = True if self.deepspeed: pass # called outside the loop elif is_torch_tpu_available(): if self.do_grad_scaling: 
self.scaler.step(self.optimizer) self.scaler.update() else: xm.optimizer_step(self.optimizer) elif self.do_grad_scaling: scale_before = self.scaler.get_scale() self.scaler.step(self.optimizer) self.scaler.update() scale_after = self.scaler.get_scale() optimizer_was_run = scale_before <= scale_after else: self.optimizer.step() if optimizer_was_run and not self.deepspeed: self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch if args.manual_empty_cache: torch.cuda.empty_cache() self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: break if step < 0: logger.warning( "There seems to be not a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_tpu_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") logger.info( "\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sur the model has been saved by process 0. if is_torch_tpu_available(): xm.rendezvous("load_best_model_at_end") elif args.local_rank != -1: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. 
if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \
self.args.add_mention_end else NON_INT_SPECIAL_IDS
7
2023-10-17 17:39:16+00:00
16k
chenxn2020/GOSE
GOSEfinetune/models/LiLTRobertaLike/modeling_LiLTRobertaLike.py
[ { "identifier": "LiLTRobertaLikeConfig", "path": "GOSEfinetune/models/LiLTRobertaLike/configuration_LiLTRobertaLike.py", "snippet": "class LiLTRobertaLikeConfig(RobertaConfig):\n model_type = \"liltrobertalike\"\n\n def __init__(\n self,\n channel_shrink_ratio=4,\n max_2d_position_embeddings=1024,\n **kwargs\n ):\n super().__init__(\n **kwargs,\n )\n self.channel_shrink_ratio = channel_shrink_ratio\n self.max_2d_position_embeddings = max_2d_position_embeddings" }, { "identifier": "RE", "path": "GOSEfinetune/modules/decoders/RE.py", "snippet": "class RE(nn.Module):\n def __init__(self,args):\n super().__init__()\n self.cnt=0\n self.rounds = 5\n self.hidden_size = 960\n self.dim = self.hidden_size // 2\n self.hidden_dropout_prob = 0.5\n self.max_key = 64\n self.max_value = 64\n self.pooling_mode = 'max'\n self.softmax = nn.Softmax(dim=-1)\n self.loss_fct = CrossEntropyLoss()\n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n\n self.output = nn.Linear(self.dim,2)\n self.k_up = nn.Linear(2,self.dim)\n self.v_up = nn.Linear(2,self.dim)\n\n self.type_token = nn.Parameter(torch.normal(0,0.0002,size=(1,self.hidden_size)))\n self.biaffine_type = BiaffineAttention(self.dim , 3)\n self.biaffine = BiaffineAttention(self.dim , 2)\n self.ffn = nn.Linear(2,self.dim)\n self.ffn_type = nn.Linear(3,self.dim)\n self.attn_type = Attention_logits(self.dim,max_len=self.max_key)\n self.attn = Attention_logits(self.dim,max_len=self.max_key)\n\n self.key_type_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_type_ffn = nn.Linear(self.hidden_size,self.dim )\n self.key_multi_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_multi_ffn = nn.Linear(self.hidden_size,self.dim )\n self.key_single_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_single_ffn = nn.Linear(self.hidden_size,self.dim )\n\n self.classifier = nn.Linear(self.dim * 2,2)\n \"\"\"\n self.text_biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n \"\"\"\n def devide_entities(self,entities):\n \"\"\"\n devide entities into keys and values according there entities label\n return entities index\n \"\"\"\n entities_label_list = entities['label']\n key_index = [index for index,label in enumerate(entities_label_list) if label == 1]\n value_index = [index for index,label in enumerate(entities_label_list) if label == 2]\n\n key_num = len(key_index)\n value_num = len(value_index)\n\n M = self.max_key\n N = self.max_value \n\n if not key_num * value_num :\n key_index = [0]\n value_index = [1]\n \n if key_num > M :\n key_index = key_index[:M]\n if value_num > N:\n value_index = value_index[:N]\n\n return key_index, value_index \n\n def padding(self,data,N):\n # padding data 2,n,768 -> 2,N,768\n n = data.shape[0] \n dim = data.shape[1]\n device = data.device\n data = F.pad(data,(0,0,0,N-n))\n mask = torch.tensor([1.0]*n + [0.0]*(N-n),device=device)\n return data,mask \n\n def type_classifier(self,key,value,key_mask,value_mask):\n key = self.key_type_ffn(key)\n value = self.value_type_ffn(value)\n \n M = self.max_key\n N = self.max_value + 1\n logits_mask = key_mask.unsqueeze(2).repeat(1,1,N) * \\\n value_mask.unsqueeze(1).repeat(1,M,1) \n for i in range(self.rounds):\n \n logits = self.biaffine_type(key.unsqueeze(2).repeat(1,1,N,1),\n value.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n # B K V H\n logits = self.ffn_type(logits)\n logits = self.attn_type(logits,logits_mask)\n det_key,det_value = self.pooling(logits,key_mask,value_mask)\n key += det_key\n value += det_value \n else: \n logits = logits * 
logits_mask.unsqueeze(3).repeat(1,1,1,3)\n return logits \n \n def multi_classifier(self,key,value,key_mask,value_mask):\n key = self.key_multi_ffn(key)\n value = self.value_multi_ffn(value)\n\n M = key.shape[1]\n N = value.shape[1]\n \n key = key.unsqueeze(2).repeat(1,1,N,1)\n value = value.unsqueeze(1).repeat(1,M,1,1)\n\n multi_logits = self.classifier(torch.cat([key,value],dim=-1))\n\n return multi_logits \n \n def single_classifier(self,key,value,key_mask,value_mask):\n key = self.key_single_ffn(key)\n value = self.value_single_ffn(value)\n \n M = key.shape[1]\n N = value.shape[1]\n \n logits_mask = key_mask.unsqueeze(2).repeat(1,1,N) * \\\n value_mask.unsqueeze(1).repeat(1,M,1) \n \n for i in range(self.rounds):\n logits = self.biaffine(key.unsqueeze(2).repeat(1,1,N,1),\n value.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n # B K V H\n logits = self.ffn(logits)\n logits = self.attn(logits,logits_mask)\n det_key,det_value = self.pooling(logits,key_mask,value_mask)\n key += det_key\n value += det_value \n else: \n logits = logits * logits_mask.unsqueeze(3).repeat(1,1,1,2) \n\n return logits \n \n def forward(self, hidden_state, entities, relations, bbox):\n self.cnt+=1\n #layout_emb,text_emb = hidden_state \n B, max_len, H = hidden_state.shape\n device = hidden_state.device\n M = self.max_key\n N = self.max_value\n loss = 0\n all_pred_relations = []\n\n batch = []\n for b in range(B):\n if len(entities[b]['start']) <= 2:\n entities[b] = {\"end\":[1,1],\"label\":[0,0],\"start\":[0,0]}\n \n key_index,value_index = self.devide_entities(entities[b])\n start_token_index = torch.tensor(entities[b]['start'])\n key_start_token = start_token_index[key_index]\n value_start_token = start_token_index[value_index]\n #b,2,len,dim\n key = hidden_state[b][key_start_token,:]\n value = hidden_state[b][value_start_token,:]\n\n key,key_mask = self.padding(key,self.max_key)\n value = torch.cat([self.type_token,value],dim=0)\n value,value_mask = self.padding(value,self.max_value+1)\n\n batch.append((key,value,key_mask,value_mask))\n \n \n org_key = torch.stack([item[0] for item in batch],dim=0)\n org_value = torch.stack([item[1] for item in batch],dim=0)\n key_mask = torch.stack([item[2] for item in batch],dim=0)\n value_mask = torch.stack([item[3] for item in batch],dim=0)\n\n type_logits = self.type_classifier(org_key,org_value,key_mask,value_mask)\n \"\"\"\n self.type_token 0 - no link \n 1 - single link \n 2 - multilink\n B M N+1 3/\n \"\"\"\n \n org_value = org_value[:,1:,:]\n value_mask = value_mask[:,1:]\n\n type_token = self.softmax(type_logits[:,:,0])\n key_type = type_token.argmax(dim=-1)\n #so far we can get key label to route for downstream processing\n type_drop = key_type == 0\n type_single = key_type == 1\n type_multi = key_type == 2\n\n #multi_key = org_key[type_multi]\n multi_logits = self.multi_classifier(org_key,org_value,key_mask,value_mask)\n\n key_mask = key_mask.bool() & type_single\n single_logits = self.single_classifier(org_key,org_value,key_mask,value_mask)\n\n type_loss = self.get_type_loss(type_logits,key_mask,entities,relations)\n multi_loss = self.get_multi_loss(multi_logits,entities,relations)\n single_loss = self.get_single_loss(single_logits,entities,relations)\n\n loss = type_loss + multi_loss + single_loss \n all_pred_relations = self.get_predicted_relations(logits,entities,relations,key_mask,value_mask)\n\n return loss,all_pred_relations\n\n\n def pooling(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = 
table_logits.shape\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n M = key_mask.shape[1]\n N = value_mask.shape[1]\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, N - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_type_loss(self,type_logits,key_mask,entities,relations):\n # logits 2,64,65,3\n logits = self.softmax(type_logits[:,:,0])\n B = logits.shape[0]\n device = logits.device\n key_mask = key_mask.bool()\n loss_fcn = CrossEntropyLoss()\n\n for b in range(B):\n logit = logits[b][key_mask[b]]\n\n from IPython import embed;embed()\n relations \n\n def get_loss(self,logits,entities,relations,key_mask,value_mask):\n loss_fcn = CrossEntropyLoss()\n B = logits.shape[0]\n device = logits.device\n loss = 0\n all_logits = []\n all_labels = []\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n logit = logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n M,N,_ = logit.shape \n\n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n true_label = []\n for i in range(len(key_list)):\n try:\n key = key_index.index(key_list[i])\n value = value_index.index(value_list[i])\n labels[key][value] = 1\n except:\n continue\n \n labels = labels.view(-1).to(dtype=torch.long)\n logit = logit.view(M*N,-1).to(dtype=torch.float)\n \n\n all_logits.append(logit)\n all_labels.append(labels)\n\n all_logits = torch.cat(all_logits,0)\n all_labels = torch.cat(all_labels,0)\n\n loss = loss_fcn(all_logits+1e-10,all_labels)\n return loss \n \n def get_predicted_relations(self,logits,entities,relations,key_mask,value_mask):\n #from IPython import embed;embed()\n softmax = nn.Softmax(dim=-1)\n all_pred_relations = []\n \n device = logits.device\n B = logits.shape[0]\n \n\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n pred_relations = []\n logit = logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n \n M,N,_ = logit.shape\n \n key_index,value_index = self.devide_entities(entities[b])\n for index in range(M*N):\n key = index // N\n value = index % N\n pred_label = logit[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = key_index[key] \n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n \n pred_relations.append(rel)\n \n all_pred_relations.append(pred_relations)\n return all_pred_relations \n \n def get_loss_1(self,l_logits,t_logits,entities,relations,key_mask,value_mask):\n loss_fcn = CrossEntropyLoss()\n B = l_logits.shape[0]\n device = l_logits.device\n loss = 0\n all_layout_logits = []\n all_text_logits 
= []\n all_labels = []\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n l_logit = l_logits[b][key_mask[b]]\n l_logit = l_logit[:,value_mask[b]]\n t_logit = t_logits[b][key_mask[b]]\n t_logit = t_logit[:,value_mask[b]]\n M,N,_ = l_logit.shape \n\n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n true_label = []\n for i in range(len(key_list)):\n try:\n key = key_index.index(key_list[i])\n value = value_index.index(value_list[i])\n labels[key][value] = 1\n true_label.append((key*N+value))\n except:\n continue\n \n labels = labels.view(-1).to(dtype=torch.long)\n layout_logit = l_logit.view(M*N,-1).to(dtype=torch.float)\n text_logit = t_logit.view(M*N,-1).to(dtype=torch.float)\n\n all_layout_logits.append(layout_logit)\n all_text_logits.append(text_logit)\n all_labels.append(labels)\n\n all_layout_logits = torch.cat(all_layout_logits,0)\n all_text_logits = torch.cat(all_text_logits,0)\n all_labels = torch.cat(all_labels,0)\n\n layout_loss = loss_fcn(all_layout_logits+1e-10,all_labels)\n text_loss = loss_fcn(all_text_logits+1e-10,all_labels)\n\n loss = 2*layout_loss + text_loss \n return loss \n \n def get_predicted_relations_1(self,l_logits,t_logits,entities,relations,key_mask,value_mask):\n #from IPython import embed;embed()\n softmax = nn.Softmax(dim=-1)\n all_pred_relations = []\n \n device = l_logits.device\n B = l_logits.shape[0]\n \n\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n pred_relations = []\n l_logit = l_logits[b][key_mask[b]]\n l_logit = l_logit[:,value_mask[b]]\n t_logit = t_logits[b][key_mask[b]]\n t_logit = t_logit[:,value_mask[b]]\n M,N,_ = l_logit.shape\n \n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n for index in range(M*N):\n key = index // N\n value = index % N\n layout_pred_label = l_logit[key][value].argmax(-1)\n text_pred_label = t_logit[key][value].argmax(-1)\n\n if layout_pred_label * text_pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = key_index[key] \n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n \n pred_relations.append(rel)\n \n all_pred_relations.append(pred_relations)\n return all_pred_relations " }, { "identifier": "GOSE", "path": "GOSEfinetune/modules/decoders/gose.py", "snippet": "class GOSE(nn.Module):\n def __init__(self, args):\n super().__init__()\n #(rounds,num_heads)\n # self.rounds = 4\n self.args = args\n self.rounds = args.rounds+1\n self.norm = False\n if args.backbone_name == 'lilt':\n self.hidden_size = 960\n elif args.backbone_name == 'xlm':\n self.hidden_size = 768\n self.hidden_dropout_prob = 0.5\n #默认only-mean pooling\n self.pooling_mode = args.pooling_mode\n self.use_gam = args.use_gam\n self.loss_fct = CrossEntropyLoss()\n self.use_prefix = args.use_prefix\n #---对global-attention使用稀疏注意力\n self.use_global_mask = args.use_global_mask\n #--------\n self.use_gate = args.use_gate\n 
print(f\"**********************************Backbone: {args.backbone_name}****************************\")\n print(f\"**********************************Use_GAM: {self.use_gam}************************************\")\n print(f\"**********************************Use_Prefix: {self.use_prefix}********************************\")\n print(f\"**********************************Use_Gate: {self.use_gate}************************************\")\n # print(f\"**********************************Use_Global_Mask: {self.use_global_mask}**********************\")\n print(f\"**********************************Pooling_Mode: {self.pooling_mode}****************************\")\n print(f\"**********************************Iterative_Rounds: {self.rounds-1}****************************\")\n print(f\"**************************************************************\")\n print(f\"**********************************No_Iteration: {self.args.no_it}********************************\")\n print(f\"**********************************No_Global: {self.args.no_global}********************************\")\n print(f\"**********************************Window_size: {self.args.window_size}********************************\")\n # self.mode = 'only-mean'\n # self.mode = 'only-max'\n # self.mode = 'attn-max'\n\n\n \n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n self.elu=nn.ELU()\n self.biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n self.ffn = nn.Linear(2, self.hidden_size//2)\n self.ffn_key = nn.Linear(self.hidden_size, self.hidden_size//2)\n self.ffn_value = nn.Linear(self.hidden_size, self.hidden_size//2)\n\n # attention config\n self.dim = self.hidden_size //2\n self.num_heads = 1\n self.num_tokens = 8 # max_len = 8\n self.window_size = args.window_size # 8 # window_size * S = H \n self.qkv_bias = False\n self.drop = 0\n self.attn_drop = 0\n self.drop_path = 0\n self.max_len = args.max_len #64\n self.norm1 = nn.LayerNorm(self.dim)\n self.norm2 = nn.LayerNorm(self.dim)\n self.global_token_num = args.global_token_num\n print(f\"**********************************Global_token: {self.global_token_num}****************************\")\n self.global_token = nn.Parameter(torch.zeros(1, self.global_token_num, self.hidden_size //2))\n self.attn = Attention(self.dim,num_heads=self.num_heads, num_tokens=self.num_tokens, \n window_size=self.window_size,qkv_bias=self.qkv_bias, \n attn_drop=self.attn_drop, proj_drop=self.drop, args=args)\n\n self.cnt = 0\n self.loss_fcn = CrossEntropyLoss()\n self.normal = True\n self.dummy_vec = nn.Parameter(torch.Tensor(1, self.hidden_size//2))\n nn.init.normal_(self.dummy_vec)\n #----gate\n self.gru = GRU(self.hidden_size//2) \n #---layout-prefix-tuning\n self.axis_dis_fn = nn.Linear(1, self.hidden_size//12)\n self.axis_angle_fn = nn.Linear(1, self.hidden_size//12)\n \n def create_global_mask(self):\n global_mask = torch.zeros(self.global_token_num, self.max_len, self.max_len).cuda()\n step = self.num_tokens\n for idx in range(self.global_token_num):\n row_ids = idx // self.num_tokens\n column_ids = idx % self.num_tokens\n row_start = row_ids * step\n column_start = column_ids * step\n global_mask[idx, row_start:row_start+self.num_tokens,:] = 1\n global_mask[idx, :, column_start:column_start+self.num_tokens] = 1\n return global_mask\n \n def get_entities_kv_index_list(self, entities):\n\n M = self.max_len\n entities_label = entities['label']\n\n entities_key_index = [index for index,label in enumerate(entities_label) if label == 1 ]\n entities_value_index = [index for index,label in enumerate(entities_label) if 
label == 2 ] \n key_num, value_num = len(entities_key_index),len(entities_value_index)\n '''\n in re.py\n if len(all_possible_relations) == 0:\n all_possible_relations = set([(0, 1)])\n '''\n if key_num * value_num == 0:\n #print(\"all_possible_relations == 0\")\n entities_key_index = [0]\n entities_value_index = [1]\n if key_num > M :\n entities_key_index = entities_key_index[:M]\n self.normal = False\n if value_num > M :\n entities_value_index = entities_value_index[:M]\n self.normal = False\n\n return entities_key_index, entities_value_index\n\n \n def forward(self, hidden_state, entities,relations, bbox):\n #if self.cnt == 30: set the num + 1 which failed\n # from IPython import embed;embed()\n self.cnt += 1\n B ,_ ,H = hidden_state.shape\n M = self.max_len\n device = hidden_state.device\n\n loss = 0\n all_pred_relations = []\n\n # B len(entities)\n # entities_label = torch.stack([torch.tensor(dict['label']) for dict in entities],dim=0)\n # padding to max_len M 64\n \n key_repr_list = []\n value_repr_list = []\n key_mask_list = []\n value_mask_list = []\n key_bbox_list, value_bbox_list = [], []\n for b in range(B):\n #key_repr ~ N,H -> 64,H/2\n #value_repr ~ M,H -> 64,H/2\n if len(entities[b][\"start\"]) <= 2:\n entities[b] = {\"end\": [1, 1], \"label\": [0, 0], \"start\": [0, 0]}\n \n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n entities_first_token_index = torch.tensor(entities[b]['start'])\n \n entities_key_first_token_index = entities_first_token_index[entities_key_index]\n entities_value_first_token_index = entities_first_token_index[entities_value_index]\n key_repr = hidden_state[b][entities_key_first_token_index,:]\n value_repr = hidden_state[b][entities_value_first_token_index,:]\n \n key_num,value_num = key_repr.shape[0],value_repr.shape[0]\n # padding key_repr key_num,H -> max_len,H\n # generate mask shape like max_len,H\n \n key_mask_list.append(torch.tensor([[1.]] * key_num + [[0.]] * (M - key_num),device=device).repeat(1,H//2))\n value_mask_list.append(torch.tensor([[1.]] * value_num + [[0.]] * (M - value_num),device=device).repeat(1,H//2))\n # padding key_repr key_num,H -> max_len,H\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, M - value_num)))\n #----得到kv实体的bbox\n key_bbox = bbox[b][entities_key_first_token_index]\n value_bbox = bbox[b][entities_value_first_token_index]\n key_bbox_list.append(F.pad(key_bbox,(0, 0, 0, M - key_num)))\n value_bbox_list.append(F.pad(value_bbox,(0, 0, 0, M - value_num)))\n\n # batch max_len hidden_size\n key_repr = torch.stack(key_repr_list,dim=0) \n key_mask = torch.stack(key_mask_list,dim=0)\n \n value_repr = torch.stack(value_repr_list,dim=0)\n value_mask = torch.stack(value_mask_list,dim=0)\n \n\n #key_mask * value_mask -> table_mask B,M,H * B,M,H -> B M M H\n table_mask = key_mask.unsqueeze(2).repeat(1,1,M,1)\\\n *value_mask.unsqueeze(1).repeat(1,M,1,1)\n #---global_mask\n if self.use_global_mask:\n self.global_mask = self.create_global_mask()\n global_mask = self.global_mask.unsqueeze(0).repeat(B,1,1,1) #shape[bsz,global_token_num,M,M]\n # global_mask = global_mask.view(B, self.global_token_num, -1)\n else:\n global_mask = None\n \n \n key_mask = key_mask[:,:,0].bool()\n value_mask = value_mask[:,:,0].bool()\n key_ffn = self.ffn_key(key_repr)\n value_ffn = self.ffn_value(value_repr)\n \n if self.norm == True:\n key_ffn = self.norm1(key_repr)\n value_ffn = self.norm1(value_repr)\n global_token = self.global_token.expand(B, 
-1, -1)\n key_bbox = torch.stack(key_bbox_list, dim=0) \n value_bbox = torch.stack(value_bbox_list, dim=0) \n layout_repr = self.calc_layout(key_bbox, value_bbox)\n layout_repr = layout_repr * table_mask\n layout_repr = layout_repr.view(B,M*M,H//2)\n for i in range(self.rounds):\n '''\n method 1 with biaffine \n \n table_mask.shape B M M H/2 -> B M M H (M=64)\n table_logits.shape B M M H/2 -> B M M 2\n B M M 2 -> B M M H\n attention input B (64+1)*64 384\n table input 64 * 64 \n window_size 8\n token_num 64/8 * 64/8 = 64\n '''\n #key_ffn = self.ffn_key(key_repr)\n #value_ffn = self.ffn_value(value_repr)\n #key_ffn = self.ffn_key(key_ffn)\n #value_ffn = self.ffn_value(value_ffn)\n \n table_logits = self.biaffine(key_ffn.unsqueeze(2).repeat(1,1,M,1),\n value_ffn.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n table_logits = self.ffn(table_logits) * table_mask\n \n if self.use_gam:\n table_logits = table_logits.view(B,M*M,H//2)\n \n table_logits = torch.cat((global_token, table_logits), dim=1)\n if self.use_prefix:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=layout_repr, key_num=key_num, value_num=value_num)\n else:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=None)\n global_token_new = table_logits[:,:self.global_token_num,:]\n global_token = global_token + global_token_new\n table_logits = table_logits[:,self.global_token_num:,:]\n table_logits = table_logits.view(B,M,M,H//2)\n table_logits = table_logits * table_mask\n key_new, value_new = self.get_new_repr(table_logits, key_mask, value_mask)\n if self.norm == True:\n key_new = self.norm2(key_new)\n value_new = self.norm2(value_new)\n if self.use_gate:\n key_ffn = self.gru(key_ffn,key_new)\n value_ffn = self.gru(value_ffn,value_new)\n \n elif self.args.no_it:\n key_ffn = key_new\n value_ffn = value_new\n elif self.args.use_add:\n key_ffn = key_ffn + key_new\n value_ffn = value_ffn + value_new \n else:\n table_logits = table_logits * table_mask[:,:,:,:2]\n\n # table_logits M N 2\n # table_logits.unsqueeze(0)\n # batch_table_logits = table_logits if batch_table_logits == None else torch.cat((batch_table_logits,table_logits),dim=0)\n\n loss = self.get_loss(table_logits,entities,relations,key_mask,value_mask)\n all_pred_relations = self.get_predicted_relations(table_logits,entities,key_mask,value_mask, bbox)\n return loss,all_pred_relations\n \n def calc_layout(self, head_bbox, tail_bbox):\n bsz, num, _ = head_bbox.shape\n head_bbox = head_bbox.unsqueeze(2).repeat(1,1,num,1)\n tail_bbox = tail_bbox.unsqueeze(1).repeat(1,num,1,1)\n \n #-----中心点坐标特征\n head_bbox_center = torch.div(torch.cat(((head_bbox[:,:,:,0]+head_bbox[:,:,:,2]).view(-1,1), (head_bbox[:,:,:,1]+head_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n tail_bbox_center = torch.div(torch.cat(((tail_bbox[:,:,:,0]+tail_bbox[:,:,:,2]).view(-1,1), (tail_bbox[:,:,:,1]+tail_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n head_tail_center_dis, hea_tail_center_angle = self.axis_features(head_bbox_center, tail_bbox_center)\n head_tail_center_dis_feature = self.axis_dis_fn(head_tail_center_dis)\n head_tail_center_angle_feature = self.axis_angle_fn(hea_tail_center_angle)\n #-----左上点坐标特征\n head_bbox_left_top = torch.cat((head_bbox[:,:,:, 0].view(-1,1), head_bbox[:,:,:, 1].view(-1,1)), dim=1)\n tail_bbox_left_top = torch.cat((tail_bbox[:,:,:, 0].view(-1,1), tail_bbox[:,:,:, 1].view(-1,1)), dim=1)\n head_tail_lt_dis, hea_tail_lt_angle = self.axis_features(head_bbox_left_top, tail_bbox_left_top)\n head_tail_lt_dis_feature = 
self.axis_dis_fn(head_tail_lt_dis)\n hea_tail_lt_angle_feature = self.axis_angle_fn(hea_tail_lt_angle)\n #-----右下点坐标特征\n head_bbox_right_down = torch.cat((head_bbox[:,:,:, 2].view(-1,1), head_bbox[:,:,:, 3].view(-1,1)), dim=1)\n tail_bbox_right_down = torch.cat((tail_bbox[:,:,:, 2].view(-1,1), tail_bbox[:,:,:, 3].view(-1,1)), dim=1)\n head_tail_rd_dis, hea_tail_rd_angle = self.axis_features(head_bbox_right_down, tail_bbox_right_down)\n head_tail_rd_dis_feature = self.axis_dis_fn(head_tail_rd_dis)\n hea_tail_rd_angle_feature = self.axis_angle_fn(hea_tail_rd_angle)\n layout_repr = torch.cat(\n (head_tail_center_dis_feature, head_tail_center_angle_feature\n , head_tail_lt_dis_feature, hea_tail_lt_angle_feature\n , head_tail_rd_dis_feature, hea_tail_rd_angle_feature\n ),\n dim=-1\n )\n layout_repr = layout_repr.view(bsz, num, num, -1) \n return layout_repr\n \n \n \n def axis_features(self, tmp_bbox_1, tmp_bbox_2):\n tmp_bbox_distance = torch.pow(torch.sum(torch.pow(tmp_bbox_1 - tmp_bbox_2, 2), dim=1), 0.5) #欧氏距离\n tmp_bbox_distance = tmp_bbox_distance.view(-1, 1)\n ##########计算角度\n head_tail_x = tmp_bbox_1[:, 0] - tmp_bbox_2[:, 0]\n head_tail_y = tmp_bbox_1[:, 1] - tmp_bbox_2[:, 1]\n tmp_bbox_angle = torch.div(torch.atan2(head_tail_y, head_tail_x), 3.1416) #正切的角度\n tmp_bbox_angle = tmp_bbox_angle.view(-1, 1)\n return torch.div(tmp_bbox_distance, 1000), tmp_bbox_angle\n\n \n \n \n def get_new_repr(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = table_logits.shape\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, self.max_len - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, self.max_len - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_predicted_relations(self, logists,entities,key_mask,value_mask,bbox):\n all_pred_relations = []\n #logits.shape B,M,N,2\n #here is one batch so no dim B\n B,N,M,_=logists.shape\n for b in range(B):\n\n pred_relations = []\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n \n #---index指的是序列中的第几个实体\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n # if len(entities_key_index) > 64 or len(entities_value_index) > 64:\n # from IPython import embed;embed();exit()\n \n for index in range(M*N):\n key = index // M\n value = index % M\n pred_label = logist[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = entities_key_index[key]\n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = entities_value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n key_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"head_id\"]]].tolist()[:2]\n value_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"tail_id\"]]].tolist()[:2]\n rel[\"link\"] = (tuple(key_bbox_left_top), tuple(value_bbox_left_top))\n 
#--------\n pred_relations.append(rel)\n all_pred_relations.append(pred_relations)\n \n return all_pred_relations\n \n \n def get_loss(self,logists,entities,relations,key_mask,value_mask):\n #mask B M M H\n device = logists.device\n loss = 0\n B = key_mask.shape[0]\n all_logits = []\n all_labels = []\n for b in range(B):\n # 64,64 -> N,M\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n\n\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n entities_key_list = relations[b]['head']\n entities_value_list = relations[b]['tail']\n\n labels = torch.zeros(N*M).to(device).view(N,M)\n \n for i in range(len(entities_key_list)):\n try:\n key = entities_key_index.index(entities_key_list[i])\n value = entities_value_index.index(entities_value_list[i])\n labels[key][value] = 1\n except:\n continue\n \n \n labels = labels.view(-1).to(dtype=torch.long)\n logist = logist.view(N*M,-1).to(dtype=torch.float)\n all_logits.append(logist)\n all_labels.append(labels)\n all_logits = torch.cat(all_logits, 0)\n all_labels = torch.cat(all_labels, 0)\n loss = self.loss_fcn(all_logits+1e-10, all_labels)\n if (torch.isnan(loss).sum().item() > 0):\n loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)\n \n return loss" }, { "identifier": "ReOutput", "path": "GOSEfinetune/utils.py", "snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n entities: Optional[Dict] = None\n relations: Optional[Dict] = None\n pred_relations: Optional[Dict] = None" } ]
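The GOSE decoder in the context above scores every key/value entity pair with a BiaffineAttention(hidden_size//2, 2) module applied to broadcast key and value grids, yielding table_logits of shape (B, M, M, 2). Below is a minimal, hedged sketch of such a biaffine pair scorer; the bilinear-plus-linear formulation is an assumption inferred from the call pattern in the snippet, not the repository's exact implementation.

import torch
import torch.nn as nn

class BiaffineAttention(nn.Module):
    # Sketch (assumed form): a bilinear term x1^T W x2 plus a linear term over
    # the concatenated pair [x1; x2]; the repo's own BiaffineAttention may differ.
    def __init__(self, in_features, out_features):
        super().__init__()
        self.bilinear = nn.Bilinear(in_features, in_features, out_features, bias=False)
        self.linear = nn.Linear(2 * in_features, out_features, bias=True)

    def forward(self, x_1, x_2):
        return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1))

# Usage mirroring the call pattern above: key/value grids of shape (B, M, M, H//2).
B, M, H = 2, 4, 16
key = torch.randn(B, M, M, H).view(-1, H)     # flatten leading dims for the pair scorer
value = torch.randn(B, M, M, H).view(-1, H)
table_logits = BiaffineAttention(H, 2)(key, value).view(B, M, M, 2)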
import math import torch import torch.nn as nn import torch.utils.checkpoint import os from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN, gelu from transformers.file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from .configuration_LiLTRobertaLike import LiLTRobertaLikeConfig from dataclasses import dataclass from typing import Dict, Optional, Tuple from transformers.file_utils import ModelOutput from ...modules.decoders.RE import RE from ...modules.decoders.gose import GOSE from ...utils import ReOutput
12,148
output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LiLTRobertaLikeForRelationExtraction(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.input_type = config.input_type self.freeze_model = config.freeze_model print(f'=============model freeze {self.freeze_model}===============') #from IPython import embed;embed() self.decoder = config.decoder_name if self.decoder == 're': self.extractor = REDecoder(config, config.hidden_size + config.hidden_size // config.channel_shrink_ratio) elif self.decoder == 'gose': self.extractor = GOSE(config) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, entities=None, relations=None, ): if self.input_type == 'bbox': input_mask = input_ids != 1 input_ids[input_mask] = 3 elif self.input_type == 'text': bbox[:,:,:] = 0 if self.freeze_model: with torch.no_grad(): outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) else: outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) seq_length = input_ids.size(1) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) loss, pred_relations = self.extractor(sequence_output, entities, relations, bbox)
# coding=utf-8 logger = logging.get_logger(__name__) class LiLTRobertaLikeTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings, position_ids def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LiLTRobertaLikeLayoutEmbeddings(nn.Module): def __init__(self, config): super(LiLTRobertaLikeLayoutEmbeddings, self).__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.padding_idx = config.pad_token_id self.box_position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size//config.channel_shrink_ratio, padding_idx=self.padding_idx ) self.box_linear_embeddings = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size//config.channel_shrink_ratio) self.LayerNorm = nn.LayerNorm(config.hidden_size//config.channel_shrink_ratio, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, bbox=None, position_ids=None, ): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings) box_position_embeddings = self.box_position_embeddings(position_ids) spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings) spatial_position_embeddings = self.dropout(spatial_position_embeddings) return spatial_position_embeddings class LiLTRobertaLikeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.layout_query = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.layout_key = nn.Linear(config.hidden_size // config.channel_shrink_ratio, 
self.all_head_size // config.channel_shrink_ratio) self.layout_value = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder self.channel_shrink_ratio = config.channel_shrink_ratio def transpose_for_scores(self, x, r=1): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size//r) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio) layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio) layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio) mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size) tmp_layout_attention_scores = layout_attention_scores / math.sqrt(self.attention_head_size//self.channel_shrink_ratio) attention_scores = tmp_attention_scores + tmp_layout_attention_scores layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) layout_attention_scores = layout_attention_scores + attention_mask # Normalize the attention scores to probabilities. layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. layout_attention_probs = self.dropout(layout_attention_probs) # Mask heads if we want to if head_mask is not None: layout_attention_probs = layout_attention_probs * head_mask layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer) layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size//self.channel_shrink_ratio,) layout_context_layer = layout_context_layer.view(*new_context_layer_shape) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = ((context_layer, layout_context_layer), attention_probs) if output_attentions else ((context_layer, layout_context_layer),) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class LiLTRobertaLikeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LiLTRobertaLikeSelfAttention(config) self.output = LiLTRobertaLikeSelfOutput(config) self.pruned_heads = set() ori_hidden_size = config.hidden_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio self.layout_output = LiLTRobertaLikeSelfOutput(config) config.hidden_size = ori_hidden_size def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, layout_inputs, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0][0], hidden_states) layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs) outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them return outputs class LiLTRobertaLikeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class LiLTRobertaLikeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LiLTRobertaLikeAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = LiLTRobertaLikeAttention(config) self.intermediate = LiLTRobertaLikeIntermediate(config) self.output = LiLTRobertaLikeOutput(config) ori_hidden_size = config.hidden_size ori_intermediate_size = config.intermediate_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio self.layout_intermediate = LiLTRobertaLikeIntermediate(config) self.layout_output = LiLTRobertaLikeOutput(config) config.hidden_size = ori_hidden_size config.intermediate_size = ori_intermediate_size def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, layout_inputs, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0][0] layout_attention_output = self_attention_outputs[0][1] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) layout_layer_output = apply_chunking_to_forward( self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output ) outputs = ((layer_output, layout_layer_output),) + 
outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def layout_feed_forward_chunk(self, attention_output): intermediate_output = self.layout_intermediate(attention_output) layer_output = self.layout_output(intermediate_output, attention_output) return layer_output class LiLTRobertaLikeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LiLTRobertaLikeLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0][0] layout_inputs = layer_outputs[0][1] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ), layout_inputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ), layout_inputs class LiLTRobertaLikePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LiLTRobertaLikePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LiLTRobertaLikeConfig base_model_prefix = "liltrobertalike" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class LiLTRobertaLikeModel(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = LiLTRobertaLikeTextEmbeddings(config) self.layout_embeddings = LiLTRobertaLikeLayoutEmbeddings(config) self.encoder = LiLTRobertaLikeEncoder(config) self.pooler = LiLTRobertaLikePooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, 
device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output, position_ids = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) layout_embedding_output = self.layout_embeddings( bbox=bbox, position_ids=position_ids, ) encoder_outputs, layout_encoder_outputs = self.encoder( embedding_output, layout_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ), layout_encoder_outputs class LiLTRobertaLikeForTokenClassification(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size + config.hidden_size//config.channel_shrink_ratio, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LiLTRobertaLikeForRelationExtraction(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.input_type = config.input_type self.freeze_model = config.freeze_model print(f'=============model freeze {self.freeze_model}===============') #from IPython import embed;embed() self.decoder = config.decoder_name if self.decoder == 're': self.extractor = REDecoder(config, config.hidden_size + config.hidden_size // config.channel_shrink_ratio) elif self.decoder == 'gose': self.extractor = GOSE(config) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, entities=None, relations=None, ): if self.input_type == 'bbox': input_mask = input_ids != 1 input_ids[input_mask] = 3 elif self.input_type == 'text': bbox[:,:,:] = 0 if self.freeze_model: with torch.no_grad(): outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) else: outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) seq_length = input_ids.size(1) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) loss, pred_relations = self.extractor(sequence_output, entities, relations, bbox)
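The LiLTRobertaLikeSelfAttention code in the sample above couples a text stream and a layout stream: each stream computes its own attention scores, each is scaled by the square root of its own head dimension (the layout stream's dimension is reduced by channel_shrink_ratio), and the two scaled score tensors are added together before softmax. A self-contained sketch of that score-sharing step is shown below; the helper name and example shapes are illustrative, not taken from the repository.

import math
import torch

def dual_stream_attention_scores(q_text, k_text, q_layout, k_layout, head_dim, shrink_ratio=4):
    # Each stream is scaled by sqrt of its own head dimension, then the streams
    # exchange scores by addition, mirroring the pattern in the sample above.
    text_scores = torch.matmul(q_text, k_text.transpose(-1, -2)) / math.sqrt(head_dim)
    layout_scores = torch.matmul(q_layout, k_layout.transpose(-1, -2)) / math.sqrt(head_dim // shrink_ratio)
    return text_scores + layout_scores, layout_scores + text_scores

# Example shapes: (batch, heads, seq_len, head_dim) for text; head_dim // shrink_ratio for layout.
b, h, l, d = 1, 2, 8, 64
text_s, layout_s = dual_stream_attention_scores(
    torch.randn(b, h, l, d), torch.randn(b, h, l, d),
    torch.randn(b, h, l, d // 4), torch.randn(b, h, l, d // 4),
    head_dim=d)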
return ReOutput(
3
2023-10-19 14:36:32+00:00
16k
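This sample's context also builds pairwise layout features via calc_layout/axis_features: the Euclidean distance between bounding-box centers (rescaled by 1000) and the atan2 angle between them (divided by pi), which are then projected by axis_dis_fn/axis_angle_fn. The sketch below reproduces that geometry in isolation, assuming boxes in (x0, y0, x1, y1) order with 0-1000 coordinates; it is an illustration, not the repository's code.

import torch

def bbox_center_features(head_bbox, tail_bbox):
    # head_bbox, tail_bbox: (..., 4) boxes in (x0, y0, x1, y1) order, 0-1000 coordinates (assumed).
    # Returns the rescaled center-to-center distance and normalized atan2 angle,
    # the two scalars the snippet feeds to its distance/angle projection layers.
    head_bbox = head_bbox.float()
    tail_bbox = tail_bbox.float()
    head_center = torch.stack(((head_bbox[..., 0] + head_bbox[..., 2]) / 2.0,
                               (head_bbox[..., 1] + head_bbox[..., 3]) / 2.0), dim=-1)
    tail_center = torch.stack(((tail_bbox[..., 0] + tail_bbox[..., 2]) / 2.0,
                               (tail_bbox[..., 1] + tail_bbox[..., 3]) / 2.0), dim=-1)
    delta = head_center - tail_center
    distance = delta.norm(dim=-1, keepdim=True) / 1000.0
    angle = torch.atan2(delta[..., 1], delta[..., 0]).unsqueeze(-1) / 3.1416
    return distance, angle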
BurgerBurgerBurger/AA
run.py
[ { "identifier": "add_args", "path": "args.py", "snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-cased\", type=str)\n\n parser.add_argument(\"--train_file\", default=\"train_annotated.json\", type=str)\n parser.add_argument(\"--dev_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--test_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--pred_file\", default=\"results.json\", type=str)\n parser.add_argument(\"--save_path\", default=\"\", type=str)\n parser.add_argument(\"--load_path\", default=\"\", type=str)\n parser.add_argument(\"--results_path\", default=\"\", type=str)\n parser.add_argument(\"--teacher_sig_path\", default=\"\", type=str)\n parser.add_argument(\"--save_attn\", action=\"store_true\", help=\"Whether store the evidence distribution or not\")\n\n # graph\n parser.add_argument(\"--attn_heads\", default=2, type=int, help=\"Attention heads\")\n parser.add_argument(\"--gcn_layers\", default=2, type=int, help=\"GCN layers\")\n parser.add_argument(\"--iters\", default=2, type=int, help=\"Iteration\")\n parser.add_argument(\"--use_graph\", action=\"store_true\", help=\"Use graph\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--max_seq_length\", default=1024, type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size for training.\")\n parser.add_argument(\"--test_batch_size\", default=8, type=int,\n help=\"Batch size for testing.\")\n parser.add_argument(\"--eval_mode\", default=\"single\", type=str,\n choices=[\"single\", \"fushion\"], \n help=\"Single-pass evaluation or evaluation with inference-stage fusion.\")\n parser.add_argument(\"--gradient_accumulation_steps\", default=1, type=int,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--num_labels\", default=4, type=int,\n help=\"Max number of labels in prediction.\")\n parser.add_argument(\"--max_sent_num\", default=25, type=int,\n help=\"Max number of sentences in each document.\")\n parser.add_argument(\"--evi_thresh\", default=0.2, type=float,\n help=\"Evidence Threshold. \")\n parser.add_argument(\"--evi_lambda\", default=0.1, type=float,\n help=\"Weight of relation-agnostic evidence loss during training. \")\n parser.add_argument(\"--attn_lambda\", default=1.0, type=float,\n help=\"Weight of knowledge distillation loss for attentions during training. 
\")\n parser.add_argument(\"--lr_transformer\", default=5e-5, type=float,\n help=\"The initial learning rate for transformer.\")\n parser.add_argument(\"--lr_added\", default=1e-4, type=float,\n help=\"The initial learning rate for added modules.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--warmup_ratio\", default=0.06, type=float,\n help=\"Warm up ratio for Adam.\")\n parser.add_argument(\"--num_train_epochs\", default=30.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--evaluation_steps\", default=-1, type=int,\n help=\"Number of training steps between evaluations.\")\n parser.add_argument(\"--seed\", type=int, default=66,\n help=\"random seed for initialization\")\n parser.add_argument(\"--num_class\", type=int, default=97,\n help=\"Number of relation types in dataset.\")\n\n return parser" }, { "identifier": "DocREModel", "path": "model.py", "snippet": "class DocREModel(nn.Module):\n\n def __init__(self, args, config, model, tokenizer,\n emb_size=768, block_size=64, num_labels=-1,\n max_sent_num=25, evi_thresh=0.2):\n super().__init__()\n self.config = config\n self.model = model\n self.tokenizer = tokenizer\n self.hidden_size = config.hidden_size\n\n self.loss_fnt = ATLoss()\n self.loss_fnt_evi = nn.KLDivLoss(reduction=\"batchmean\")\n\n self.head_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n self.tail_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n\n self.use_graph = args.use_graph\n if self.use_graph:\n self.head_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.tail_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.bilinear = nn.Linear(emb_size * block_size, config.num_labels)\n\n self.emb_size = emb_size\n self.block_size = block_size\n self.num_labels = num_labels\n self.total_labels = config.num_labels\n self.max_sent_num = max_sent_num\n self.evi_thresh = evi_thresh\n\n self.edges = ['self-loop', 'mention-anaphor', 'co-reference', 'inter-entity']\n\n if self.use_graph:\n self.graph_layers = nn.ModuleList(\n AttentionGCNLayer(self.edges, self.hidden_size, nhead=args.attn_heads, iters=args.gcn_layers) for _ in\n range(args.iters))\n\n def encode(self, input_ids, attention_mask):\n config = self.config\n if config.transformer_type == \"bert\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id]\n elif config.transformer_type == \"roberta\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id, config.sep_token_id]\n # process long documents.\n sequence_output, attention = process_long_input(self.model, input_ids, attention_mask, start_tokens, end_tokens)\n\n return sequence_output, attention\n\n def get_hrt(self, sequence_output, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n hss, tss, rss = [], [], []\n ht_atts = []\n\n for i in range(len(entity_pos)): # for each batch\n entity_embs, entity_atts = [], []\n\n # obtain entity embedding from mention embeddings.\n for eid, e in enumerate(entity_pos[i]): # for each entity\n if len(e) > 1:\n e_emb, e_att = [], []\n for mid, (start, end) in enumerate(e): # for every mention\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n e_emb.append(sequence_output[i, start + offset])\n e_att.append(attention[i, :, start + offset])\n\n if len(e_emb) > 
0:\n e_emb = torch.logsumexp(torch.stack(e_emb, dim=0), dim=0)\n e_att = torch.stack(e_att, dim=0).mean(0)\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n else:\n start, end = e[0]\n if start + offset < c:\n e_emb = sequence_output[i, start + offset]\n e_att = attention[i, :, start + offset]\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n\n entity_embs.append(e_emb)\n entity_atts.append(e_att)\n\n entity_embs = torch.stack(entity_embs, dim=0) # [n_e, d]\n entity_atts = torch.stack(entity_atts, dim=0) # [n_e, h, seq_len]\n\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n\n # obtain subject/object (head/tail) embeddings from entity embeddings.\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n\n h_att = torch.index_select(entity_atts, 0, ht_i[:, 0])\n t_att = torch.index_select(entity_atts, 0, ht_i[:, 1])\n\n ht_att = (h_att * t_att).mean(1) # average over all heads\n ht_att = ht_att / (ht_att.sum(1, keepdim=True) + 1e-30)\n ht_atts.append(ht_att)\n\n # obtain local context embeddings.\n rs = contract(\"ld,rl->rd\", sequence_output[i], ht_att)\n\n hss.append(hs)\n tss.append(ts)\n rss.append(rs)\n\n rels_per_batch = [len(b) for b in hss]\n hss = torch.cat(hss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n tss = torch.cat(tss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n rss = torch.cat(rss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n ht_atts = torch.cat(ht_atts, dim=0) # (num_ent_pairs_all_batches, max_doc_len)\n\n return hss, rss, tss, ht_atts, rels_per_batch\n\n def graph(self, sequence_output, graphs, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n\n max_node = max([graph.shape[0] for graph in graphs])\n graph_fea = torch.zeros(n, max_node, self.config.hidden_size, device=sequence_output.device)\n graph_adj = torch.zeros(n, max_node, max_node, device=sequence_output.device)\n\n for i, graph in enumerate(graphs):\n nodes_num = graph.shape[0]\n graph_adj[i, :nodes_num, :nodes_num] = torch.from_numpy(graph)\n\n for i in range(len(entity_pos)):\n mention_index = 0\n for e in entity_pos[i]:\n for start, end in e:\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n graph_fea[i, mention_index, :] = sequence_output[i, start + offset]\n else:\n graph_fea[i, mention_index, :] = torch.zeros(self.config.hidden_size).to(sequence_output)\n mention_index += 1\n\n for graph_layer in self.graph_layers:\n graph_fea, _ = graph_layer(graph_fea, graph_adj)\n\n h_entity, t_entity = [], []\n for i in range(len(entity_pos)):\n entity_embs = []\n mention_index = 0\n for e in entity_pos[i]:\n e_emb = graph_fea[i, mention_index:mention_index + len(e), :]\n mention_index += len(e)\n\n e_emb = torch.logsumexp(e_emb, dim=0) if len(e) > 1 else e_emb.squeeze(0)\n entity_embs.append(e_emb)\n\n entity_embs = torch.stack(entity_embs, dim=0)\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n h_entity.append(hs)\n t_entity.append(ts)\n\n h_entity = torch.cat(h_entity, dim=0)\n t_entity = torch.cat(t_entity, dim=0)\n return h_entity, t_entity\n\n def forward_rel(self, hs, ts, rs, h, t):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs, h], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs, 
t], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_rel_no_graph(self, hs, ts, rs):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_evi(self, doc_attn, sent_pos, batch_rel, offset):\n max_sent_num = max([len(sent) for sent in sent_pos])\n rel_sent_attn = []\n for i in range(len(sent_pos)): # for each batch\n # the relation ids corresponds to document in batch i is [sum(batch_rel[:i]), sum(batch_rel[:i+1]))\n curr_attn = doc_attn[sum(batch_rel[:i]):sum(batch_rel[:i + 1])]\n curr_sent_pos = [torch.arange(s[0], s[1]).to(curr_attn.device) + offset for s in sent_pos[i]] # + offset\n\n curr_attn_per_sent = [curr_attn.index_select(-1, sent) for sent in curr_sent_pos]\n curr_attn_per_sent += [torch.zeros_like(curr_attn_per_sent[0])] * (max_sent_num - len(curr_attn_per_sent))\n sum_attn = torch.stack([attn.sum(dim=-1) for attn in curr_attn_per_sent],\n dim=-1) # sum across those attentions\n rel_sent_attn.append(sum_attn)\n\n s_attn = torch.cat(rel_sent_attn, dim=0)\n return s_attn\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n labels=None, # relation labels\n entity_pos=None,\n hts=None, # entity pairs\n sent_pos=None,\n sent_labels=None, # evidence labels (0/1)\n teacher_attns=None, # evidence distribution from teacher model\n graph=None,\n tag=\"train\"\n ):\n\n offset = 1 if self.config.transformer_type in [\"bert\", \"roberta\"] else 0\n output = {}\n sequence_output, attention = self.encode(input_ids, attention_mask)\n\n hs, rs, ts, doc_attn, batch_rel = self.get_hrt(sequence_output, attention, entity_pos, hts, offset)\n\n if self.use_graph:\n h, t = self.graph(sequence_output, graph, attention, entity_pos, hts, offset)\n logits = self.forward_rel(hs, ts, rs, h, t)\n else:\n logits = self.forward_rel_no_graph(hs, ts, rs)\n\n output[\"rel_pred\"] = self.loss_fnt.get_label(logits, num_labels=self.num_labels)\n\n if sent_labels is not None: # human-annotated evidence available\n\n s_attn = self.forward_evi(doc_attn, sent_pos, batch_rel, offset)\n output[\"evi_pred\"] = F.pad(s_attn > self.evi_thresh, (0, self.max_sent_num - s_attn.shape[-1]))\n\n if tag in [\"test\", \"dev\"]: # testing\n scores_topk = self.loss_fnt.get_score(logits, self.num_labels)\n output[\"scores\"] = scores_topk[0]\n output[\"topks\"] = scores_topk[1]\n\n if tag == \"infer\": # teacher model inference\n output[\"attns\"] = doc_attn.split(batch_rel)\n\n else: # training\n # relation extraction loss\n loss = self.loss_fnt(logits.float(), labels.float())\n output[\"loss\"] = {\"rel_loss\": loss.to(sequence_output)}\n\n if sent_labels is not None: # supervised training with human evidence\n\n idx_used = torch.nonzero(labels[:, 1:].sum(dim=-1)).view(-1)\n # evidence retrieval loss (kldiv loss)\n s_attn = s_attn[idx_used]\n sent_labels = sent_labels[idx_used]\n norm_s_labels = sent_labels / (sent_labels.sum(dim=-1, keepdim=True) + 
1e-30)\n norm_s_labels[norm_s_labels == 0] = 1e-30\n s_attn[s_attn == 0] = 1e-30\n evi_loss = self.loss_fnt_evi(s_attn.log(), norm_s_labels)\n output[\"loss\"][\"evi_loss\"] = evi_loss.to(sequence_output)\n\n elif teacher_attns is not None: # self training with teacher attention\n\n doc_attn[doc_attn == 0] = 1e-30\n teacher_attns[teacher_attns == 0] = 1e-30\n attn_loss = self.loss_fnt_evi(doc_attn.log(), teacher_attns)\n output[\"loss\"][\"attn_loss\"] = attn_loss.to(sequence_output)\n\n return output" }, { "identifier": "set_seed", "path": "utils.py", "snippet": "def set_seed(args):\n seed = int(args.seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.use_deterministic_algorithms(True)" }, { "identifier": "collate_fn", "path": "utils.py", "snippet": "def collate_fn(batch):\n max_len = max([len(f[\"input_ids\"]) for f in batch])\n max_sent = max([len(f[\"sent_pos\"]) for f in batch])\n input_ids = [f[\"input_ids\"] + [0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n input_mask = [[1.0] * len(f[\"input_ids\"]) + [0.0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n labels = [f[\"labels\"] for f in batch]\n entity_pos = [f[\"entity_pos\"] for f in batch]\n hts = [f[\"hts\"] for f in batch]\n sent_pos = [f[\"sent_pos\"] for f in batch]\n sent_labels = [f[\"sent_labels\"] for f in batch if \"sent_labels\" in f]\n attns = [f[\"attns\"] for f in batch if \"attns\" in f]\n\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n input_mask = torch.tensor(input_mask, dtype=torch.float)\n\n labels = [torch.tensor(label) for label in labels]\n labels = torch.cat(labels, dim=0)\n\n if sent_labels != [] and None not in sent_labels:\n sent_labels_tensor = []\n for sent_label in sent_labels:\n sent_label = np.array(sent_label)\n sent_labels_tensor.append(np.pad(sent_label, ((0, 0), (0, max_sent - sent_label.shape[1]))))\n sent_labels_tensor = torch.from_numpy(np.concatenate(sent_labels_tensor, axis=0))\n else:\n sent_labels_tensor = None\n\n if attns:\n attns = [np.pad(attn, ((0, 0), (0, max_len - attn.shape[1]))) for attn in attns]\n attns = torch.from_numpy(np.concatenate(attns, axis=0))\n else:\n attns = None\n\n graph = [f[\"graph\"] for f in batch]\n\n output = (input_ids, input_mask, labels, entity_pos, hts, sent_pos, sent_labels_tensor, attns, graph)\n\n return output" }, { "identifier": "create_directory", "path": "utils.py", "snippet": "def create_directory(d):\n if d and not os.path.exists(d):\n os.makedirs(d)\n return d" }, { "identifier": "read_docred", "path": "prepro.py", "snippet": "def read_docred(file_in,\n tokenizer,\n transformer_type=\"bert\",\n max_seq_length=1024,\n teacher_sig_path=\"\",\n single_results=None):\n\n i_line = 0\n pos_samples = 0\n neg_samples = 0\n features = []\n\n if file_in == \"\":\n return None\n\n with open(file_in, \"r\", encoding='utf-8') as fh:\n data = json.load(fh)\n\n if teacher_sig_path != \"\": # load logits\n basename = os.path.splitext(os.path.basename(file_in))[0]\n attns_file = os.path.join(teacher_sig_path, f\"{basename}.attns\")\n attns = pickle.load(open(attns_file, 'rb'))\n\n if single_results != None:\n # reorder predictions as relations by title\n pred_pos_samples = 0\n pred_neg_samples = 0\n pred_rels = single_results\n title2preds = {}\n for pred_rel in 
pred_rels:\n if pred_rel[\"title\"] in title2preds:\n title2preds[pred_rel[\"title\"]].append(pred_rel)\n else:\n title2preds[pred_rel[\"title\"]] = [pred_rel]\n\n for doc_id in tqdm(range(len(data)), desc=\"Loading examples\"):\n\n sample = data[doc_id]\n entities = sample['vertexSet']\n entity_start, entity_end = [], []\n # record entities\n for entity in entities:\n for mention in entity:\n sent_id = mention[\"sent_id\"]\n pos = mention[\"pos\"]\n entity_start.append((sent_id, pos[0],))\n entity_end.append((sent_id, pos[1] - 1,))\n\n # add entity markers\n sents, sent_map, sent_pos = add_entity_markers(sample, tokenizer, entity_start, entity_end)\n\n # training triples with positive examples (entity pairs with labels)\n train_triple = {}\n\n if \"labels\" in sample:\n for label in sample['labels']:\n evidence = label['evidence']\n r = int(docred_rel2id[label['r']])\n\n # update training triples\n if (label['h'], label['t']) not in train_triple:\n train_triple[(label['h'], label['t'])] = [\n {'relation': r, 'evidence': evidence}]\n else:\n train_triple[(label['h'], label['t'])].append(\n {'relation': r, 'evidence': evidence})\n\n # get anaphors in the doc\n mentions = set([m['name'] for e in entities for m in e])\n\n potential_mention = get_anaphors(sample['sents'], mentions)\n\n entities.append(potential_mention)\n\n # entity start, end position\n entity_pos = []\n\n for e in entities:\n entity_pos.append([])\n for m in e:\n start = sent_map[m[\"sent_id\"]][m[\"pos\"][0]]\n end = sent_map[m[\"sent_id\"]][m[\"pos\"][1]]\n label = m[\"type\"]\n entity_pos[-1].append((start, end,))\n\n relations, hts, sent_labels = [], [], []\n\n for h, t in train_triple.keys(): # for every entity pair with gold relation\n relation = [0] * len(docred_rel2id)\n sent_evi = [0] * len(sent_pos)\n\n for mention in train_triple[h, t]: # for each relation mention with head h and tail t\n relation[mention[\"relation\"]] = 1\n for i in mention[\"evidence\"]:\n sent_evi[i] += 1\n\n relations.append(relation)\n hts.append([h, t])\n sent_labels.append(sent_evi)\n pos_samples += 1\n\n for h in range(len(entities) - 1):\n for t in range(len(entities) - 1):\n # all entity pairs that do not have relation are treated as negative samples\n if h != t and [h, t] not in hts: # and [t, h] not in hts:\n relation = [1] + [0] * (len(docred_rel2id) - 1)\n sent_evi = [0] * len(sent_pos)\n relations.append(relation)\n\n hts.append([h, t])\n sent_labels.append(sent_evi)\n neg_samples += 1\n\n graph = create_graph(entity_pos)\n\n assert len(relations) == (len(entities) - 1) * (len(entities) - 2)\n assert len(sents) < max_seq_length\n sents = sents[:max_seq_length - 2] # truncate, -2 for [CLS] and [SEP]\n input_ids = tokenizer.convert_tokens_to_ids(sents)\n input_ids = tokenizer.build_inputs_with_special_tokens(input_ids)\n\n feature = [{'input_ids': input_ids,\n 'entity_pos': entity_pos if entity_pos[-1] != [] else entity_pos[:-1],\n 'labels': relations,\n 'hts': hts,\n 'sent_pos': sent_pos,\n 'sent_labels': sent_labels,\n 'title': sample['title'],\n 'graph': graph\n }]\n\n if teacher_sig_path != '': # add evidence distributions from the teacher model\n feature[0]['attns'] = attns[doc_id][:, :len(input_ids)]\n\n if single_results is not None: # get pseudo documents from predictions of the single run\n offset = 1 if transformer_type in [\"bert\", \"roberta\"] else 0\n if sample[\"title\"] in title2preds:\n feature, pos_sample, neg_sample, = get_pseudo_features(feature[0], title2preds[sample[\"title\"]],\n entities, sent_map, offset, 
tokenizer)\n pred_pos_samples += pos_sample\n pred_neg_samples += neg_sample\n\n i_line += len(feature)\n features.extend(feature)\n\n print(\"# of documents {}.\".format(i_line))\n if single_results is not None:\n print(\"# of positive examples {}.\".format(pred_pos_samples))\n print(\"# of negative examples {}.\".format(pred_neg_samples))\n\n else:\n print(\"# of positive examples {}.\".format(pos_samples))\n print(\"# of negative examples {}.\".format(neg_samples))\n\n return features" }, { "identifier": "to_official", "path": "evaluation.py", "snippet": "def to_official(preds: list, features: list, evi_preds: list = [], scores: list = [], topks: list = []):\n '''\n Convert the predictions to official format for evaluating.\n Input:\n :preds: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :evi_preds: list of the evidence prediction corresponding to each relation triple prediction.\n :scores: list of scores of topk relation labels for each entity pair.\n :topks: list of topk relation labels for each entity pair.\n Output:\n :official_res: official results used for evaluation.\n :res: topk results to be dumped into file, which can be further used during fushion.\n '''\n\n h_idx, t_idx, title, sents = [], [], [], []\n\n for f in features:\n if \"entity_map\" in f:\n hts = [[f[\"entity_map\"][ht[0]], f[\"entity_map\"][ht[1]]] for ht in f[\"hts\"]]\n else:\n hts = f[\"hts\"]\n\n h_idx += [ht[0] for ht in hts]\n t_idx += [ht[1] for ht in hts]\n title += [f[\"title\"] for ht in hts]\n sents += [len(f[\"sent_pos\"])] * len(hts)\n\n official_res = []\n res = []\n\n for i in range(preds.shape[0]): # for each entity pair\n if scores != []:\n score = extract_relative_score(scores[i], topks[i])\n pred = topks[i]\n else:\n pred = preds[i]\n pred = np.nonzero(pred)[0].tolist()\n\n for p in pred: # for each predicted relation label (topk)\n curr_result = {\n 'title': title[i],\n 'h_idx': h_idx[i],\n 't_idx': t_idx[i],\n 'r': id2rel[p],\n }\n if evi_preds != []:\n curr_evi = evi_preds[i]\n evis = np.nonzero(curr_evi)[0].tolist()\n curr_result[\"evidence\"] = [evi for evi in evis if evi < sents[i]]\n if scores != []:\n curr_result[\"score\"] = score[np.where(topks[i] == p)].item()\n if p != 0 and p in np.nonzero(preds[i])[0].tolist():\n official_res.append(curr_result)\n res.append(curr_result)\n\n return official_res, res" }, { "identifier": "official_evaluate", "path": "evaluation.py", "snippet": "def official_evaluate(tmp, path, train_file=\"train_annotated.json\", dev_file=\"dev.json\"):\n '''\n Adapted from the official evaluation code\n '''\n truth_dir = os.path.join(path, 'ref')\n\n if not os.path.exists(truth_dir):\n os.makedirs(truth_dir)\n\n fact_in_train_annotated = gen_train_facts(os.path.join(path, train_file), truth_dir)\n fact_in_train_distant = gen_train_facts(os.path.join(path, \"train_distant.json\"), truth_dir)\n\n truth = json.load(open(os.path.join(path, dev_file)))\n\n std = {}\n tot_evidences = 0\n titleset = set([])\n\n title2vectexSet = {}\n\n for x in truth:\n title = x['title']\n titleset.add(title)\n\n vertexSet = x['vertexSet']\n title2vectexSet[title] = vertexSet\n\n if 'labels' not in x: # official test set from DocRED\n continue\n\n for label in x['labels']:\n r = label['r']\n h_idx = label['h']\n t_idx = label['t']\n std[(title, r, h_idx, t_idx)] = 
set(label['evidence'])\n tot_evidences += len(label['evidence'])\n\n tot_relations = len(std)\n tmp.sort(key=lambda x: (x['title'], x['h_idx'], x['t_idx'], x['r']))\n submission_answer = [tmp[0]]\n\n for i in range(1, len(tmp)):\n x = tmp[i]\n y = tmp[i - 1]\n if (x['title'], x['h_idx'], x['t_idx'], x['r']) != (y['title'], y['h_idx'], y['t_idx'], y['r']):\n submission_answer.append(tmp[i])\n\n correct_re = 0\n correct_evidence = 0\n pred_evi = 0\n\n correct_in_train_annotated = 0\n correct_in_train_distant = 0\n titleset2 = set([])\n for x in submission_answer:\n title = x['title']\n h_idx = x['h_idx']\n t_idx = x['t_idx']\n r = x['r']\n titleset2.add(title)\n if title not in title2vectexSet:\n continue\n vertexSet = title2vectexSet[title]\n\n if 'evidence' in x: # and (title, h_idx, t_idx) in std:\n evi = set(x['evidence'])\n else:\n evi = set([])\n pred_evi += len(evi)\n\n if (title, r, h_idx, t_idx) in std:\n correct_re += 1\n stdevi = std[(title, r, h_idx, t_idx)]\n correct_evidence += len(stdevi & evi)\n in_train_annotated = in_train_distant = False\n for n1 in vertexSet[h_idx]:\n for n2 in vertexSet[t_idx]:\n if (n1['name'], n2['name'], r) in fact_in_train_annotated:\n in_train_annotated = True\n if (n1['name'], n2['name'], r) in fact_in_train_distant:\n in_train_distant = True\n\n if in_train_annotated:\n correct_in_train_annotated += 1\n if in_train_distant:\n correct_in_train_distant += 1\n\n re_p = 1.0 * correct_re / len(submission_answer)\n re_r = 1.0 * correct_re / tot_relations if tot_relations != 0 else 0\n if re_p + re_r == 0:\n re_f1 = 0\n else:\n re_f1 = 2.0 * re_p * re_r / (re_p + re_r)\n\n evi_p = 1.0 * correct_evidence / pred_evi if pred_evi > 0 else 0\n evi_r = 1.0 * correct_evidence / tot_evidences if tot_evidences > 0 else 0\n\n if evi_p + evi_r == 0:\n evi_f1 = 0\n else:\n evi_f1 = 2.0 * evi_p * evi_r / (evi_p + evi_r)\n\n re_p_ignore_train_annotated = 1.0 * (correct_re - correct_in_train_annotated) / (\n len(submission_answer) - correct_in_train_annotated + 1e-5)\n re_p_ignore_train = 1.0 * (correct_re - correct_in_train_distant) / (\n len(submission_answer) - correct_in_train_distant + 1e-5)\n\n if re_p_ignore_train_annotated + re_r == 0:\n re_f1_ignore_train_annotated = 0\n else:\n re_f1_ignore_train_annotated = 2.0 * re_p_ignore_train_annotated * re_r / (re_p_ignore_train_annotated + re_r)\n\n if re_p_ignore_train + re_r == 0:\n re_f1_ignore_train = 0\n else:\n re_f1_ignore_train = 2.0 * re_p_ignore_train * re_r / (re_p_ignore_train + re_r)\n\n return [re_p, re_r, re_f1], [evi_p, evi_r, evi_f1], \\\n [re_p_ignore_train_annotated, re_r, re_f1_ignore_train_annotated], \\\n [re_p_ignore_train, re_r, re_f1_ignore_train]" }, { "identifier": "merge_results", "path": "evaluation.py", "snippet": "def merge_results(pred: list, pred_pseudo: list, features: list, thresh: float = None):\n '''\n Merge relation predictions from the original document and psuedo documents.\n Input:\n :pred: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :pred_pseudo: list of dictionaries, each dictionary entry is a predicted relation triple from pseudo documents. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :thresh: threshold for selecting predictions.\n Output:\n :merged_res: list of merged relation predictions. 
Each relation prediction is a dictionay with keys (title, h_idx, t_idx, r).\n :thresh: threshold of selecting relation predictions.\n '''\n\n title2pred = get_title2pred(pred)\n title2pred_pseudo = get_title2pred(pred_pseudo)\n\n title2gt = get_title2gt(features)\n num_gt = sum([len(title2gt[t]) for t in title2gt])\n\n titles = list(title2pred.keys())\n cand = []\n merged_res = []\n correct, num_pred = 0, 0\n\n for t in titles:\n rels = title2pred[t]\n rels_pseudo = title2pred_pseudo[t] if t in title2pred_pseudo else {}\n\n union = set(rels.keys()) | set(rels_pseudo.keys())\n for r in union:\n if r in rels and r in rels_pseudo: # add those into predictions\n if rels[r] > 0 and rels_pseudo[r] > 0:\n merged_res.append({'title': t, 'h_idx': r[0], 't_idx': r[1], 'r': r[2]})\n num_pred += 1\n correct += r in title2gt[t]\n continue\n score = rels[r] + rels_pseudo[r]\n elif r in rels: # -10 for penalty\n score = rels[r] - 10\n elif r in rels_pseudo:\n score = rels_pseudo[r] - 10\n cand.append((r in title2gt[t], score, t, r[0], r[1], r[2]))\n\n if thresh != None:\n sorted_pred = sorted(cand, key=lambda x: x[1], reverse=True)\n last = min(filter(lambda x: x[1] > thresh, sorted_pred))\n until = sorted_pred.index(last)\n cand = sorted_pred[:until + 1]\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n return merged_res, thresh\n\n if cand != []:\n thresh, cand = select_thresh(cand, num_gt, correct, num_pred)\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n\n return merged_res, thresh" } ]
import argparse import os import numpy as np import torch import ujson as json import pandas as pd import pickle from torch.cuda.amp import GradScaler from torch.utils.data import DataLoader from transformers import AutoConfig, AutoModel, AutoTokenizer from transformers.optimization import AdamW, get_linear_schedule_with_warmup from args import add_args from model import DocREModel from utils import set_seed, collate_fn, create_directory from prepro import read_docred from evaluation import to_official, official_evaluate, merge_results from tqdm import tqdm
11387
dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if "scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test": best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.test_file) else: best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.dev_file) else: best_re = best_evi = best_re_ign = [-1, -1, -1] output = { tag + "_rel": [i * 100 for i in best_re], tag + "_rel_ign": [i * 100 for i in best_re_ign], tag + "_evi": [i * 100 for i in best_evi], } scores = {"dev_F1": best_re[-1] * 100, "dev_evi_F1": best_evi[-1] * 100, "dev_F1_ign": best_re_ign[-1] * 100} if args.save_attn: attns_path = os.path.join(args.load_path, f"{os.path.splitext(args.test_file)[0]}.attns") print(f"saving attentions into {attns_path} ...") with open(attns_path, "wb") as f: pickle.dump(attns, f) return scores, output, official_results, results def dump_to_file(offi: list, offi_path: str, scores: list, score_path: str, results: list = [], res_path: str = "", thresh: float = None): ''' dump scores and (top-k) predictions to file. 
''' print(f"saving official predictions into {offi_path} ...") json.dump(offi, open(offi_path, "w")) print(f"saving evaluations into {score_path} ...") headers = ["precision", "recall", "F1"] scores_pd = pd.DataFrame.from_dict(scores, orient="index", columns=headers) print(scores_pd) scores_pd.to_csv(score_path, sep='\t') if len(results) != 0: assert res_path != "" print(f"saving topk results into {res_path} ...") json.dump(results, open(res_path, "w")) if thresh is not None: thresh_path = os.path.join(os.path.dirname(offi_path), "thresh") if not os.path.exists(thresh_path): print(f"saving threshold into {thresh_path} ...") json.dump(thresh, open(thresh_path, "w")) return def main(): parser = argparse.ArgumentParser()
def load_input(batch, device, tag="dev"): input = {'input_ids': batch[0].to(device), 'attention_mask': batch[1].to(device), 'labels': batch[2].to(device), 'entity_pos': batch[3], 'hts': batch[4], 'sent_pos': batch[5], 'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None, 'teacher_attns': batch[7].to(device) if not batch[7] is None else None, 'graph': batch[8], 'tag': tag } return input def train(args, model, train_features, dev_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) scaler = GradScaler() print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator, desc='Train epoch'): for step, batch in enumerate(train_dataloader): model.zero_grad() optimizer.zero_grad() model.train() inputs = load_input(batch, args.device) outputs = model(**inputs) loss = [outputs["loss"]["rel_loss"]] if inputs["sent_labels"] is not None: loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda) if inputs["teacher_attns"] is not None: loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda) loss = sum(loss) / args.gradient_accumulation_steps scaler.scale(loss).backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() scheduler.step() model.zero_grad() num_steps += 1 if (step + 1) == len(train_dataloader) or ( args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev") print(dev_output) if dev_scores["dev_F1_ign"] > best_score: best_score = dev_scores["dev_F1_ign"] best_offi_results = official_results best_results = results best_output = dev_output ckpt_file = os.path.join(args.save_path, "best.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) if epoch == train_iterator[-1]: # last epoch ckpt_file = os.path.join(args.save_path, "last.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) pred_file = os.path.join(args.save_path, args.pred_file) score_file = os.path.join(args.save_path, "scores.csv") results_file = os.path.join(args.save_path, f"topk_{args.pred_file}") dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = 
DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if "scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test": best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.test_file) else: best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file, args.dev_file) else: best_re = best_evi = best_re_ign = [-1, -1, -1] output = { tag + "_rel": [i * 100 for i in best_re], tag + "_rel_ign": [i * 100 for i in best_re_ign], tag + "_evi": [i * 100 for i in best_evi], } scores = {"dev_F1": best_re[-1] * 100, "dev_evi_F1": best_evi[-1] * 100, "dev_F1_ign": best_re_ign[-1] * 100} if args.save_attn: attns_path = os.path.join(args.load_path, f"{os.path.splitext(args.test_file)[0]}.attns") print(f"saving attentions into {attns_path} ...") with open(attns_path, "wb") as f: pickle.dump(attns, f) return scores, output, official_results, results def dump_to_file(offi: list, offi_path: str, scores: list, score_path: str, results: list = [], res_path: str = "", thresh: float = None): ''' dump scores and (top-k) predictions to file. ''' print(f"saving official predictions into {offi_path} ...") json.dump(offi, open(offi_path, "w")) print(f"saving evaluations into {score_path} ...") headers = ["precision", "recall", "F1"] scores_pd = pd.DataFrame.from_dict(scores, orient="index", columns=headers) print(scores_pd) scores_pd.to_csv(score_path, sep='\t') if len(results) != 0: assert res_path != "" print(f"saving topk results into {res_path} ...") json.dump(results, open(res_path, "w")) if thresh is not None: thresh_path = os.path.join(os.path.dirname(offi_path), "thresh") if not os.path.exists(thresh_path): print(f"saving threshold into {thresh_path} ...") json.dump(thresh, open(thresh_path, "w")) return def main(): parser = argparse.ArgumentParser()
parser = add_args(parser)
0
2023-10-20 05:53:25+00:00
16k
xingchenshanyao/YOLOP-E
lib/core/function.py
[ { "identifier": "ConfusionMatrix", "path": "lib/core/evaluate.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc=1, conf=0.25, iou_thres=0.45):\n nc = 10 # 20230904 nc是类别数\n self.matrix = np.zeros((nc + 1, nc + 1))\n # import pdb;pdb.set_trace()\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = general.box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n # import pdb;pdb.set_trace()\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(np.int16)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[gc, detection_classes[m1[j]]] += 1 # correct\n else:\n # import pdb;pdb.set_trace()\n self.matrix[gc, self.nc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[self.nc, dc] += 1 # background FN\n\n def matrix(self):\n return self.matrix\n\n def plot(self, save_dir='', names=()):\n try:\n import seaborn as sn\n\n array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig = plt.figure(figsize=(12, 9), tight_layout=True)\n sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size\n labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels\n sn.heatmap(array, annot=self.nc < 30, annot_kws={\"size\": 8}, cmap='Blues', fmt='.2f', square=True,\n xticklabels=names + ['background FN'] if labels else \"auto\",\n yticklabels=names + ['background FP'] if labels else \"auto\").set_facecolor((1, 1, 1))\n fig.axes[0].set_xlabel('True')\n fig.axes[0].set_ylabel('Predicted')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n except Exception as e:\n pass\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "SegmentationMetric", "path": "lib/core/evaluate.py", "snippet": "class SegmentationMetric(object):\n '''\n imgLabel [batch_size, height(144), width(256)]\n confusionMatrix [[0(TN),1(FP)],\n [2(FN),3(TP)]]\n '''\n def __init__(self, numClass):\n self.numClass = numClass\n self.confusionMatrix = np.zeros((self.numClass,)*2)\n\n def pixelAccuracy(self):\n # return all class overall pixel accuracy\n # acc = (TP + TN) / (TP + TN + FP + TN)\n acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()\n return acc\n \n def lineAccuracy(self):\n Acc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=1) + 
1e-12)\n return Acc[1]\n\n def classPixelAccuracy(self):\n # return each category pixel accuracy(A more accurate way to call it precision)\n # acc = (TP) / TP + FP\n classAcc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=0) + 1e-12)\n return classAcc\n\n def meanPixelAccuracy(self):\n classAcc = self.classPixelAccuracy()\n meanAcc = np.nanmean(classAcc)\n return meanAcc\n\n def meanIntersectionOverUnion(self):\n # Intersection = TP Union = TP + FP + FN\n # IoU = TP / (TP + FP + FN)\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n mIoU = np.nanmean(IoU)\n return mIoU\n \n def IntersectionOverUnion(self):\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n return IoU[1]\n\n def genConfusionMatrix(self, imgPredict, imgLabel):\n # remove classes from unlabeled pixels in gt image and predict\n # print(imgLabel.shape)\n mask = (imgLabel >= 0) & (imgLabel < self.numClass)\n label = self.numClass * imgLabel[mask] + imgPredict[mask]\n count = np.bincount(label, minlength=self.numClass**2)\n confusionMatrix = count.reshape(self.numClass, self.numClass)\n return confusionMatrix\n\n def Frequency_Weighted_Intersection_over_Union(self):\n # FWIOU = [(TP+FN)/(TP+FP+TN+FN)] *[TP / (TP + FP + FN)]\n freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)\n iu = np.diag(self.confusionMatrix) / (\n np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -\n np.diag(self.confusionMatrix))\n FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()\n return FWIoU\n\n\n def addBatch(self, imgPredict, imgLabel):\n assert imgPredict.shape == imgLabel.shape\n self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)\n\n def reset(self):\n self.confusionMatrix = np.zeros((self.numClass, self.numClass))" }, { "identifier": "non_max_suppression", "path": "lib/core/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):\n \"\"\"Performs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n detections with shape: nx6 (x1, y1, x2, y2, conf, cls)\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n 
# If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "check_img_size", "path": "lib/core/general.py", "snippet": "def check_img_size(img_size, s=32):\n # Verify img_size is a multiple of stride s\n new_size = make_divisible(img_size, int(s)) # ceil gs-multiple # new_size = 640\n if new_size != img_size:\n print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))\n return new_size" }, { "identifier": "scale_coords", "path": "lib/core/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "xyxy2xywh", "path": "lib/core/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "xywh2xyxy", "path": "lib/core/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if 
isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "box_iou", "path": "lib/core/general.py", "snippet": "def box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1]) #(x2-x1)*(y2-y1)\n\n area1 = box_area(box1.T)\n area2 = box_area(box2.T)\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)" }, { "identifier": "coco80_to_coco91_class", "path": "lib/core/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n return x" }, { "identifier": "plot_images", "path": "lib/core/general.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = 
image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "ap_per_class", "path": "lib/core/general.py", "snippet": "def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n plot: Plot precision-recall curve at [email protected]\n save_dir: Plot save directory\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n\n # Create Precision-Recall curve and compute AP for each class\n px, py = np.linspace(0, 1, 1000), [] # for plotting\n pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898\n s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 
10 for mAP0.5...0.95)\n ap, p, r = np.zeros(s), np.zeros((unique_classes.shape[0], 1000)), np.zeros((unique_classes.shape[0], 1000))\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_l = (target_cls == c).sum() # number of labels\n n_p = i.sum() # number of predictions\n\n if n_p == 0 or n_l == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_l + 1e-16) # recall curve\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])\n if plot and (j == 0):\n py.append(np.interp(px, mrec, mpre)) # precision at [email protected]\n\n # Compute F1 score (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + 1e-16)\n i=r.mean(0).argmax()\n\n if plot:\n plot_pr_curve(px, py, ap, save_dir, names)\n\n return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')" }, { "identifier": "output_to_target", "path": "lib/core/general.py", "snippet": "def output_to_target(output):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n targets = []\n for i, o in enumerate(output):\n for *box, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n return np.array(targets)" }, { "identifier": "time_synchronized", "path": "lib/utils/utils.py", "snippet": "def time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()" }, { "identifier": "plot_img_and_mask", "path": "lib/utils/plot.py", "snippet": "def plot_img_and_mask(img, mask, index,epoch,save_dir):\r\n classes = mask.shape[2] if len(mask.shape) > 2 else 1\r\n fig, ax = plt.subplots(1, classes + 1)\r\n ax[0].set_title('Input image')\r\n ax[0].imshow(img)\r\n if classes > 1:\r\n for i in range(classes):\r\n ax[i+1].set_title(f'Output mask (class {i+1})')\r\n ax[i+1].imshow(mask[:, :, i])\r\n else:\r\n ax[1].set_title(f'Output mask')\r\n ax[1].imshow(mask)\r\n plt.xticks([]), plt.yticks([])\r\n # plt.show()\r\n plt.savefig(save_dir+\"/batch_{}_{}_seg.png\".format(epoch,index))\r" }, { "identifier": "plot_one_box", "path": "lib/utils/plot.py", "snippet": "def plot_one_box(x, img, color=None, label=None, line_thickness=None):\r\n # Plots one bounding box on image img 在图像上画一个检测框\r\n tl = line_thickness or round(0.0001 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\r\n color = color or [random.randint(0, 255) for _ in range(3)]\r\n c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\r\n cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\r\n if label:\r\n tf = max(tl - 1, 1) # font thickness\r\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\r\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\r\n cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled\r\n print(label)\r\n cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\r" }, { "identifier": "show_seg_result", "path": "lib/utils/plot.py", "snippet": "def show_seg_result(img, result, index, epoch, save_dir=None, is_ll=False,palette=None,is_demo=False,is_gt=False):\r\n # img = mmcv.imread(img)\r\n # img = img.copy()\r\n # seg = result[0]\r\n if 
palette is None:\r\n palette = np.random.randint(\r\n 0, 255, size=(3, 3))\r\n palette[0] = [0, 0, 0]\r\n palette[1] = [0, 255, 0]\r\n palette[2] = [255, 0, 0]\r\n palette = np.array(palette)\r\n assert palette.shape[0] == 3 # len(classes)\r\n assert palette.shape[1] == 3\r\n assert len(palette.shape) == 2\r\n \r\n if not is_demo:\r\n color_seg = np.zeros((result.shape[0], result.shape[1], 3), dtype=np.uint8)\r\n for label, color in enumerate(palette):\r\n color_seg[result == label, :] = color\r\n else:\r\n color_area = np.zeros((result[0].shape[0], result[0].shape[1], 3), dtype=np.uint8)\r\n \r\n # for label, color in enumerate(palette):\r\n # color_area[result[0] == label, :] = color\r\n\r\n color_area[result[0] == 1] = [0, 255, 0]\r\n color_area[result[1] ==1] = [255, 0, 0]\r\n color_seg = color_area\r\n\r\n # convert to BGR\r\n color_seg = color_seg[..., ::-1]\r\n # print(color_seg.shape)\r\n color_mask = np.mean(color_seg, 2)\r\n img[color_mask != 0] = img[color_mask != 0] * 0.5 + color_seg[color_mask != 0] * 0.5\r\n # img = img * 0.5 + color_seg * 0.5\r\n img = img.astype(np.uint8)\r\n img = cv2.resize(img, (1280,720), interpolation=cv2.INTER_LINEAR)\r\n\r\n if not is_demo:\r\n if not is_gt:\r\n if not is_ll:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_da_segresult.png\".format(epoch,index), img)\r\n else:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_ll_segresult.png\".format(epoch,index), img)\r\n else:\r\n if not is_ll:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_da_seg_gt.png\".format(epoch,index), img)\r\n else:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_ll_seg_gt.png\".format(epoch,index), img) \r\n return img\r" } ]
import time import torch import numpy as np import json import random import cv2 import os import math import wandb from lib.core.evaluate import ConfusionMatrix,SegmentationMetric from lib.core.general import non_max_suppression,check_img_size,scale_coords,xyxy2xywh,xywh2xyxy,box_iou,coco80_to_coco91_class,plot_images,ap_per_class,output_to_target from lib.utils.utils import time_synchronized from lib.utils import plot_img_and_mask,plot_one_box,show_seg_result from threading import Thread from PIL import Image from torchvision import transforms from pathlib import Path from torch.cuda import amp from tqdm import tqdm from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
11836
if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # 检测图片? t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation # 可驾驶区域分割评估 _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation # 车道线分割评估 _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS # 非极大值抑制 t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), 
mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det): det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round() for *xyxy,conf,cls in reversed(det): #print(cls) # import pdb;pdb.set_trace() label_det_pred = f'{names[int(cls)]} {conf:.3f}' plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det) labels = target[0][target[0][:, 0] == i, 1:] # print(labels)
id_dict_SDExpressway = { 0:'Car', 1:'Truck', 2:'Guidance Sign', 3:'Warning Sign', 4:'Pending Sign', 5:'Speed Limit Sign', 6:'Emergency Telephone Sign', 7:'Directional Sign', 8:'Straight Ahead Arrow', 9:'Straight or Right Turn Arrow'} def train(cfg, train_loader, model, criterion, optimizer, scaler, epoch, num_batch, num_warmup, writer_dict, logger, device, rank=-1): """ train for one epoch Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return total_loss, head_losses - writer_dict: outputs(2,) output[0] len:3, [1,3,32,32,85], [1,3,16,16,85], [1,3,8,8,85] output[1] len:1, [2,256,256] output[2] len:1, [2,256,256] target(2,) target[0] [1,n,5] target[1] [2,256,256] target[2] [2,256,256] Returns: None """ batch_time = AverageMeter() # batch_time = <lib.core.function.AverageMeter object at 0x7f0255618970> data_time = AverageMeter() # data_time = <lib.core.function.AverageMeter object at 0x7f025561a4f0> losses = AverageMeter() # losses = <lib.core.function.AverageMeter object at 0x7f02402e7cd0> # switch to train mode model.train() start = time.time() # start = 1688805138.6791408 for i, (input, target, paths, shapes) in enumerate(train_loader): # i=0 # target = [tensor([[0.0000e+00,...335e-01]]), tensor([[[[1., 1., 1...., 0.]]]]), tensor([[[[1., 1., 1...., 0.]]]])] # paths = ('/home/xingchen/Study...3225df.jpg', '/home/xingchen/Study...49926c.jpg', ...) # shapes = (((720, 1280), ((0.5, 0.5), (0.0, 12.0))), ((...), (...)), ...) intermediate = time.time() # intermediate = 1688805496.5324085 #print('tims:{}'.format(intermediate-start)) num_iter = i + num_batch * (epoch - 1) # num_iter = 0 # num_batch = 4375 if num_iter < num_warmup: # warm up lf = lambda x: ((1 + math.cos(x * math.pi / cfg.TRAIN.END_EPOCH)) / 2) * \ (1 - cfg.TRAIN.LRF) + cfg.TRAIN.LRF # cosine xi = [0, num_warmup] # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 # 偏置lr从0.1下降到lr0,所有其他lr从0.0上升到lr0 x['lr'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_BIASE_LR if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_MOMENTUM, cfg.TRAIN.MOMENTUM]) data_time.update(time.time() - start) if not cfg.DEBUG: input = input.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target with amp.autocast(enabled=device.type != 'cpu'): outputs = model(input) # outputs = [[tensor([[[[[ 8.8806e...ackward0>), tensor([[[[[ 4.6631e...ackward0>), tensor([[[[[ 1.4758e...ackward0>)], tensor([[[[0.5151, 0...ackward0>), tensor([[[[0.4868, 0...ackward0>)] total_loss, head_losses = criterion(outputs, target, shapes,model) # print(head_losses) # compute gradient and do update step optimizer.zero_grad() scaler.scale(total_loss).backward() scaler.step(optimizer) scaler.update() if rank in [-1, 0]: # measure accuracy and record loss losses.update(total_loss.item(), input.size(0)) # _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(), # target.detach().cpu().numpy()) # acc.update(avg_acc, cnt) # measure elapsed time batch_time.update(time.time() - start) end = time.time() if i % cfg.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\t' \ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \ 'Speed {speed:.1f} samples/s\t' \ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \ 'Loss {loss.val:.5f} 
({loss.avg:.5f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) # writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 def validate(epoch,config, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, logger=None, device='cpu', rank=-1,nc = 1): """ validata Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return - writer_dict: Return: None """ # setting max_stride = 32 weights = None save_dir = output_dir + os.path.sep + 'visualization' # save_dir = 'runs/BddDataset/_2023-07-09-09-50/visualization' if not os.path.exists(save_dir): os.mkdir(save_dir) # print(save_dir) _, imgsz = [check_img_size(x, s=max_stride) for x in config.MODEL.IMAGE_SIZE] #imgsz is multiple of max_stride batch_size = config.TRAIN.BATCH_SIZE_PER_GPU * len(config.GPUS) # batch_size = 16 test_batch_size = config.TEST.BATCH_SIZE_PER_GPU * len(config.GPUS) # test_batch_size = 16 training = False is_coco = False #is coco dataset save_conf=False # save auto-label confidences verbose=False save_hybrid=False log_imgs,wandb = min(16,100), None nc = 10 #20230904 iouv = torch.linspace(0.5,0.95,10).to(device) #iou vector for [email protected]:0.95 niou = iouv.numel() # niou = 10 try: except ImportError: wandb = None log_imgs = 0 seen = 0 # import pdb;pdb.set_trace() confusion_matrix = ConfusionMatrix(nc=model.nc) #detector confusion matrix # confusion matrix 混合矩阵 da_metric = SegmentationMetric(config.num_seg_class) #segment confusion matrix ll_metric = SegmentationMetric(2) #segment confusion matrix # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} # names = {'0':0} names = id_dict_SDExpressway #20230904 colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] # colors = [[191, 83, 111]] coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95') # s = ' Class Images Targets P R [email protected] [email protected]:.95' p, r, f1, mp, mr, map50, map, t_inf, t_nms = 0., 0., 0., 0., 0., 0., 0., 0., 0. losses = AverageMeter() da_acc_seg = AverageMeter() da_IoU_seg = AverageMeter() da_mIoU_seg = AverageMeter() ll_acc_seg = AverageMeter() ll_IoU_seg = AverageMeter() ll_mIoU_seg = AverageMeter() T_inf = AverageMeter() T_nms = AverageMeter() # switch to train mode model.eval() jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)): if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # 检测图片? 
t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation # 可驾驶区域分割评估 _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation # 车道线分割评估 _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS # 非极大值抑制 t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # 
plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det): det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round() for *xyxy,conf,cls in reversed(det): #print(cls) # import pdb;pdb.set_trace() label_det_pred = f'{names[int(cls)]} {conf:.3f}' plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det) labels = target[0][target[0][:, 0] == i, 1:] # print(labels)
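The warm-up branch in the train loop above ramps each optimizer parameter group from its starting value to initial_lr * lf(epoch) by linear interpolation over the first num_warmup iterations, with lf a cosine decay. A small sketch of that schedule, with made-up numbers standing in for the cfg.TRAIN.* values:

import math
import numpy as np

end_epoch, lrf = 240, 0.2          # stand-ins for cfg.TRAIN.END_EPOCH / cfg.TRAIN.LRF
lf = lambda x: ((1 + math.cos(x * math.pi / end_epoch)) / 2) * (1 - lrf) + lrf  # cosine decay

num_warmup, initial_lr = 1000, 0.01  # illustrative values
xi = [0, num_warmup]
for num_iter in (0, 500, 1000):
    # Non-bias parameter groups rise from 0.0 to initial_lr * lf(epoch) during warm-up.
    lr = np.interp(num_iter, xi, [0.0, initial_lr * lf(1)])
    print(num_iter, round(float(lr), 6))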
labels[:,1:5]=xywh2xyxy(labels[:,1:5])
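The next-line target above rewrites the label boxes from centre/size format into corner format before they are drawn; the helper itself is not shown in this excerpt, but a standard YOLO-style implementation of such a conversion looks like the following (treat it as a sketch, not necessarily the repository's exact xywh2xyxy):

import torch

def xywh2xyxy(x: torch.Tensor) -> torch.Tensor:
    # Convert an (n, 4) box tensor from (cx, cy, w, h) to (x1, y1, x2, y2).
    y = x.clone()
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top-left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top-left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom-right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom-right y
    return y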
6
2023-10-24 02:08:25+00:00
16k
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It includes methods for preprocessing, resizing, and normalizing audio data.\n Subclasses may override these methods to implement dataset-specific processing and resizing logic.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initialize an AudioDataset instance.\n\n Args:\n data_config (DictConfig): Configuration for loading the dataset, including paths, audio properties, etc.\n split (str): Specifies which split of the dataset to load (e.g., 'train', 'validation', 'test').\n evaluation (bool, optional): Indicates whether the dataset is for evaluation purposes. Defaults to False.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def normalize_audio(audio_np: np.ndarray, sample_rate: int) -> np.ndarray:\n \"\"\"Normalize the amplitude of the audio data to a standard range.\n\n This method utilizes PyDub's effects module to perform audio normalization.\n\n Args:\n audio_np (np.ndarray): Audio data represented as a NumPy array.\n sample_rate (int): The sample rate of the audio data.\n\n Returns:\n np.ndarray: The normalized audio data as a NumPy array.\n \"\"\"\n # Convert numpy array to AudioSegment\n audio_segment = AudioSegment(audio_np.tobytes(), frame_rate=int(sample_rate), sample_width=2, channels=1)\n\n # Normalize with PyDub\n normalized_audio_segment = effects.normalize(audio_segment)\n\n # Convert back to numpy\n normalized_audio_np = np.array(normalized_audio_segment.get_array_of_samples())\n\n return normalized_audio_np\n\n def _resize_op(self, audio: tf.Tensor, size: int) -> tf.Tensor:\n \"\"\"Resize the input audio to a specified size and normalize its amplitude to the range [0, 1].\n\n If the audio length is less than the specified size, zero padding is applied to reach the desired size.\n If the audio length is greater, it is truncated to the specified size.\n\n Args:\n audio (tf.Tensor): Input audio data as a TensorFlow tensor.\n size (int): The target size for the audio data.\n\n Returns:\n tf.Tensor: The resized and normalized audio data as a TensorFlow tensor.\n \"\"\"\n # Normalize dataset\n pylogger.info(\"Normalizing audio...\")\n audio = tf.cast(audio, dtype=tf.int16)\n # Calculate current length of the audio\n pylogger.info(\"Resizing audio to size {}...\".format(size))\n audio_length = tf.shape(audio)[0]\n audio = tf.cond(\n audio_length < size,\n lambda: tf.concat([audio, tf.zeros(size - audio_length, dtype=audio.dtype)], axis=0),\n lambda: audio[:size],\n )\n audio_np = tf.numpy_function(self.normalize_audio, [audio, self.data_config.audio_sample_rate], tf.int16)\n audio = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n audio = tf.cast(audio, dtype=tf.float32)\n pylogger.info(\"Converting audio to range [-1, 1]...\")\n max_intensity = self.data_config.audio_max_intensity\n audio = audio / max_intensity\n return audio\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocess the input audio data.\n\n This method resizes the audio data to a specified size based on the dataset configuration and normalizes the amplitude to the range [-1, +1].\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input audio data and any associated metadata.\n\n 
Returns:\n Dict[str, Any]: A dictionary containing the preprocessed audio data and any associated metadata.\n \"\"\"\n pylogger.info(\"Preprocessing audios for split {}...\".format(self.split))\n audio = self._resize_op(\n audio=d[\"audio\"], size=int(self.data_config.audio_sample_rate * self.data_config.audio_max_duration)\n )\n audio = tf.reshape(\n tensor=audio,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Audio reshaped to shape {}...\".format(audio.shape))\n return dict(data=audio, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Postprocess the output audio data.\n\n This method applies the inverse of the preprocessing steps to revert the audio data to its original form.\n\n Args:\n batch_data (Any): A batch of audio data to postprocess.\n inverse_scaler (Callable): A function that applies the inverse of the preprocessing steps.\n\n Returns:\n Any: A batch of postprocessed audio data.\n \"\"\"\n max_intensity = self.data_config.audio_max_intensity\n batch_audio = inverse_scaler(batch_data)\n batch_audio = batch_audio * max_intensity\n batch_post_processed = tf.cast(batch_audio, tf.int16)\n audio_np = tf.numpy_function(\n self.normalize_audio, [batch_post_processed, self.data_config.audio_sample_rate], tf.int16\n )\n batch_post_processed = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n return batch_post_processed" }, { "identifier": "ImageDataset", "path": "src/functional_diffusion_processes/datasets/image_dataset.py", "snippet": "class ImageDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for handling image datasets.\n\n Provides a structured way to load, preprocess, and post-process image data.\n This class can be extended to handle specific image datasets as required.\n\n Attributes:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initializes the ImageDataset object with dataset configurations.\n\n Args:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def _resize_op(image: Any, size: int) -> Any:\n \"\"\"Resizes the input image to the specified size and normalizes its values to the range [0,1].\n\n Args:\n image (Any): A tensor representing the input image.\n size (int): The target size for each dimension of the output image.\n\n Returns:\n Any: A tensor representing the resized and normalized image.\n \"\"\"\n # convert to range [0,1]\n pylogger.info(\"Converting image to range [0,1]...\")\n image = tf.image.convert_image_dtype(image=image, dtype=tf.float32)\n\n # resize to size\n pylogger.info(\"Resizing image to size {}...\".format(size))\n\n image = tf.image.resize(images=image, size=[size, size])\n\n return image\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocesses the input data by resizing, possibly flipping, and applying uniform dequantization.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data with keys 'image' and optionally 'label'.\n\n Returns:\n Dict[str, Any]: A dictionary 
containing the preprocessed data, with keys 'data' and optionally 'label'.\n \"\"\"\n image = self._resize_op(image=d[\"image\"], size=self.data_config.image_width_size)\n\n pylogger.info(\"Preprocessing images for split {}...\".format(self.split))\n\n if self.data_config.random_flip and not self.evaluation:\n pylogger.info(\"Applying random flips...\")\n image = tf.image.random_flip_left_right(image=image, seed=self.data_config.seed)\n\n if self.data_config.uniform_dequantization:\n pylogger.info(\"Applying uniform dequantization...\")\n image = (\n tf.random.uniform(shape=image.shape, dtype=tf.float32, seed=self.data_config.seed) + image * 255.0\n ) / 256.0\n\n image = tf.reshape(\n tensor=image,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Image reshaped to shape {}...\".format(image.shape))\n\n return dict(data=image, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Post-processes the output data by reverting the preprocessing steps.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to invert the scaling applied to the data.\n\n Returns:\n Any: A batch of postprocessed data, arranged in a grid for visualization.\n \"\"\"\n batch_post_processed = make_grid_image(\n ndarray=process_images(images=batch_data),\n inverse_scaler=inverse_scaler,\n )\n return batch_post_processed" }, { "identifier": "BaseDataset", "path": "src/functional_diffusion_processes/datasets/base_dataset.py", "snippet": "class BaseDataset(abc.ABC):\n \"\"\"Abstract base class for defining datasets.\n\n Provides a template for loading, preprocessing, and iterating over datasets.\n It encapsulates common dataset configurations and operations while allowing for dataset-specific\n preprocessing and post-processing through abstract methods.\n\n Attributes:\n dataset_builder: A builder object for loading the dataset.\n data_config (DictConfig): Configuration parameters for the dataset.\n split (str): Specifies which split of the dataset to load, e.g., 'train', 'validation', or 'test'.\n evaluation (bool): Indicates whether the dataset is for evaluation purposes.\n dataset_options: Options for configuring the dataset pipeline.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Abstract base class for defining datasets.\n\n This class provides a skeleton for defining datasets, with abstract methods for\n preprocessing data, generating batches of data, and resizing images. 
Subclasses\n must implement these methods to define their specific datasets.\n\n Args:\n data_config (DictConfig): A dictionary-like object containing the configuration for\n loading the dataset.\n\n split (str): A string specifying which split of the dataset to load.\n\n evaluation (bool): A boolean specifying whether the dataset is for evaluation purposes.\n \"\"\"\n self.dataset_builder = None\n self.data_config = data_config\n self.split = split\n self.evaluation = evaluation\n self.dataset_options = tf.data.Options()\n self.dataset_options.experimental_optimization.map_parallelization = True\n self.dataset_options.experimental_threading.private_threadpool_size = 48\n self.dataset_options.experimental_threading.max_intra_op_parallelism = 1\n\n @abc.abstractmethod\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Abstract method for preprocessing input data.\n\n Subclasses should override this method to implement dataset-specific preprocessing.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data.\n\n Returns:\n Dict[str, Any]: A dictionary containing the preprocessed data.\n \"\"\"\n raise NotImplementedError(\"Subclasses must implement preprocess_fn method.\")\n\n @abc.abstractmethod\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Abstract method for postprocessing output data.\n\n Subclasses should override this method to implement dataset-specific post-processing.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to inverse the scaling of the data.\n\n Returns:\n Any: A dictionary containing the postprocessed data.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the postprocess_fn method.\")\n\n def _generator(self) -> Iterator[Any]:\n \"\"\"Generate batches of preprocessed data.\n\n Loads the dataset, shuffles the data, applies preprocessing, and batches the data.\n Subclasses might override this method to implement dataset-specific batching logic.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n # load the dataset\n if isinstance(self.dataset_builder, tfds.core.DatasetBuilder):\n read_config = tfds.ReadConfig(options=self.dataset_options)\n if self.data_config.download:\n self.dataset_builder.download_and_prepare()\n ds = self.dataset_builder.as_dataset(\n split=self.split,\n shuffle_files=False,\n read_config=read_config,\n as_supervised=False,\n )\n else:\n ds = self.dataset_builder.with_options(options=self.dataset_options)\n\n ds = ds.shuffle(buffer_size=10000, seed=self.data_config.seed)\n\n # apply the preprocessing function to each element in the dataset\n ds = ds.map(map_func=self.preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # determine the batch size per device\n ds = ds.batch(batch_size=self.data_config.batch_size, drop_remainder=True)\n ds = ds.batch(batch_size=jax.device_count(), drop_remainder=True)\n\n ds = ds.repeat(count=100000 if not self.evaluation else 1)\n\n return iter(ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE))\n\n def __iter__(self) -> Iterator[Any]:\n \"\"\"Return an iterator that generates batches of preprocessed data.\n\n Calls the `_generator` method to obtain an iterator for generating preprocessed data batches.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n return self._generator()\n\n def __len__(self) -> int:\n \"\"\"Return the number of examples in the 
dataset.\n\n Obtains the total number of examples in the specified dataset split from the dataset builder's info attribute.\n\n Returns:\n int: The number of examples in the dataset.\n \"\"\"\n return self.dataset_builder.info.splits[self.split].num_examples" }, { "identifier": "Loss", "path": "src/functional_diffusion_processes/losses/base_loss.py", "snippet": "class Loss(abc.ABC):\n \"\"\"Abstract class representing a loss function.\n\n Provides a framework for defining custom loss functions by enforcing the implementation\n of `construct_loss_fn` method in any derived classes. This class holds a reference to\n a stochastic differential equation (SDE) object which is used to calculate the weight factor for the loss.\n\n Attributes:\n sde (SDE): The stochastic differential equation instance associated with this loss.\n \"\"\"\n\n def __init__(self, sde: SDE) -> None:\n \"\"\"Initializes the Loss instance with a given SDE.\n\n Args:\n sde (SDE): An SDE instance which might be used in the loss computation.\n \"\"\"\n self.sde = sde\n\n def construct_loss_fn(self, model: Any) -> Callable:\n \"\"\"Abstract method to construct a loss function for a given model.\n\n This method should be implemented by any derived class to define the loss\n computation specific to the type of loss being implemented.\n\n Args:\n model (Any): The model for which to construct the loss function.\n\n Returns:\n Callable: A callable representing the constructed loss function.\n\n Raises:\n NotImplementedError: If the method is not implemented by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the construct_loss_fn method.\")" }, { "identifier": "FIDMetric", "path": "src/functional_diffusion_processes/metrics/fid_metric.py", "snippet": "class FIDMetric:\n \"\"\"Class for computing the Frechet Inception Distance (FID) metric.\n\n This class facilitates the computation of the FID metric, which measures the similarity between two distributions of images.\n It precomputes features for the real dataset using a specified Inception feature extractor and provides methods to compute\n and store features for generated images, and to compute the FID and Inception Score (IS).\n\n Attributes:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n generated_pools (list): List to store features of generated images.\n generated_logits (list): List to store logits of generated images.\n real_features (dict): Dictionary to store precomputed features of real dataset.\n \"\"\"\n\n def __init__(\n self,\n metric_config: DictConfig,\n feature_extractor: InceptionFeatureExtractor,\n dataset: BaseDataset,\n ) -> None:\n \"\"\"Initializes the FIDMetric class with specified configurations, feature extractor, and dataset.\n\n Args:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n \"\"\"\n self.metric_config = metric_config\n self.feature_extractor = feature_extractor\n self.dataset = dataset\n self.generated_pools = []\n self.generated_logits = []\n try:\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n 
dataset_name=metric_config.dataset_name,\n )\n except FileNotFoundError:\n self._precompute_features(\n dataset_name=metric_config.dataset_name,\n save_path=metric_config.real_features_path,\n )\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n dataset_name=metric_config.dataset_name,\n )\n\n def _precompute_features(self, dataset_name: str, save_path: str) -> None:\n \"\"\"Precomputes and saves features for the real dataset.\n\n Args:\n dataset_name (str): Name of the dataset.\n save_path (str): Path where the computed features will be saved.\n \"\"\"\n tf.io.gfile.makedirs(path=save_path)\n\n tf.io.gfile.makedirs(os.path.join(save_path, f\"{dataset_name.lower()}_clean\"))\n\n # Use the feature extractor to compute features for the real dataset\n all_pools = self.feature_extractor.extract_features(\n dataset=self.dataset, save_path=save_path, dataset_name=dataset_name\n )\n\n # Save latent represents of the Inception network to disk or Google Cloud Storage\n filename = f\"{dataset_name.lower()}_stats.npz\"\n\n if jax.host_id() == 0:\n pylogger.info(\"Saving real dataset stats to: %s\" % os.path.join(save_path, filename))\n\n with tf.io.gfile.GFile(os.path.join(save_path, filename), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=all_pools)\n f_out.write(io_buffer.getvalue())\n\n def compute_fid(self, eval_dir, num_sampling_round) -> Tuple[float, float]:\n \"\"\"Computes the FID and Inception Score (IS) for the generated and real images.\n\n Args:\n eval_dir (str): Directory path for evaluation.\n num_sampling_round (int): Number of sampling rounds.\n\n Returns:\n Tuple[float, float]: A tuple containing the FID and Inception Score.\n \"\"\"\n real_pools = self.real_features[\"pool_3\"]\n if not self.feature_extractor.inception_v3 and not self.feature_extractor.inception_v3 == \"lenet\":\n if len(self.generated_logits) == 0 or len(self.generated_pools) == 0:\n if jax.host_id() == 0:\n # Load all statistics that have been previously computed and saved for each host\n for host in range(jax.host_count()):\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n wait_message = False\n while len(stats) < num_sampling_round:\n if not wait_message:\n print(\"Waiting for statistics on host %d\" % (host,))\n wait_message = True\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n time.sleep(10)\n\n for stat_file in stats:\n with tf.io.gfile.GFile(stat_file, \"rb\") as fin:\n stat = np.load(fin)\n\n self.generated_pools.append(stat[\"pool_3\"])\n self.generated_logits.append(stat[\"logits\"])\n\n all_logits = np.concatenate(self.generated_logits, axis=0)[: self.metric_config.num_samples]\n inception_score = tfgan.eval.classifier_score_from_logits(logits=all_logits)\n else:\n inception_score = -1\n\n all_pools = np.concatenate(self.generated_pools, axis=0)[: self.metric_config.num_samples]\n\n fid = tfgan.eval.frechet_classifier_distance_from_activations(activations1=real_pools, activations2=all_pools)\n\n return fid, inception_score\n\n def compute_and_store_generated_features(self, images: Any, sample_dir: str, round_num: int) -> None:\n \"\"\"Computes features for the generated images and stores them in a specified directory.\n\n Args:\n images (Any): Tensor representing the generated images.\n sample_dir (str): Directory where the features will be stored.\n round_num (int): Round number in the training process.\n \"\"\"\n latents = 
self.feature_extractor.extract_features(images)\n\n self.generated_pools.append(latents[\"pool_3\"])\n\n gc.collect()\n\n if self.feature_extractor.model_name == \"inception\" or self.feature_extractor.inception_v3:\n self.generated_logits.append(latents[\"logits\"])\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(\n io_buffer,\n pool_3=latents[\"pool_3\"],\n logits=latents[\"logits\"],\n )\n\n f_out.write(io_buffer.getvalue())\n\n elif self.feature_extractor.model_name == \"lenet\":\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=latents[\"pool_3\"])\n f_out.write(io_buffer.getvalue())" }, { "identifier": "Sampler", "path": "src/functional_diffusion_processes/samplers/base_sampler.py", "snippet": "class Sampler(abc.ABC):\n \"\"\"Abstract base class for creating sampler objects.\n\n This class serves as a template for creating sampler objects which are\n designed to generate samples of a stochastic process governed by a\n specified stochastic differential equation (SDE). The process of sampling\n is carried out by employing specified predictor and corrector methods.\n\n Attributes:\n predictor (Predictor): The predictor method to be used in the sampling process.\n corrector (Corrector): The corrector method to be used in the sampling process.\n sde (SDE): The stochastic differential equation governing the process to be sampled.\n sampler_config (DictConfig): Configuration settings for the sampler.\n\n Methods:\n make_sampler(predict_fn: Callable) -> Callable:\n Abstract method to create a sampling function based on the specified predictor,\n corrector, and SDE.\n \"\"\"\n\n def __init__(self, predictor: Predictor, corrector: Corrector, sde: SDE, sampler_config: DictConfig) -> None:\n \"\"\"Initializes the Sampler object with specified predictor, corrector, SDE, and configuration.\n\n Args:\n predictor (Predictor): The predictor method for the sampler.\n corrector (Corrector): The corrector method for the sampler.\n sde (SDE): The stochastic differential equation governing the process.\n sampler_config (DictConfig): Configuration settings for the sampler.\n \"\"\"\n super().__init__()\n self.predictor = predictor\n self.corrector = corrector\n self.sampler_config = sampler_config\n self.sde = sde\n\n def make_sampler(self, predict_fn: Callable, auxiliary_fn: Union[Any, Callable]) -> Callable:\n \"\"\"Abstract method to create a sampler function.\n\n This method is intended to be overridden by derived classes to provide\n specific implementations for creating a sampler function. 
The sampler\n function will utilize the specified predictor and corrector methods\n along with the provided SDE to generate samples of the stochastic process.\n\n Args:\n predict_fn (Callable): The model prediction function.\n auxiliary_fn (Callable): The auxiliary prediction function for the model.\n\n Returns:\n Callable: The constructed sampling function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the make_sampler method.\")" }, { "identifier": "SDE", "path": "src/functional_diffusion_processes/sdetools/base_sde.py", "snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()" }, { "identifier": "filter_mask", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def filter_mask(shape, radius):\n device_num, batch_size, rows, cols, n_channels = shape\n crow, ccol = int(rows / 2), int(cols / 2)\n center = [crow, ccol]\n x, y = jnp.ogrid[:rows, :cols]\n mask_area = (x - center[0]) ** 2 + (y - center[1]) ** 2 >= radius * radius\n mask = jnp.ones_like(mask_area)\n mask = jnp.where(mask_area, 0, mask)\n mask = mask.reshape(1, 1, rows, cols, 1)\n mask = jnp.repeat(mask, device_num, axis=0)\n mask = jnp.repeat(mask, batch_size, axis=1)\n mask = jnp.repeat(mask, n_channels, axis=4)\n return mask" }, { "identifier": "make_grid_image", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def make_grid_image(ndarray: Any, inverse_scaler: Callable, padding: int = 2, pad_value: float = 0.0) -> Any:\n \"\"\"Make a grid image from a Numpy Array.\n\n Args:\n ndarray: The Numpy Array.\n inverse_scaler: The inverse scaler.\n padding: The padding.\n pad_value: The padding value.\n\n Returns:\n The grid image.\n \"\"\"\n ndarray = jnp.asarray(ndarray)\n\n if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images\n ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)\n\n n_row = int(np.sqrt(ndarray.shape[0]))\n # make the mini-batch of images into a grid\n n_maps = ndarray.shape[0]\n x_maps = min(n_row, n_maps)\n ymaps = int(math.ceil(float(n_maps) / x_maps))\n height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)\n num_channels = ndarray.shape[3]\n grid = np.full((height * ymaps + padding, width * x_maps + padding, num_channels), pad_value).astype(np.float32)\n k = 0\n for y in range(ymaps):\n for x in range(x_maps):\n if k >= n_maps:\n break\n grid[\n y * height + padding : (y + 1) * height,\n x * width + padding : (x + 1) * width,\n ] = ndarray[k]\n k = k + 1\n\n ndarr = inverse_scaler(grid)\n ndarr = jnp.clip(ndarr * 255, 0, 255).astype(jnp.uint8)\n return ndarr" }, { "identifier": "process_images", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def process_images(images: Any) -> Any:\n \"\"\"Reshape images to the correct shape.\n\n Args:\n images: Tensor of images to reshape.\n\n Returns:\n A tensor of images with the correct shape.\n \"\"\"\n w = np.sqrt(images.shape[2]).astype(int)\n h = np.sqrt(images.shape[2]).astype(int)\n o = images.shape[3]\n return images.reshape(-1, w, h, o)" }, { "identifier": "save_samples", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def save_samples(round_num: int, samples: Any, file_path: str) -> None:\n \"\"\"Save samples to a file.\n\n Args:\n round_num: The round number of the evaluation.\n samples: Tensor of samples to save.\n file_path: string of the Path to the file where the samples will be saved.\n \"\"\"\n for i in range(samples.shape[0]):\n clean_path = os.path.join(file_path, f\"clean/samples_{round_num}_{i}.npy\")\n np.save(clean_path, samples[i])\n samples_path = os.path.join(file_path, 
f\"samples_{round_num}.npz\")\n with tf.io.gfile.GFile(samples_path, \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, samples=samples)\n f_out.write(io_buffer.getvalue())" }, { "identifier": "to_grayscale", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "@jax.pmap\ndef to_grayscale(images):\n weights = np.array([0.2989, 0.5870, 0.1140])[None, None, None, :] # Extend dimensions\n grayscale_images = np.sum(images * weights, axis=-1)\n return grayscale_images" }, { "identifier": "get_data_inverse_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_inverse_scaler(is_centered: bool) -> Callable:\n \"\"\"Inverse data normalizer.\n\n Rescale data to original range at the end of the diffusion.\n\n Args:\n is_centered: boolean if True data will rescaled from [-1, 1] to [0, 1].\n \"\"\"\n if is_centered:\n # Rescale [-1, 1] to [0, 1]\n return lambda x: (x + 1.0) / 2.0\n else:\n return lambda x: x" }, { "identifier": "get_data_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_scaler(is_centered: bool) -> Callable:\n \"\"\"Normalize data. Assume data are always in [0, 1].\n\n Args:\n is_centered: boolean if True data will be centered in [-1, 1].\n \"\"\"\n if is_centered:\n # Rescale to [-1, 1]\n return lambda x: x * 2.0 - 1.0\n else:\n return lambda x: x" }, { "identifier": "TrainState", "path": "src/functional_diffusion_processes/utils/training_state.py", "snippet": "class TrainState(train_state.TrainState):\n \"\"\"The training state for the model.\"\"\"\n\n opt_state_params: Any\n ema_params: Any\n rng: jax.random.PRNGKey" }, { "identifier": "colorizing_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def colorizing_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, gray_scale_img: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform colorizing task on a given grayscale image.\n\n Args:\n sample_fn (Callable): The sampling function used for colorization.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for colorization.\n gray_scale_img (jnp.ndarray): The grayscale image to be colorized.\n\n Returns:\n Tuple: The updated state and the colorized image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, gray_scale_img)" }, { "identifier": "construct_sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_sampling_fn(model: flax.linen.Module, sampler: Sampler) -> Callable:\n \"\"\"Construct a sampling function for generating samples from the model.\n\n Args:\n model (flax.linen.Module): The model instance from which to generate samples.\n sampler (Sampler): The sampler instance used for sampling.\n\n Returns:\n Callable: The constructed sampling function.\n \"\"\"\n predict_fn = model.make_predict_fn()\n if isinstance(model, BaseMAML):\n super_resolution_fn = model.make_super_resolution_fn()\n sample_fn = sampler.make_sampler(predict_fn, super_resolution_fn)\n else:\n sample_fn = sampler.make_sampler(predict_fn, None)\n return sample_fn" }, { "identifier": "construct_train_step", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_train_step(optimizer, loss_fn) -> Callable:\n \"\"\"Construct a train step function to be used in the training loop.\n\n This function creates a training step function which, when called, performs\n a single 
step of training including forward pass, loss computation, and\n backward pass for gradient computation and updates.\n\n Args:\n optimizer: The optimizer instance used for updating model parameters.\n loss_fn: The loss function used for computing the loss.\n\n Returns:\n Callable: The constructed train step function.\n \"\"\"\n\n @partial(jax.pmap, axis_name=\"device\")\n def train_fn(\n rng,\n params,\n optim_params,\n step,\n batch_input,\n batch,\n ):\n grad_params, (new_rng, loss, loss_inner, batch_reconstructed, batch_corrupted, target) = loss_fn(\n rng, params, step, batch_input, batch\n )\n\n loss = jax.lax.pmean(loss, axis_name=\"device\")\n grad_params = jax.lax.pmean(grad_params, axis_name=\"device\")\n\n updates, optim_params = optimizer.update(grad_params, optim_params, params)\n\n params = optax.apply_updates(params, updates)\n params = clip_learning_rates(params)\n return new_rng, loss, loss_inner, params, optim_params, batch_reconstructed, batch_corrupted, target\n\n return train_fn" }, { "identifier": "inpainting_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def inpainting_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, image: jnp.ndarray, mask: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform inpainting task on a given image using a mask.\n\n Args:\n sample_fn (Callable): The sampling function used for inpainting.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for inpainting.\n image (jnp.ndarray): The image to be inpainted.\n mask (jnp.ndarray): The mask used for inpainting.\n\n Returns:\n Tuple: The updated state and the inpainted image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, image, mask)" }, { "identifier": "sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def sampling_fn(sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray) -> Tuple:\n \"\"\"Perform sampling task using a given sampling function.\n\n Args:\n sample_fn (Callable): The sampling function.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for sampling.\n\n Returns:\n Tuple: The updated state after performing the sampling.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params)" } ]
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
13,980
# Resume training when intermediate checkpoints are detected if self.training_config.resume_training: pylogger.warning("Resuming training from the latest checkpoint.") if self.logging.use_wandb and self.model_name != "local": model_file = wandb.use_artifact(self.model_name).download() state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state) else: state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state) return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input def train_step( self, train_step_fn: Callable, carry_state: Tuple, batch: jnp.ndarray, batch_input: jnp.ndarray, ) -> Tuple: """Perform a single training step, updating the model parameters. Args: train_step_fn (Callable): The train step function. carry_state (Tuple): The current state of the model and optimizer. batch (jnp.ndarray): The batch of data used for training. batch_input (jnp.ndarray): The input data to the model. Returns: Tuple: The updated state after performing the training step. """ (rng, state) = carry_state ( new_rng, loss, loss_inner, new_params, new_optim_state, batch_reconstructed, batch_corrupted, target, ) = train_step_fn( rng, state.params, state.opt_state_params, state.step, batch_input, batch, ) ema_rate = self.training_config.ema_rate new_params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate), state.ema_params, new_params, ) # update the state new_state = state.replace( rng=flax.jax_utils.unreplicate(new_rng), step=state.step + 1, opt_state_params=new_optim_state, params=new_params, ema_params=new_params_ema, ) new_carry_state = (new_rng, new_state) loss = flax.jax_utils.unreplicate(loss) step = int(flax_utils.unreplicate(state.step)) # Log the training progress if jax.host_id() == 0 and step % self.training_config.log_freq == 0: pylogger.info("step: %d, training_loss: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, "loss": loss}, step=step) if loss_inner is not None: loss_inner = flax.jax_utils.unreplicate(loss_inner) for inner_step, loss in enumerate(loss_inner): pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step) return new_carry_state, batch_reconstructed, batch_corrupted, target def save_checkpoint(self, step, run, state): pylogger.info("Saving the model at step %d." % (step,)) # Log the evaluation progress # Save the model parameters ( params, opt_state_params, step_, ema_params, ) = flax_utils.unreplicate( ( state.params, state.opt_state_params, state.step, state.ema_params, ) ) saved_state = state.replace( step=step_, opt_state_params=opt_state_params, params=params, ema_params=ema_params, ) checkpoint_file = checkpoints.save_checkpoint( self.checkpoint_dir, saved_state, step=step_ // self.training_config.eval_freq, keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember
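The exponential moving average kept by train_step above blends every leaf of the parameter pytree towards the freshly updated parameters with weight ema_rate. A minimal stand-alone sketch of that update (the rate and parameter shapes are illustrative, not the repository's configuration):

import jax
import jax.numpy as jnp

ema_rate = 0.999  # illustrative; normally read from training_config.ema_rate
params = {"w": jnp.ones((3,)), "b": jnp.full((3,), 2.0)}
ema_params = {"w": jnp.zeros((3,)), "b": jnp.zeros((3,))}

# Same tree_map update as in train_step: ema <- ema * rate + params * (1 - rate).
new_ema_params = jax.tree_map(
    lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
    ema_params,
    params,
)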
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6)) state = TrainState.create( apply_fn=model.apply, params=params, tx=self.optimizer, opt_state_params=self.optimizer.init(params), rng=rng, ema_params=params, ) train_step_fn = construct_train_step(self.optimizer, self.loss_obj.construct_loss_fn(model)) sample_fn = construct_sampling_fn(model, self.sampler) # Resume training when intermediate checkpoints are detected if self.training_config.resume_training: 
pylogger.warning("Resuming training from the latest checkpoint.") if self.logging.use_wandb and self.model_name != "local": model_file = wandb.use_artifact(self.model_name).download() state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state) else: state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state) return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input def train_step( self, train_step_fn: Callable, carry_state: Tuple, batch: jnp.ndarray, batch_input: jnp.ndarray, ) -> Tuple: """Perform a single training step, updating the model parameters. Args: train_step_fn (Callable): The train step function. carry_state (Tuple): The current state of the model and optimizer. batch (jnp.ndarray): The batch of data used for training. batch_input (jnp.ndarray): The input data to the model. Returns: Tuple: The updated state after performing the training step. """ (rng, state) = carry_state ( new_rng, loss, loss_inner, new_params, new_optim_state, batch_reconstructed, batch_corrupted, target, ) = train_step_fn( rng, state.params, state.opt_state_params, state.step, batch_input, batch, ) ema_rate = self.training_config.ema_rate new_params_ema = jax.tree_map( lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate), state.ema_params, new_params, ) # update the state new_state = state.replace( rng=flax.jax_utils.unreplicate(new_rng), step=state.step + 1, opt_state_params=new_optim_state, params=new_params, ema_params=new_params_ema, ) new_carry_state = (new_rng, new_state) loss = flax.jax_utils.unreplicate(loss) step = int(flax_utils.unreplicate(state.step)) # Log the training progress if jax.host_id() == 0 and step % self.training_config.log_freq == 0: pylogger.info("step: %d, training_loss: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, "loss": loss}, step=step) if loss_inner is not None: loss_inner = flax.jax_utils.unreplicate(loss_inner) for inner_step, loss in enumerate(loss_inner): pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss)) if self.logging.use_wandb: wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step) return new_carry_state, batch_reconstructed, batch_corrupted, target def save_checkpoint(self, step, run, state): pylogger.info("Saving the model at step %d." % (step,)) # Log the evaluation progress # Save the model parameters ( params, opt_state_params, step_, ema_params, ) = flax_utils.unreplicate( ( state.params, state.opt_state_params, state.step, state.ema_params, ) ) saved_state = state.replace( step=step_, opt_state_params=opt_state_params, params=params, ema_params=ema_params, ) checkpoint_file = checkpoints.save_checkpoint( self.checkpoint_dir, saved_state, step=step_ // self.training_config.eval_freq, keep=np.inf, ) if self.logging.use_wandb: wandb_model_artifact_name = str(step_) + "_" + run.id wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model") wandb_model.add_file(checkpoint_file) run.log_artifact(wandb_model) # noinspection PyProtectedMember
def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None:
2
2023-10-24 22:01:35+00:00
16k
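The train_step in the record above maintains an exponential moving average (EMA) of the model parameters alongside the optimizer state. Below is a minimal standalone sketch of that update pattern, using jax.tree_util.tree_map over parameter pytrees; the ema_rate value here is only illustrative, not the one from the record's training config.

import jax
import jax.numpy as jnp

def ema_update(ema_params, new_params, ema_rate=0.999):
    # Blend the freshly optimized parameters into the EMA copy, leaf by leaf.
    return jax.tree_util.tree_map(
        lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
        ema_params,
        new_params,
    )

# Toy parameter pytrees: after one update the EMA moves 10% of the way to the new values.
ema = {"w": jnp.zeros(3)}
new = {"w": jnp.ones(3)}
ema = ema_update(ema, new, ema_rate=0.9)  # {"w": [0.1, 0.1, 0.1]}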
KosinskiLab/pyTME
tme/tests/test_structure.py
[ { "identifier": "Structure", "path": "tme/structure.py", "snippet": "class Structure:\n \"\"\"Represents atomic structures in accordance with the Protein Data Bank (PDB)\n format specification.\n\n Attributes\n ----------\n record_type : NDArray\n Type of the record, e.g., ATOM, HETATM. Array shape = (n,)\n atom_serial_number : NDArray\n Serial number assigned to each atom. Array shape = (n,)\n atom_name : NDArray\n Standardized names for each atom. Array shape = (n,)\n atom_coordinate : NDArray\n The 3D Cartesian coordinates of each atom in x, y, z. Array shape = (n,3 )\n alternate_location_indicator : NDArray\n Indicator for alternate locations of an atom if it exists in multiple places.\n Array shape = (n,)\n residue_name : NDArray\n Standard residue names where each atom belongs. Array shape = (n,)\n chain_identifier : NDArray\n Identifier for the chain where each atom is located. Array shape = (n,)\n residue_sequence_number : NDArray\n Sequence number of the residue in the protein chain for each atom.\n Array shape = (n,)\n code_for_residue_insertion : NDArray\n Code to denote any residue insertion. Array shape = (n,)\n occupancy : NDArray\n Occupancy factor of each atom, indicating the fraction of time the atom\n is located at its position. Array shape = (n,)\n temperature_factor : NDArray\n Measure of the atomic displacement or B-factor for each atom. Array shape = (n,)\n segment_identifier : NDArray\n Identifier for the segment where each atom belongs. Array shape = (n,)\n element_symbol : NDArray\n Atomic element symbol for each atom. Array shape = (n,)\n charge : NDArray\n Charge on the atom. Array shape = (n,)\n details : dict\n Any additional or auxiliary details. Array shape = (n,)\n\n References\n ----------\n .. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html\n \"\"\"\n\n #: Return a numpy array with record types, e.g. 
ATOM, HETATM.\n record_type: NDArray\n\n #: Return a numpy array with serial number of each atom.\n atom_serial_number: NDArray\n\n #: Return a numpy array with name of each atom.\n atom_name: NDArray\n\n #: Return a numpy array with coordinates of each atom in x, y, z.\n atom_coordinate: NDArray\n\n #: Return a numpy array with alternate location indicates of each atom.\n alternate_location_indicator: NDArray\n\n #: Return a numpy array with originating residue names of each atom.\n residue_name: NDArray\n\n #: Return a numpy array with originating structure chain of each atom.\n chain_identifier: NDArray\n\n #: Return a numpy array with originating residue id of each atom.\n residue_sequence_number: NDArray\n\n #: Return a numpy array with insertion information d of each atom.\n code_for_residue_insertion: NDArray\n\n #: Return a numpy array with occupancy factors of each atom.\n occupancy: NDArray\n\n #: Return a numpy array with B-factors for each atom.\n temperature_factor: NDArray\n\n #: Return a numpy array with segment identifier for each atom.\n segment_identifier: NDArray\n\n #: Return a numpy array with element symbols of each atom.\n element_symbol: NDArray\n\n #: Return a numpy array with charges of each atom.\n charge: NDArray\n\n #: Returns a dictionary with class instance metadata.\n details: dict\n\n def __post_init__(self, *args, **kwargs):\n \"\"\"\n Initialize the structure and populate header details.\n\n Raises\n ------\n ValueError\n If other NDArray attributes to not match the number of atoms.\n If the shape of atom_coordinates and chain_identifier doesn't match.\n \"\"\"\n self._elements = Elements()\n self.details = self._populate_details(self.details)\n\n n_atoms = self.atom_coordinate.shape[0]\n for attribute in self.__dict__:\n value = getattr(self, attribute)\n if type(value) != np.ndarray:\n continue\n if value.shape[0] != n_atoms:\n raise ValueError(\n f\"Expected shape of {attribute}: {n_atoms}, got {value.shape[0]}.\"\n )\n\n def __getitem__(self, indices: List[int]) -> \"Structure\":\n \"\"\"\n Get a Structure instance for specified indices.\n\n Parameters\n ----------\n indices : Union[int, bool, NDArray]\n The indices to get.\n\n Returns\n -------\n Structure\n The Structure instance for the given indices.\n \"\"\"\n if type(indices) in (int, bool):\n indices = (indices,)\n\n indices = np.asarray(indices)\n attributes = (\n \"record_type\",\n \"atom_serial_number\",\n \"atom_name\",\n \"atom_coordinate\",\n \"alternate_location_indicator\",\n \"residue_name\",\n \"chain_identifier\",\n \"residue_sequence_number\",\n \"code_for_residue_insertion\",\n \"occupancy\",\n \"temperature_factor\",\n \"segment_identifier\",\n \"element_symbol\",\n \"charge\",\n )\n kwargs = {attr: getattr(self, attr)[indices] for attr in attributes}\n ret = self.__class__(**kwargs, details={})\n return ret\n\n def __repr__(self):\n \"\"\"\n Return a string representation of the Structure.\n\n Returns\n -------\n str\n The string representation.\n \"\"\"\n unique_chains = \"-\".join(\n [\n \",\".join([str(x) for x in entity])\n for entity in self.details[\"unique_chains\"]\n ]\n )\n min_atom = np.min(self.atom_serial_number)\n max_atom = np.max(self.atom_serial_number)\n n_atom = self.atom_serial_number.size\n\n min_residue = np.min(self.residue_sequence_number)\n max_residue = np.max(self.residue_sequence_number)\n n_residue = self.residue_sequence_number.size\n\n repr_str = (\n f\"Structure object at {id(self)}\\n\"\n f\"Unique Chains: {unique_chains}, \"\n f\"Atom Range: 
{min_atom}-{max_atom} [N = {n_atom}], \"\n f\"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]\"\n )\n return repr_str\n\n def get_chains(self) -> List[str]:\n \"\"\"\n Returns a list of available chains.\n\n Returns\n -------\n list\n The list of available chains.\n \"\"\"\n return list(self.details[\"chain_weight\"].keys())\n\n def copy(self) -> \"Structure\":\n \"\"\"\n Returns a copy of the Structure instance.\n\n Returns\n -------\n Structure\n The copied Structure instance.\n \"\"\"\n return deepcopy(self)\n\n def _populate_details(self, details: Dict = {}) -> Dict:\n \"\"\"\n Populate the details dictionary with the data from the Structure instance.\n\n Parameters\n ----------\n details : dict, optional\n The initial details dictionary, by default {}.\n\n Returns\n -------\n dict\n The populated details dictionary.\n \"\"\"\n details[\"weight\"] = np.sum(\n [self._elements[atype].atomic_weight for atype in self.element_symbol]\n )\n\n label, idx, chain = np.unique(\n self.chain_identifier, return_inverse=True, return_index=True\n )\n chain_weight = np.bincount(\n chain,\n [self._elements[atype].atomic_weight for atype in self.element_symbol],\n )\n labels = self.chain_identifier[idx]\n details[\"chain_weight\"] = {key: val for key, val in zip(labels, chain_weight)}\n\n # Group non-unique chains in separate lists in details[\"unique_chains\"]\n details[\"unique_chains\"], temp = [], {}\n for chain_label in label:\n index = len(details[\"unique_chains\"])\n chain_sequence = \"\".join(\n [\n str(y)\n for y in self.element_symbol[\n np.where(self.chain_identifier == chain_label)\n ]\n ]\n )\n if chain_sequence not in temp:\n temp[chain_sequence] = index\n details[\"unique_chains\"].append([chain_label])\n continue\n idx = temp.get(chain_sequence)\n details[\"unique_chains\"][idx].append(chain_label)\n\n filtered_data = [\n (label, integer)\n for label, integer in zip(\n self.chain_identifier, self.residue_sequence_number\n )\n ]\n filtered_data = sorted(filtered_data, key=lambda x: x[0])\n details[\"chain_range\"] = {}\n for label, values in groupby(filtered_data, key=lambda x: x[0]):\n values = [int(x[1]) for x in values]\n details[\"chain_range\"][label] = (min(values), max(values))\n\n return details\n\n @classmethod\n def from_file(\n cls,\n filename: str,\n keep_non_atom_records: bool = False,\n filter_by_elements: set = None,\n filter_by_residues: set = None,\n ) -> \"Structure\":\n \"\"\"\n Reads in an mmcif or pdb file and converts it into class instance.\n\n Parameters\n ----------\n filename : str\n Path to the mmcif or pdb file.\n keep_non_atom_records : bool, optional\n Wheter to keep residues that are not labelled ATOM.\n filter_by_elements: set, optional\n Which elements to keep. Default corresponds to all elements.\n filter_by_residues: set, optional\n Which residues to keep. 
Default corresponds to all residues.\n\n Raises\n ------\n ValueError\n If the extension is not '.pdb' or '.cif'.\n\n Returns\n -------\n Structure\n Read in structure file.\n \"\"\"\n _, file_extension = splitext(basename(filename.upper()))\n if file_extension == \".PDB\":\n func = cls._load_pdb\n elif file_extension == \".CIF\":\n func = cls._load_mmcif\n else:\n raise NotImplementedError(\n \"Could not determine structure filetype from extension.\"\n \" Supported filetypes are mmcif (.cif) and pdb (.pdb).\"\n )\n data = func(filename)\n\n keep = np.ones(data[\"element_symbol\"].size, dtype=bool)\n if filter_by_elements:\n keep = np.logical_and(\n keep,\n np.in1d(data[\"element_symbol\"], np.array(list(filter_by_elements))),\n )\n if filter_by_residues:\n keep = np.logical_and(\n keep, np.in1d(data[\"residue_name\"], np.array(list(filter_by_residues)))\n )\n if not keep_non_atom_records:\n keep = np.logical_and(keep, data[\"record_type\"] == \"ATOM\")\n\n for key in data:\n if key == \"details\":\n continue\n if type(data[key]) == np.ndarray:\n data[key] = data[key][keep]\n else:\n data[key] = [x for x, flag in zip(data[key], keep) if flag]\n\n data[\"details\"][\"filepath\"] = filename\n\n return cls(**data)\n\n @staticmethod\n def _load_mmcif(filename: str) -> Dict:\n \"\"\"\n Parses a macromolecular Crystallographic Information File (mmCIF)\n and returns the data in a dictionary format.\n\n Parameters\n ----------\n filename : str\n The filename of the mmCIF to load.\n\n Returns\n -------\n dict\n A dictionary of numpy arrays. Keys are the names of the PDB\n coordinate section. In addition, some details about the parsed\n structure are included. In case of conversion failure, the failing\n attribute is set to 0 if its supposed to be an integer value.\n \"\"\"\n result = MMCIFParser(filename)\n\n atom_site_mapping = {\n \"record_type\": (\"group_PDB\", str),\n \"atom_serial_number\": (\"id\", int),\n \"atom_name\": (\"label_atom_id\", str),\n \"alternate_location_indicator\": (\"label_alt_id\", str),\n \"residue_name\": (\"label_comp_id\", str),\n # \"chain_identifier\": (\"auth_asym_id\", str),\n \"chain_identifier\": (\"label_asym_id\", str),\n \"residue_sequence_number\": (\"label_seq_id\", int),\n \"code_for_residue_insertion\": (\"pdbx_PDB_ins_code\", str),\n \"occupancy\": (\"occupancy\", float),\n \"temperature_factor\": (\"B_iso_or_equiv\", float),\n \"segment_identifier\": (\"pdbx_PDB_model_num\", str),\n \"element_symbol\": (\"type_symbol\", str),\n \"charge\": (\"pdbx_formal_charge\", str),\n }\n\n out = {}\n for out_key, (atom_site_key, dtype) in atom_site_mapping.items():\n out_data = [\n x.strip() for x in result[\"atom_site\"].get(atom_site_key, [\".\"])\n ]\n if dtype == int:\n out_data = [0 if x == \".\" else int(x) for x in out_data]\n try:\n out[out_key] = np.asarray(out_data).astype(dtype)\n except ValueError:\n default = [\".\"] if dtype == str else 0\n print(f\"Converting {out_key} to {dtype} failed, set to {default}.\")\n out[out_key] = np.repeat(default, len(out_data)).astype(dtype)\n\n number_entries = len(max(out.values(), key=len))\n for key, value in out.items():\n if value.size != 1:\n continue\n out[key] = np.repeat(value, number_entries // value.size)\n\n out[\"details\"] = {}\n out[\"atom_coordinate\"] = np.transpose(\n np.array(\n [\n result[\"atom_site\"][\"Cartn_x\"],\n result[\"atom_site\"][\"Cartn_y\"],\n result[\"atom_site\"][\"Cartn_z\"],\n ],\n dtype=np.float32,\n )\n )\n\n detail_mapping = {\n \"resolution\": (\"em_3d_reconstruction\", 
\"resolution\", np.nan),\n \"resolution_method\": (\"em_3d_reconstruction\", \"resolution_method\", np.nan),\n \"method\": (\"exptl\", \"method\", np.nan),\n \"electron_source\": (\"em_imaging\", \"electron_source\", np.nan),\n \"illumination_mode\": (\"em_imaging\", \"illumination_mode\", np.nan),\n \"microscope_model\": (\"em_imaging\", \"microscope_model\", np.nan),\n }\n for out_key, (base_key, inner_key, default) in detail_mapping.items():\n if base_key not in result:\n continue\n out[\"details\"][out_key] = result[base_key].get(inner_key, default)\n\n return out\n\n @staticmethod\n def _load_pdb(filename: str) -> Dict:\n \"\"\"\n Parses a Protein Data Bank (PDB) file and returns the data\n in a dictionary format.\n\n Parameters\n ----------\n filename : str\n The filename of the PDB file to load.\n\n Returns\n -------\n dict\n A dictionary of numpy arrays. Keys are the names of the PDB\n coordinate section. In addition, some details about the parsed\n structure are included. In case of conversion failure, the failing\n attribute is set to 0 if its supposed to be an integer value.\n \"\"\"\n result = PDBParser(filename)\n\n atom_site_mapping = {\n \"record_type\": (\"record_type\", str),\n \"atom_serial_number\": (\"atom_serial_number\", int),\n \"atom_name\": (\"atom_name\", str),\n \"alternate_location_indicator\": (\"alternate_location_indicator\", str),\n \"residue_name\": (\"residue_name\", str),\n \"chain_identifier\": (\"chain_identifier\", str),\n \"residue_sequence_number\": (\"residue_sequence_number\", int),\n \"code_for_residue_insertion\": (\"code_for_residue_insertion\", str),\n \"occupancy\": (\"occupancy\", float),\n \"temperature_factor\": (\"temperature_factor\", float),\n \"segment_identifier\": (\"segment_identifier\", str),\n \"element_symbol\": (\"element_symbol\", str),\n \"charge\": (\"charge\", str),\n }\n\n out = {\"details\": result[\"details\"]}\n for out_key, (inner_key, dtype) in atom_site_mapping.items():\n out_data = [x.strip() for x in result[inner_key]]\n if dtype == int:\n out_data = [0 if x == \".\" else int(x) for x in out_data]\n try:\n out[out_key] = np.asarray(out_data).astype(dtype)\n except ValueError:\n default = \".\" if dtype == str else 0\n print(\n f\"Converting {out_key} to {dtype} failed. Setting {out_key} to {default}.\"\n )\n out[out_key] = np.repeat(default, len(out_data)).astype(dtype)\n\n out[\"atom_coordinate\"] = np.array(result[\"atom_coordinate\"], dtype=np.float32)\n\n return out\n\n def to_file(self, filename: str) -> None:\n \"\"\"\n Writes the Structure instance data to a Protein Data Bank (PDB) or\n macromolecular Crystallographic Information File (mmCIF) file depending\n one whether filename ends with '.pdb' or '.cif'.\n\n Raises\n ------\n ValueError\n If the extension is not '.pdb' or '.cif'.\n\n Parameters\n ----------\n filename : str\n The filename of the file to write.\n \"\"\"\n data_out = []\n if np.any(np.vectorize(len)(self.chain_identifier) > 2):\n warnings.warn(\"Chain identifiers longer than one will be shortened.\")\n\n _, file_extension = splitext(basename(filename.upper()))\n if file_extension == \".PDB\":\n func = self._write_pdb\n elif file_extension == \".CIF\":\n func = self._write_mmcif\n else:\n raise NotImplementedError(\n \"Could not determine structure filetype.\"\n \" Supported filetypes are mmcif (.cif) and pdb (.pdb).\"\n )\n\n if self.atom_coordinate.shape[0] > 10**5 and func == self._write_pdb:\n warnings.warn(\n \"The structure contains more than 100,000 atoms. 
Consider using mmcif.\"\n )\n\n with open(filename, mode=\"w\", encoding=\"utf-8\") as ofile:\n ofile.writelines(func())\n\n def _write_pdb(self) -> List[str]:\n \"\"\"\n Returns a PDB string representation of the structure instance.\n\n Returns\n -------\n list\n List containing PDB file coordine lines.\n \"\"\"\n data_out = []\n for index in range(self.atom_coordinate.shape[0]):\n x, y, z = self.atom_coordinate[index, :]\n line = list(\" \" * 80)\n line[0:6] = f\"{self.record_type[index]:<6}\"\n line[6:11] = f\"{self.atom_serial_number[index]:>5}\"\n line[12:16] = f\"{self.atom_name[index]:<4}\"\n line[16] = f\"{self.alternate_location_indicator[index]:<1}\"\n line[17:20] = f\"{self.residue_name[index]:<3}\"\n line[21] = f\"{self.chain_identifier[index][0]:<1}\"\n line[22:26] = f\"{self.residue_sequence_number[index]:>4}\"\n line[26] = f\"{self.code_for_residue_insertion[index]:<1}\"\n line[30:38] = f\"{x:>8.3f}\"\n line[38:46] = f\"{y:>8.3f}\"\n line[46:54] = f\"{z:>8.3f}\"\n line[54:60] = f\"{self.occupancy[index]:>6.2f}\"\n line[60:66] = f\"{self.temperature_factor[index]:>6.2f}\"\n line[72:76] = f\"{self.segment_identifier[index]:>4}\"\n line[76:78] = f\"{self.element_symbol[index]:<2}\"\n line[78:80] = f\"{self.charge[index]:>2}\"\n data_out.append(\"\".join(line))\n data_out.append(\"END\")\n data_out = \"\\n\".join(data_out)\n return data_out\n\n def _write_mmcif(self) -> List[str]:\n \"\"\"\n Returns a MMCIF string representation of the structure instance.\n\n Returns\n -------\n list\n List containing MMCIF file coordinate lines.\n \"\"\"\n model_num, entity_id = 1, 1\n data = {\n \"group_PDB\": [],\n \"id\": [],\n \"type_symbol\": [],\n \"label_atom_id\": [],\n \"label_alt_id\": [],\n \"label_comp_id\": [],\n \"label_asym_id\": [],\n \"label_entity_id\": [],\n \"label_seq_id\": [],\n \"pdbx_PDB_ins_code\": [],\n \"Cartn_x\": [],\n \"Cartn_y\": [],\n \"Cartn_z\": [],\n \"occupancy\": [],\n \"B_iso_or_equiv\": [],\n \"pdbx_formal_charge\": [],\n \"auth_seq_id\": [],\n \"auth_comp_id\": [],\n \"auth_asym_id\": [],\n \"auth_atom_id\": [],\n \"pdbx_PDB_model_num\": [],\n }\n\n for index in range(self.atom_coordinate.shape[0]):\n x, y, z = self.atom_coordinate[index, :]\n data[\"group_PDB\"].append(self.record_type[index])\n data[\"id\"].append(str(self.atom_serial_number[index]))\n data[\"type_symbol\"].append(self.element_symbol[index])\n data[\"label_atom_id\"].append(self.atom_name[index])\n data[\"label_alt_id\"].append(self.alternate_location_indicator[index])\n data[\"label_comp_id\"].append(self.residue_name[index])\n data[\"label_asym_id\"].append(self.chain_identifier[index][0])\n data[\"label_entity_id\"].append(str(entity_id))\n data[\"label_seq_id\"].append(str(self.residue_sequence_number[index]))\n data[\"pdbx_PDB_ins_code\"].append(self.code_for_residue_insertion[index])\n data[\"Cartn_x\"].append(f\"{x:.3f}\")\n data[\"Cartn_y\"].append(f\"{y:.3f}\")\n data[\"Cartn_z\"].append(f\"{z:.3f}\")\n data[\"occupancy\"].append(f\"{self.occupancy[index]:.2f}\")\n data[\"B_iso_or_equiv\"].append(f\"{self.temperature_factor[index]:.2f}\")\n data[\"pdbx_formal_charge\"].append(self.charge[index])\n data[\"auth_seq_id\"].append(str(self.residue_sequence_number[index]))\n data[\"auth_comp_id\"].append(self.residue_name[index])\n data[\"auth_asym_id\"].append(self.chain_identifier[index][0])\n data[\"auth_atom_id\"].append(self.atom_name[index])\n data[\"pdbx_PDB_model_num\"].append(str(model_num))\n\n output_data = {\"atom_site\": data}\n original_file = 
self.details.get(\"filepath\", \"\")\n try:\n new_data = {k: v for k, v in MMCIFParser(original_file).items()}\n index = self.atom_serial_number - 1\n new_data[\"atom_site\"] = {\n k: [v[i] for i in index] for k, v in new_data[\"atom_site\"].items()\n }\n new_data[\"atom_site\"][\"Cartn_x\"] = data[\"Cartn_x\"]\n new_data[\"atom_site\"][\"Cartn_y\"] = data[\"Cartn_y\"]\n new_data[\"atom_site\"][\"Cartn_z\"] = data[\"Cartn_z\"]\n output_data = new_data\n except Exception:\n pass\n\n ret = \"\"\n for category, subdict in output_data.items():\n ret += \"#\\n\"\n is_loop = isinstance(subdict[list(subdict.keys())[0]], list)\n if not is_loop:\n for k in subdict:\n ret += f\"_{category}.{k}\\t{subdict[k]}\\n\"\n else:\n ret += \"loop_\\n\"\n ret += \"\".join([f\"_{category}.{k}\\n\" for k in subdict])\n padded_subdict = _format_mmcif_colunns(subdict)\n\n data = [\n \"\".join([str(x) for x in content])\n for content in zip(*padded_subdict.values())\n ]\n ret += \"\\n\".join([entry for entry in data]) + \"\\n\"\n\n return ret\n\n def subset_by_chain(self, chain: str = None) -> \"Structure\":\n \"\"\"\n Return a subset of the structure that contains only atoms belonging to\n a specific chain. If no chain is specified, all chains are returned.\n\n Parameters\n ----------\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. Default is None.\n\n Returns\n -------\n Structure\n A subset of the original structure containing only the specified chain.\n \"\"\"\n chain = np.unique(self.chain_identifier) if chain is None else chain.split(\",\")\n keep = np.in1d(self.chain_identifier, chain)\n return self[keep]\n\n def subset_by_range(\n self,\n start: int,\n stop: int,\n chain: str = None,\n ) -> \"Structure\":\n \"\"\"\n Return a subset of the structure within a specific range of residues.\n\n Parameters\n ----------\n start : int\n The starting residue sequence number.\n\n stop : int\n The ending residue sequence number.\n\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. 
Default is None.\n\n Returns\n -------\n Structure\n A subset of the original structure within the specified residue range.\n \"\"\"\n ret = self.subset_by_chain(chain=chain)\n keep = np.logical_and(\n ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop\n )\n return ret[keep]\n\n def center_of_mass(self) -> NDArray:\n \"\"\"\n Calculate the center of mass of the structure.\n\n Returns\n -------\n NDArray\n The center of mass of the structure.\n \"\"\"\n weights = [self._elements[atype].atomic_weight for atype in self.element_symbol]\n return np.dot(self.atom_coordinate.T, weights) / np.sum(weights)\n\n def rigid_transform(\n self,\n rotation_matrix: NDArray,\n translation: NDArray,\n use_geometric_center: bool = False,\n ) -> \"Structure\":\n \"\"\"\n Performs a rigid transform of internal structure coordinates.\n\n Parameters\n ----------\n rotation_matrix : NDArray\n The rotation matrix to apply to the coordinates.\n translation : NDArray\n The vector to translate the coordinates by.\n use_geometric_center : bool, optional\n Whether to use geometric or coordinate center.\n\n Returns\n -------\n Structure\n The transformed instance of :py:class:`tme.structure.Structure`.\n \"\"\"\n out = np.empty_like(self.atom_coordinate.T)\n rigid_transform(\n coordinates=self.atom_coordinate.T,\n rotation_matrix=rotation_matrix,\n translation=translation,\n out=out,\n use_geometric_center=use_geometric_center,\n )\n ret = self.copy()\n ret.atom_coordinate = out.T.copy()\n return ret\n\n def centered(self) -> Tuple[\"Structure\", NDArray]:\n \"\"\"\n Shifts the structure analogous to :py:meth:`tme.density.Density.centered`.\n\n Returns\n -------\n Structure\n A copy of the class instance whose data center of mass is in the\n center of the data array.\n NDArray\n The coordinate translation.\n\n See Also\n --------\n :py:meth:`tme.Density.centered`\n \"\"\"\n center_of_mass = self.center_of_mass()\n enclosing_box = minimum_enclosing_box(coordinates=self.atom_coordinate.T)\n shift = np.subtract(np.divide(enclosing_box, 2), center_of_mass)\n\n transformed_structure = self.rigid_transform(\n translation=shift, rotation_matrix=np.eye(shift.size)\n )\n\n return transformed_structure, shift\n\n def _coordinate_to_position(\n self,\n shape: Tuple[int],\n sampling_rate: Tuple[float],\n origin: Tuple[float],\n ) -> (NDArray, Tuple[str], Tuple[int], float, Tuple[float]):\n \"\"\"\n Converts coordinates to positions.\n\n Parameters\n ----------\n shape : Tuple[int,]\n The desired shape of the output array.\n\n sampling_rate : float\n The sampling rate of the output array in unit of self.atom_coordinate.\n\n origin : Tuple[float,]\n The origin of the coordinate system.\n Returns\n -------\n Tuple[NDArray, List[str], Tuple[int, ], float, Tuple[float,]]\n Returns positions, atom_types, shape, sampling_rate, and origin.\n \"\"\"\n coordinates = self.atom_coordinate.copy()\n atom_types = self.element_symbol.copy()\n\n # positions are in x, y, z map is z, y, x\n coordinates = coordinates[:, ::-1]\n\n sampling_rate = 1 if sampling_rate is None else sampling_rate\n adjust_origin = origin is not None and shape is None\n origin = coordinates.min(axis=0) if origin is None else origin\n positions = (coordinates - origin) / sampling_rate\n positions = np.rint(positions).astype(int)\n\n if adjust_origin:\n left_shift = positions.min(axis=0)\n positions -= left_shift\n shape = positions.max(axis=0) + 1\n origin = origin + np.multiply(left_shift, sampling_rate)\n\n if shape is None:\n shape = 
positions.max(axis=0) + 1\n\n valid_positions = np.sum(\n np.logical_and(positions < shape, positions >= 0), axis=1\n )\n\n positions = positions[valid_positions == positions.shape[1], :]\n atom_types = atom_types[valid_positions == positions.shape[1]]\n\n self.details[\"nAtoms_outOfBound\"] = 0\n if positions.shape[0] != coordinates.shape[0]:\n out_of_bounds = coordinates.shape[0] - positions.shape[0]\n print(f\"{out_of_bounds}/{coordinates.shape[0]} atoms were out of bounds.\")\n self.details[\"nAtoms_outOfBound\"] = out_of_bounds\n\n return positions, atom_types, shape, sampling_rate, origin\n\n def _position_to_vdw_sphere(\n self,\n positions: Tuple[float],\n atoms: Tuple[str],\n sampling_rate: Tuple[float],\n volume: NDArray,\n ) -> None:\n \"\"\"\n Updates a volume with van der Waals spheres.\n\n Parameters\n ----------\n positions : Tuple[float, float, float]\n The positions of the atoms.\n\n atoms : Tuple[str]\n The types of the atoms.\n\n sampling_rate : float\n The desired sampling rate in unit of self.atom_coordinate of the\n output array.\n\n volume : NDArray\n The volume to update.\n \"\"\"\n index_dict, vdw_rad, shape = {}, {}, volume.shape\n for atom_index, atom_position in enumerate(positions):\n atom_type = atoms[atom_index]\n if atom_type not in index_dict.keys():\n atom_vdwr = np.ceil(\n np.divide(self._elements[atom_type].vdwr, (sampling_rate * 100))\n ).astype(int)\n\n vdw_rad[atom_type] = atom_vdwr\n atom_slice = tuple(slice(-k, k + 1) for k in atom_vdwr)\n distances = np.linalg.norm(\n np.divide(\n np.mgrid[atom_slice],\n atom_vdwr.reshape((-1,) + (1,) * volume.ndim),\n ),\n axis=0,\n )\n index_dict[atom_type] = (distances <= 1).astype(volume.dtype)\n\n footprint = index_dict[atom_type]\n start = np.maximum(np.subtract(atom_position, vdw_rad[atom_type]), 0)\n stop = np.minimum(np.add(atom_position, vdw_rad[atom_type]) + 1, shape)\n volume_slice = tuple(slice(*coord) for coord in zip(start, stop))\n\n start_index = np.maximum(-np.subtract(atom_position, vdw_rad[atom_type]), 0)\n stop_index = np.add(\n footprint.shape,\n np.minimum(\n np.subtract(shape, np.add(atom_position, vdw_rad[atom_type]) + 1), 0\n ),\n )\n index_slice = tuple(slice(*coord) for coord in zip(start_index, stop_index))\n volume[volume_slice] += footprint[index_slice]\n\n def _position_to_scattering_factors(\n self,\n positions: NDArray,\n atoms: NDArray,\n sampling_rate: NDArray,\n volume: NDArray,\n lowpass_filter: bool = True,\n downsampling_factor: float = 1.35,\n source: str = \"peng1995\",\n ) -> None:\n \"\"\"\n Updates a volume with scattering factors.\n\n Parameters\n ----------\n positions : NDArray\n The positions of the atoms.\n atoms : NDArray\n Element symbols.\n sampling_rate : float\n Sampling rate that was used to convert coordinates to positions.\n volume : NDArray\n The volume to update.\n lowpass_filter : NDArray\n Whether the scattering factors hsould be lowpass filtered.\n downsampling_factor : NDArray\n Downsampling factor for scattering factor computation.\n source : str\n Which scattering factors to use\n\n Reference\n ---------\n https://github.com/I2PC/xmipp.\n \"\"\"\n scattering_profiles, shape = dict(), volume.shape\n for atom_index, point in enumerate(positions):\n if atoms[atom_index] not in scattering_profiles:\n spline = atom_profile(\n atom=atoms[atom_index],\n M=downsampling_factor,\n method=source,\n lfilter=lowpass_filter,\n )\n scattering_profiles.update({atoms[atom_index]: spline})\n\n atomic_radius = np.divide(\n self._elements[atoms[atom_index]].vdwr, 
sampling_rate * 100\n )\n starts = np.maximum(np.ceil(point - atomic_radius), 0).astype(int)\n stops = np.minimum(np.floor(point + atomic_radius), shape).astype(int)\n\n grid_index = np.meshgrid(\n *[range(start, stop) for start, stop in zip(starts, stops)]\n )\n distances = np.einsum(\n \"aijk->ijk\",\n np.array([(grid_index[i] - point[i]) ** 2 for i in range(len(point))]),\n dtype=np.float64,\n )\n distances = np.sqrt(distances)\n if not len(distances):\n grid_index, distances = point, 0\n np.add.at(\n volume,\n tuple(grid_index),\n scattering_profiles[atoms[atom_index]](distances),\n )\n\n def _get_atom_weights(\n self, atoms: Tuple[str] = None, weight_type: str = \"atomic_weight\"\n ) -> Tuple[float]:\n \"\"\"\n Returns weights of individual atoms according to a specified weight type.\n\n Parameters\n ----------\n atoms : Tuple of strings, optional\n The atoms to get the weights for. If None, weights for all atoms\n are used. Default is None.\n\n weight_type : str, optional\n The type of weights to return. This can either be 'atomic_weight',\n 'atomic_number', or 'van_der_waals_radius'. Default is 'atomic_weight'.\n\n Returns\n -------\n List[float]\n A list containing the weights of the atoms.\n \"\"\"\n atoms = self.element_symbol if atoms is None else atoms\n match weight_type:\n case \"atomic_weight\":\n weight = [self._elements[atom].atomic_weight for atom in atoms]\n case \"atomic_number\":\n weight = [self._elements[atom].atomic_number for atom in atoms]\n case _:\n raise NotImplementedError(\n \"weight_type can either be 'atomic_weight' or 'atomic_number'\"\n )\n return weight\n\n def to_volume(\n self,\n shape: Tuple[int] = None,\n sampling_rate: NDArray = None,\n origin: Tuple[float] = None,\n chain: str = None,\n weight_type: str = \"atomic_weight\",\n scattering_args: Dict = dict(),\n ) -> Tuple[NDArray, Tuple[int], NDArray]:\n \"\"\"\n Converts atom coordinates of shape [n x 3] x, y, z to a volume with\n index z, y, x.\n\n Parameters\n ----------\n shape : Tuple[int, ...], optional\n Desired shape of the output array. If shape is given its expected to be\n in z, y, x form.\n sampling_rate : float, optional\n Sampling rate of the output array in the unit of self.atom_coordinate\n origin : Tuple[float, ...], optional\n Origin of the coordinate system. If origin is given its expected to be\n in z, y, x form.\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. 
Default is None.\n weight_type : str, optional\n Which weight should be given to individual atoms.\n scattering_args : dict, optional\n Additional arguments for scattering factor computation.\n\n Returns\n -------\n Tuple[NDArray, Tuple[int], NDArray]\n The volume, its origin and the voxel size in Ångstrom.\n \"\"\"\n _weight_types = {\n \"atomic_weight\",\n \"atomic_number\",\n \"van_der_waals_radius\",\n \"scattering_factors\",\n \"lowpass_scattering_factors\",\n }\n _weight_string = \",\".join([f\"'{x}'\" for x in _weight_types])\n if weight_type not in _weight_types:\n raise NotImplementedError(f\"weight_type needs to be in {_weight_string}\")\n\n if sampling_rate is None:\n sampling_rate = np.ones(self.atom_coordinate.shape[1])\n sampling_rate = np.array(sampling_rate)\n if sampling_rate.size == 1:\n sampling_rate = np.repeat(sampling_rate, self.atom_coordinate.shape[1])\n elif sampling_rate.size != self.atom_coordinate.shape[1]:\n raise ValueError(\n \"sampling_rate should either be single value of array with\"\n f\"size {self.atom_coordinate.shape[1]}.\"\n )\n if \"source\" not in scattering_args:\n scattering_args[\"source\"] = \"peng1995\"\n\n temp = self.subset_by_chain(chain=chain)\n\n positions, atoms, shape, sampling_rate, origin = temp._coordinate_to_position(\n shape=shape, sampling_rate=sampling_rate, origin=origin\n )\n volume = np.zeros(shape, dtype=np.float32)\n if weight_type in (\"atomic_weight\", \"atomic_number\"):\n weights = temp._get_atom_weights(atoms=atoms, weight_type=weight_type)\n np.add.at(volume, tuple(positions.T), weights)\n elif weight_type == \"van_der_waals_radius\":\n self._position_to_vdw_sphere(positions, atoms, sampling_rate, volume)\n elif weight_type == \"scattering_factors\":\n self._position_to_scattering_factors(\n positions,\n atoms,\n sampling_rate,\n volume,\n lowpass_filter=False,\n **scattering_args,\n )\n elif weight_type == \"lowpass_scattering_factors\":\n self._position_to_scattering_factors(\n positions,\n atoms,\n sampling_rate,\n volume,\n lowpass_filter=True,\n **scattering_args,\n )\n\n self.details.update(temp.details)\n return volume, origin, sampling_rate\n\n @classmethod\n def compare_structures(\n cls,\n structure1: \"Structure\",\n structure2: \"Structure\",\n origin: NDArray = None,\n sampling_rate: float = None,\n weighted: bool = False,\n ) -> float:\n \"\"\"\n Compute root mean square deviation (RMSD) between two structures.\n\n Both structures need to have the same number of atoms. 
In practice, this means\n that *structure2* is a transformed version of *structure1*\n\n Parameters\n ----------\n structure1 : Structure\n Structure 1.\n\n structure2 : Structure\n Structure 2.\n\n origin : NDArray, optional\n Origin of the structure coordinate system.\n\n sampling_rate : float, optional\n Sampling rate if discretized on a grid in the unit of self.atom_coordinate.\n\n weighted : bool, optional\n Whether atoms should be weighted by their atomic weight.\n\n Returns\n -------\n float\n Root Mean Square Deviation (RMSD)\n \"\"\"\n if origin is None:\n origin = np.zeros(structure1.atom_coordinate.shape[1])\n\n coordinates1 = structure1.atom_coordinate\n coordinates2 = structure2.atom_coordinate\n atoms1, atoms2 = structure1.element_symbol, structure2.element_symbol\n if sampling_rate is not None:\n coordinates1 = np.rint((coordinates1 - origin) / sampling_rate).astype(int)\n coordinates2 = np.rint((coordinates2 - origin) / sampling_rate).astype(int)\n\n weights1 = np.array(structure1._get_atom_weights(atoms=atoms1))\n weights2 = np.array(structure2._get_atom_weights(atoms=atoms2))\n if not weighted:\n weights1 = np.ones_like(weights1)\n weights2 = np.ones_like(weights2)\n\n if not np.allclose(coordinates1.shape, coordinates2.shape):\n raise ValueError(\n \"Input structures need to have the same number of coordinates.\"\n )\n if not np.allclose(weights1.shape, weights2.shape):\n raise ValueError(\"Input structures need to have the same number of atoms.\")\n\n squared_diff = np.sum(np.square(coordinates1 - coordinates2), axis=1)\n weighted_quared_diff = squared_diff * ((weights1 + weights2) / 2)\n rmsd = np.sqrt(np.mean(weighted_quared_diff))\n\n return rmsd\n\n @classmethod\n def align_structures(\n cls,\n structure1: \"Structure\",\n structure2: \"Structure\",\n origin: NDArray = None,\n sampling_rate: float = None,\n weighted: bool = False,\n ) -> Tuple[\"Structure\", float]:\n \"\"\"\n Align the atom coordinates of structure2 to structure1 using\n the Kabsch algorithm.\n\n Both structures need to have the same number of atoms. 
In practice, this means\n that *structure2* is a subset of *structure1*\n\n Parameters\n ----------\n structure1 : Structure\n Structure 1.\n\n structure2 : Structure\n Structure 2.\n\n origin : NDArray, optional\n Origin of the structure coordinate system.\n\n sampling_rate : float, optional\n Voxel size if discretized on a grid.\n\n weighted : bool, optional\n Whether atoms should be weighted by their atomic weight.\n\n Returns\n -------\n Structure\n *structure2* aligned to *structure1*.\n float\n Root Mean Square Error (RMSE)\n \"\"\"\n if origin is None:\n origin = np.minimum(\n structure1.atom_coordinate.min(axis=0),\n structure2.atom_coordinate.min(axis=0),\n ).astype(int)\n\n initial_rmsd = cls.compare_structures(\n structure1=structure1,\n structure2=structure2,\n origin=origin,\n sampling_rate=sampling_rate,\n weighted=weighted,\n )\n\n reference = structure1.atom_coordinate.copy()\n query = structure2.atom_coordinate.copy()\n if sampling_rate is not None:\n reference, atoms1, shape, _, _ = structure1._coordinate_to_position(\n shape=None, sampling_rate=sampling_rate, origin=origin\n )\n query, atoms2, shape, _, _ = structure2._coordinate_to_position(\n shape=None, sampling_rate=sampling_rate, origin=origin\n )\n\n reference_mean = reference.mean(axis=0)\n query_mean = query.mean(axis=0)\n\n reference = reference - reference_mean\n query = query - query_mean\n\n corr = np.dot(query.T, reference)\n U, S, Vh = np.linalg.svd(corr)\n\n rotation = np.dot(Vh.T, U.T).T\n if np.linalg.det(rotation) < 0:\n Vh[2, :] *= -1\n rotation = np.dot(Vh.T, U.T).T\n\n translation = reference_mean - np.dot(query_mean, rotation)\n\n temp = structure1.copy()\n temp.atom_coordinate = reference + reference_mean\n ret = structure2.copy()\n ret.atom_coordinate = np.dot(query + query_mean, rotation) + translation\n\n final_rmsd = cls.compare_structures(\n structure1=temp,\n structure2=ret,\n origin=origin,\n sampling_rate=None,\n weighted=weighted,\n )\n\n print(f\"Initial RMSD: {initial_rmsd:.5f} - Final RMSD: {final_rmsd:.5f}\")\n\n return ret, final_rmsd" }, { "identifier": "euler_to_rotationmatrix", "path": "tme/matching_utils.py", "snippet": "def euler_to_rotationmatrix(angles: Tuple[float]) -> NDArray:\n \"\"\"\n Convert Euler angles to a rotation matrix.\n\n Parameters\n ----------\n angles : tuple\n A tuple representing the Euler angles in degrees.\n\n Returns\n -------\n NDArray\n The generated rotation matrix.\n \"\"\"\n if len(angles) == 1:\n angles = (angles, 0, 0)\n rotation_matrix = (\n Rotation.from_euler(\"zyx\", angles, degrees=True).as_matrix().astype(np.float32)\n )\n return rotation_matrix" }, { "identifier": "minimum_enclosing_box", "path": "tme/matching_utils.py", "snippet": "def minimum_enclosing_box(\n coordinates: NDArray,\n margin: NDArray = None,\n use_geometric_center: bool = False,\n) -> Tuple[int]:\n \"\"\"\n Computes the minimal enclosing box around coordinates with margin.\n\n Parameters\n ----------\n coordinates : NDArray\n Coordinates of which the enclosing box should be computed. The shape\n of this array should be [d, n] with d dimensions and n coordinates.\n margin : NDArray, optional\n Box margin. Defaults to None.\n use_geometric_center : bool, optional\n Whether the box should accommodate the geometric or the coordinate\n center. 
Defaults to False.\n\n Returns\n -------\n tuple\n Integers corresponding to the minimum enclosing box shape.\n \"\"\"\n point_cloud = np.asarray(coordinates)\n dim = point_cloud.shape[0]\n point_cloud = point_cloud - point_cloud.min(axis=1)[:, None]\n\n margin = np.zeros(dim) if margin is None else margin\n margin = np.asarray(margin).astype(int)\n\n norm_cloud = point_cloud - point_cloud.mean(axis=1)[:, None]\n # Adding one avoids clipping during scipy.ndimage.affine_transform\n shape = np.repeat(\n np.ceil(2 * np.linalg.norm(norm_cloud, axis=0).max()) + 1, dim\n ).astype(int)\n if use_geometric_center:\n hull = ConvexHull(point_cloud.T)\n distance, _ = max_euclidean_distance(point_cloud[:, hull.vertices].T)\n distance += np.linalg.norm(np.ones(dim))\n shape = np.repeat(np.rint(distance).astype(int), dim)\n\n return shape" } ]
from tempfile import mkstemp
from os import remove
from tme import Structure
from tme.matching_utils import euler_to_rotationmatrix, minimum_enclosing_box
import pytest
import numpy as np
12964
else: assert value == value_comparison @pytest.mark.parametrize( "modified_attribute", [ ("record_type"), ("atom_serial_number"), ("atom_name"), ("atom_coordinate"), ("alternate_location_indicator"), ("residue_name"), ("chain_identifier"), ("residue_sequence_number"), ("code_for_residue_insertion"), ("occupancy"), ("temperature_factor"), ("segment_identifier"), ("element_symbol"), ], ) def test_initialization_errors(self, modified_attribute): kwargs = { attribute: getattr(self.structure, attribute) for attribute in STRUCTURE_ATTRIBUTES if attribute != modified_attribute } kwargs[modified_attribute] = getattr(self.structure, modified_attribute)[:1] with pytest.raises(ValueError): Structure(**kwargs) def test__getitem__(self): ret_single_index = self.structure[1] ret = self.structure[[1]] self.compare_structures(ret_single_index, ret) ret = self.structure[self.structure.record_type == "ATOM"] assert np.all(ret.record_type == "ATOM") ret = self.structure[self.structure.element_symbol == "C"] assert np.all(ret.element_symbol == "C") def test__repr__(self): unique_chains = "-".join( [ ",".join([str(x) for x in entity]) for entity in self.structure.details["unique_chains"] ] ) min_atom = np.min(self.structure.atom_serial_number) max_atom = np.max(self.structure.atom_serial_number) n_atom = self.structure.atom_serial_number.size min_residue = np.min(self.structure.residue_sequence_number) max_residue = np.max(self.structure.residue_sequence_number) n_residue = self.structure.residue_sequence_number.size repr_str = ( f"Structure object at {id(self.structure)}\n" f"Unique Chains: {unique_chains}, " f"Atom Range: {min_atom}-{max_atom} [N = {n_atom}], " f"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]" ) assert repr_str == self.structure.__repr__() @pytest.mark.parametrize( "path", [ ("./tme/tests/data/Structures/5khe.cif"), ("./tme/tests/data/Structures/5khe.pdb"), ], ) def test_fromfile(self, path): _ = Structure.from_file(path) def test_fromfile_error(self): with pytest.raises(NotImplementedError): _ = Structure.from_file("madeup.extension") @pytest.mark.parametrize("file_format", [("cif"), ("pdb")]) def test_to_file(self, file_format): _, path = mkstemp() path = f"{path}.{file_format}" self.structure.to_file(path) read = self.structure.from_file(path) comparison = self.structure.copy() self.compare_structures(comparison, read, exclude_attributes=["details"]) def test_to_file_error(self): _, path = mkstemp() path = f"{path}.RAISERROR" with pytest.raises(NotImplementedError): self.structure.to_file(path) def test_subset_by_chain(self): chain = "A" ret = self.structure.subset_by_chain(chain=chain) assert np.all(ret.chain_identifier == chain) def test_subset_by_chain_range(self): chain, start, stop = "A", 0, 20 ret = self.structure.subset_by_range(chain=chain, start=start, stop=stop) assert np.all(ret.chain_identifier == chain) assert np.all( np.logical_and( ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop, ) ) def test_center_of_mass(self): center_of_mass = self.structure.center_of_mass() assert center_of_mass.shape[0] == self.structure.atom_coordinate.shape[1] assert np.allclose(center_of_mass, [-0.89391639, 29.94908928, -2.64736741]) def test_centered(self): ret, translation = self.structure.centered()
STRUCTURE_ATTRIBUTES = [ "record_type", "atom_serial_number", "atom_name", "atom_coordinate", "alternate_location_indicator", "residue_name", "chain_identifier", "residue_sequence_number", "code_for_residue_insertion", "occupancy", "temperature_factor", "segment_identifier", "element_symbol", "charge", "details", ] class TestStructure: def setup_method(self): self.structure = Structure.from_file("./tme/tests/data/Structures/5khe.cif") _, self.path = mkstemp() def teardown_method(self): del self.structure remove(self.path) def compare_structures(self, structure1, structure2, exclude_attributes=[]): for attribute in STRUCTURE_ATTRIBUTES: if attribute in exclude_attributes: continue value = getattr(structure1, attribute) value_comparison = getattr(structure2, attribute) if type(value) == np.ndarray: assert np.all(value_comparison == value) else: assert value == value_comparison def test_initialization(self): structure = Structure( record_type=self.structure.record_type, atom_serial_number=self.structure.atom_serial_number, atom_name=self.structure.atom_name, atom_coordinate=self.structure.atom_coordinate, alternate_location_indicator=self.structure.alternate_location_indicator, residue_name=self.structure.residue_name, chain_identifier=self.structure.chain_identifier, residue_sequence_number=self.structure.residue_sequence_number, code_for_residue_insertion=self.structure.code_for_residue_insertion, occupancy=self.structure.occupancy, temperature_factor=self.structure.temperature_factor, segment_identifier=self.structure.segment_identifier, element_symbol=self.structure.element_symbol, charge=self.structure.charge, details=self.structure.details, ) for attribute in STRUCTURE_ATTRIBUTES: value = getattr(self.structure, attribute) value_comparison = getattr(structure, attribute) if type(value) == np.ndarray: assert np.all(value_comparison == value) else: assert value == value_comparison @pytest.mark.parametrize( "modified_attribute", [ ("record_type"), ("atom_serial_number"), ("atom_name"), ("atom_coordinate"), ("alternate_location_indicator"), ("residue_name"), ("chain_identifier"), ("residue_sequence_number"), ("code_for_residue_insertion"), ("occupancy"), ("temperature_factor"), ("segment_identifier"), ("element_symbol"), ], ) def test_initialization_errors(self, modified_attribute): kwargs = { attribute: getattr(self.structure, attribute) for attribute in STRUCTURE_ATTRIBUTES if attribute != modified_attribute } kwargs[modified_attribute] = getattr(self.structure, modified_attribute)[:1] with pytest.raises(ValueError): Structure(**kwargs) def test__getitem__(self): ret_single_index = self.structure[1] ret = self.structure[[1]] self.compare_structures(ret_single_index, ret) ret = self.structure[self.structure.record_type == "ATOM"] assert np.all(ret.record_type == "ATOM") ret = self.structure[self.structure.element_symbol == "C"] assert np.all(ret.element_symbol == "C") def test__repr__(self): unique_chains = "-".join( [ ",".join([str(x) for x in entity]) for entity in self.structure.details["unique_chains"] ] ) min_atom = np.min(self.structure.atom_serial_number) max_atom = np.max(self.structure.atom_serial_number) n_atom = self.structure.atom_serial_number.size min_residue = np.min(self.structure.residue_sequence_number) max_residue = np.max(self.structure.residue_sequence_number) n_residue = self.structure.residue_sequence_number.size repr_str = ( f"Structure object at {id(self.structure)}\n" f"Unique Chains: {unique_chains}, " f"Atom Range: {min_atom}-{max_atom} [N = {n_atom}], " 
f"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]" ) assert repr_str == self.structure.__repr__() @pytest.mark.parametrize( "path", [ ("./tme/tests/data/Structures/5khe.cif"), ("./tme/tests/data/Structures/5khe.pdb"), ], ) def test_fromfile(self, path): _ = Structure.from_file(path) def test_fromfile_error(self): with pytest.raises(NotImplementedError): _ = Structure.from_file("madeup.extension") @pytest.mark.parametrize("file_format", [("cif"), ("pdb")]) def test_to_file(self, file_format): _, path = mkstemp() path = f"{path}.{file_format}" self.structure.to_file(path) read = self.structure.from_file(path) comparison = self.structure.copy() self.compare_structures(comparison, read, exclude_attributes=["details"]) def test_to_file_error(self): _, path = mkstemp() path = f"{path}.RAISERROR" with pytest.raises(NotImplementedError): self.structure.to_file(path) def test_subset_by_chain(self): chain = "A" ret = self.structure.subset_by_chain(chain=chain) assert np.all(ret.chain_identifier == chain) def test_subset_by_chain_range(self): chain, start, stop = "A", 0, 20 ret = self.structure.subset_by_range(chain=chain, start=start, stop=stop) assert np.all(ret.chain_identifier == chain) assert np.all( np.logical_and( ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop, ) ) def test_center_of_mass(self): center_of_mass = self.structure.center_of_mass() assert center_of_mass.shape[0] == self.structure.atom_coordinate.shape[1] assert np.allclose(center_of_mass, [-0.89391639, 29.94908928, -2.64736741]) def test_centered(self): ret, translation = self.structure.centered()
box = minimum_enclosing_box(coordinates=self.structure.atom_coordinate.T)
2
2023-10-20 13:46:01+00:00
16k
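The Structure.align_structures snippet in the record above aligns one set of atom coordinates onto another with the Kabsch algorithm: an SVD of the cross-covariance of the centered point clouds, plus a reflection correction. A minimal NumPy sketch of the same idea, assuming [n x 3] row-vector coordinates with the rotation applied on the right:

import numpy as np

def kabsch_align(reference: np.ndarray, query: np.ndarray):
    # Center both point clouds on their means.
    ref_mean, query_mean = reference.mean(axis=0), query.mean(axis=0)
    ref_c, query_c = reference - ref_mean, query - query_mean
    # Cross-covariance of the centered coordinates; its SVD yields the optimal rotation.
    U, _, Vh = np.linalg.svd(query_c.T @ ref_c)
    # Flip the last axis if needed so the result is a proper rotation (det = +1).
    d = np.sign(np.linalg.det(U @ Vh))
    rotation = U @ np.diag([1.0, 1.0, d]) @ Vh
    translation = ref_mean - query_mean @ rotation
    aligned = query @ rotation + translation
    rmsd = np.sqrt(np.mean(np.sum((aligned - reference) ** 2, axis=1)))
    return aligned, rmsd

This mirrors the rotation and translation computed in the snippet (corr = query.T @ reference followed by an SVD and a sign fix on the last singular vector), with the RMSD evaluated on the aligned coordinates.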
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert, ja_bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n 
else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str, device=\"cuda\")\n torch.save(bert, bert_path)\n assert bert.shape[-1] == len(phone), phone\n\n if language_str == \"ZH\":\n bert = bert\n ja_bert = torch.zeros(768, len(phone))\n elif language_str == \"JP\":\n ja_bert = bert\n bert = torch.zeros(1024, len(phone))\n else:\n bert = torch.zeros(1024, len(phone))\n ja_bert = torch.zeros(768, len(phone))\n assert bert.shape[-1] == len(phone), (\n bert.shape,\n len(phone),\n sum(word2ph),\n p1,\n p2,\n t1,\n t2,\n pold,\n pold2,\n word2ph,\n text,\n w2pho,\n )\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n 
spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * 
self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=6,\n flow_share_parameter=False,\n use_transformer_flow=True,\n subbands=8, # add\n gen_istft_n_fft=16, # add\n gen_istft_hop_size=4, # add\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.subbands = subbands\n self.gen_istft_n_fft = gen_istft_n_fft\n self.gen_istft_hop_size = gen_istft_hop_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n # self.dec = Generator(\n # inter_channels,\n # resblock,\n # resblock_kernel_sizes,\n # resblock_dilation_sizes,\n # upsample_rates,\n # upsample_initial_channel,\n # upsample_kernel_sizes,\n # gin_channels=gin_channels,\n # )\n self.dec = Multistream_iSTFT_Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gen_istft_n_fft,\n gen_istft_hop_size, \n subbands,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n 
spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert, ja_bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n 
sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x 
+ self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n 
fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
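A minimal sketch of the length-bucketing rule implemented by the DistributedBucketSampler snippet above: a sample with spectrogram length L is assigned to bucket i iff boundaries[i] < L <= boundaries[i+1], and samples outside (boundaries[0], boundaries[-1]] are discarded. The boundary and length values below are toy numbers chosen for illustration, not values taken from this record.

import bisect

boundaries = [32, 300, 400, 500]   # toy frame-length boundaries
lengths = [10, 50, 350, 420, 900]  # toy per-sample spectrogram lengths

buckets = [[] for _ in range(len(boundaries) - 1)]
for idx, length in enumerate(lengths):
    i = bisect.bisect_left(boundaries, length)  # first boundary >= length
    if 0 < i < len(boundaries):                 # in range: boundaries[i-1] < length <= boundaries[i]
        buckets[i - 1].append(idx)

print(buckets)  # [[1], [2], [3]]; samples 0 and 4 fall outside the boundaries and are dropped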
import os
import torch
import torch.distributed as dist
import logging
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
11,120
for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
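One detail worth noting in the cropped code above: the ground-truth mel is sliced with a frame offset (ids_slice) and a frame count (segment_size // hop_length), while the raw waveform is sliced with the corresponding sample offset (ids_slice * hop_length) and sample count (segment_size), so the two slices cover the same stretch of audio. A toy numeric sketch; the hop size, segment size and offset below are assumed values, not this record's hyperparameters.

hop_length = 256        # assumed hop size, in samples
segment_size = 8192     # assumed training segment length, in samples
ids_slice = 10          # a frame offset such as rand_slice_segments would return

mel_frames = segment_size // hop_length  # 32 mel frames per training segment
wav_start = ids_slice * hop_length       # 2560: the same starting point, in samples
print(mel_frames, wav_start)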
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): #dist.init_process_group( # backend="gloo", # init_method="env://", # Due to some training problem,we proposed to use gloo instead of nccl. #) # Use torchrun instead of mp.spawn #rank = dist.get_rank() #n_gpus = dist.get_world_size() rank = 0 n_gpus = 1 hps = utils.get_hparams() torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, 
betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) # if net_dur_disc is not None: # net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: # if not optim_dur_disc.param_groups[0].get("initial_lr"): # optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, 
non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel
loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
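The target line above applies the kl_loss helper from this record's context (the entry that gold_snippet_index below appears to point at). Written out, that helper computes the masked, closed-form KL divergence between the diagonal-Gaussian posterior (sample z_p, log-std logs_q) and the flow-transformed prior (mean m_p, log-std logs_p); a LaTeX transcription of the same computation, with \sigma = e^{logs}:

\[
\mathcal{L}_{kl} \;=\; \frac{\sum_{b,h,t} m_{b,h,t}\,\Big[\log\sigma_p - \log\sigma_q - \tfrac{1}{2} + \tfrac{(z_p - \mu_p)^2}{2\,\sigma_p^{2}}\Big]}{\sum_{b,h,t} m_{b,h,t}},
\qquad \mu_p = m\_p,\;\; \sigma_p = e^{logs\_p},\;\; \sigma_q = e^{logs\_q},
\]

where m is z_mask; the target line then scales this by hps.train.c_kl.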
9
2023-10-16 10:04:32+00:00
16k
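A rough sketch of how one of these records could be assembled into a next-line completion example. The concrete prompt format is an assumption (the dump does not specify one); the only directly observable convention is that gold_snippet_index looks like a zero-based index into context (9 above selects the kl_loss entry used by next_line).

def build_example(record):
    # Concatenate the retrieved cross-file snippets, the file's imports and
    # the in-file prefix; the model is then asked to predict next_line.
    context_block = "\n\n".join(
        "# " + c["path"] + "\n" + c["snippet"] for c in record["context"]
    )
    prompt = "\n\n".join(
        [context_block, record["import_statement"], record["cropped_code"]]
    )
    target = record["next_line"]
    # The context entry defining the symbol the target line needs
    # (assuming gold_snippet_index is zero-based).
    gold = record["context"][record["gold_snippet_index"]]
    return prompt, target, gold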
violet-sto/HN-GFN
main.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.test_mols = []\n self.all_mols = []\n self.train_mols_map = {}\n\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.proxy_repr_type, include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = torch.double\n else:\n self.mdp.floatX = torch.float\n self.mdp._cue_max_blocks = args.max_blocks\n self.max_blocks = args.max_blocks\n self.oracle = oracle\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n\n self.target_norm = [-8.6, 1.10] # for dockerscore\n\n self.hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n\n def load_h5(self, path, test_ratio=0.1, num_init_examples=None):\n import json\n columns = [\"smiles\", \"dockscore\",\"blockidxs\", \"slices\", \"jbonds\", \"stems\"]\n store = pd.HDFStore(path, 'r')\n df = store.select('df')\n # Pandas has problem with calculating some stuff on float16\n df.dockscore = df.dockscore.astype(\"float64\")\n for cl_mame in columns[2:]:\n df.loc[:, cl_mame] = df[cl_mame].apply(json.loads)\n\n test_idxs = self.test_split_rng.choice(\n len(df), int(test_ratio * len(df)), replace=False)\n\n split_bool = np.zeros(len(df), dtype=np.bool)\n split_bool[test_idxs] = True\n self.scores = []\n self.smis = []\n for i in tqdm(range(len(df))):\n m = BlockMoleculeDataExtended()\n for c in range(1, len(columns)):\n setattr(m, columns[c], df.iloc[i, c - 1])\n m.blocks = [self.mdp.block_mols[i] for i in m.blockidxs]\n if len(m.blocks) > self.max_blocks:\n continue\n m.numblocks = len(m.blocks)\n m.score = self.oracle.get_score([m])\n self.scores.append(m.score)\n self.smis.append(m.smiles)\n self.all_mols.append(m)\n if split_bool[i]: \n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n if len(self.train_mols)+len(self.test_mols) >= num_init_examples:\n break\n store.close()\n\n print(\"Sampling initial {} molecules from all {} molecules...\".format(\n num_init_examples, len(split_bool)))\n print(len(self.train_mols), 'train mols')\n print(len(self.test_mols), 'test mols')\n\n def r2r(self, dockscore=None, normscore=None):\n if dockscore is not None:\n normscore = 4-(min(0, dockscore) -\n self.target_norm[0])/self.target_norm[1]\n normscore = max(0.1, normscore)\n return (normscore/1) ** 1\n\n def _get(self, i, dset):\n return [(dset[i], dset[i].score)]\n\n def sample(self, n):\n eidx = np.random.randint(0, len(self.train_mols), n)\n samples = sum((self._get(i, self.train_mols) for i in eidx), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n s, r = mb\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n r = torch.tensor(pd.DataFrame.from_dict(\n r).values, device=self._device).float()\n return (s, r)\n\n def iterset(self, n, mode):\n if mode == 'test':\n dset = self.test_mols\n elif mode == 'train':\n dset = self.train_mols\n\n N = len(dset)\n for i in range(int(np.ceil(N/n))):\n samples = sum((self._get(j, dset)\n for j in range(i*n, min(N, (i+1)*n))), [])\n yield self.sample2batch(zip(*samples))\n\n def add_samples(self, batch):\n picked_mols, scores, picked_smis = batch\n\n for m in picked_mols:\n if np.random.uniform() < (1/10):\n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n 
self.all_mols.append(m)\n \n self.scores += scores\n self.smis += [smis[-1] for smis in picked_smis]\n \n self.stop_event.clear()\n\n def compute_hypervolume(self):\n scores = torch.tensor(pd.DataFrame.from_dict(self.scores).values)\n volume = self.hypervolume.compute(scores)\n\n return volume\n \n def start_samplers(self, n, mbsize):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(self.sample(mbsize))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n self.sampler_threads = [threading.Thread(target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "MolMDPExtended", "path": "mol_mdp_ext.py", "snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. 
We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. 
The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n 
for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)" }, { "identifier": "BlockMoleculeDataExtended", "path": "mol_mdp_ext.py", "snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n @property\n def mol(self):\n return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n @property\n def smiles(self):\n return Chem.MolToSmiles(self.mol)\n\n def copy(self): # shallow copy\n o = BlockMoleculeDataExtended()\n o.blockidxs = list(self.blockidxs)\n o.blocks = list(self.blocks)\n o.slices = list(self.slices)\n o.numblocks = self.numblocks\n o.jbonds = list(self.jbonds)\n o.stems = list(self.stems)\n return o\n\n def as_dict(self):\n return {'blockidxs': self.blockidxs,\n 'slices': self.slices,\n 'numblocks': self.numblocks,\n 'jbonds': self.jbonds,\n 'stems': self.stems}" }, { "identifier": "Oracle", "path": "oracle/oracle.py", "snippet": "class Oracle():\n def __init__(self, args, mols_ref=None):\n '''\n @params:\n args (dict): argsurations\n '''\n self.objectives = args.objectives\n self.fps_ref = [AllChem.GetMorganFingerprintAsBitVect(x, 3, 2048) \n for x in mols_ref] if mols_ref else None\n self.device = torch.device(args.device)\n\n def batch_get_scores(self, mols):\n '''\n @params:\n mols: molecules to estimate score\n @return:\n dicts (list): list of score dictionaries\n '''\n dicts = [{} for _ in mols]\n for obj in self.objectives:\n scores = get_scores(obj, mols, device=self.device)\n for i, mol in enumerate(mols):\n dicts[i][obj] = scores[i]\n return dicts\n \n def get_score(self, mol):\n scores = {}\n for obj in self.objectives:\n score = get_scores(obj, mol, device=self.device)\n scores[obj] = score[0]\n \n return scores" }, { "identifier": "get_proxy", "path": "proxy/proxy.py", "snippet": "def get_proxy(args, bpath, oracle):\n if args.acq_fn.lower() == 'none':\n return NoAF(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ucb':\n return UCB(args, bpath, oracle)\n \n elif args.acq_fn.lower() == 'ucb_chebyshev':\n return UCB_chebyshev(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ei':\n return EI(args, bpath, oracle)" }, { "identifier": "FMGFlowNet", "path": "generator/gfn.py", "snippet": "class FMGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n mdp = MolMDPExtended(bpath)\n mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n mdp.build_translation_table()\n self.model = make_model(args, mdp, is_proxy=False)\n self.opt = torch.optim.Adam(self.model.parameters(\n ), args.learning_rate, weight_decay=args.weight_decay)\n\n self.loginf = 1000 # to prevent nans\n self.log_reg_c = args.log_reg_c\n self.balanced_loss = args.balanced_loss\n self.do_nblocks_reg = False\n self.max_blocks = args.max_blocks\n self.leaf_coef = args.leaf_coef\n self.clip_grad = args.clip_grad\n # self.score_criterion = nn.MSELoss(reduction='none')\n self.score_criterion = nn.MSELoss()\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss, term_loss, flow_loss = self.FMLoss(p, pb, a, pw, w, r, s, d)\n\n self.opt.zero_grad()\n loss.backward()\n if self.clip_grad > 0:\n 
torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.clip_grad)\n self.opt.step()\n self.model.training_steps = i+1\n \n return (loss.item(), term_loss.item(), flow_loss.item())\n\n def FMLoss(self, p, pb, a, pw, w, r, s, d):\n # Since we sampled 'mbsize' trajectories, we're going to get\n # roughly mbsize * H (H is variable) transitions\n ntransitions = r.shape[0]\n # state outputs\n stem_out_s, mol_out_s = self.model(s, w) # log(F)\n # parents of the state outputs\n stem_out_p, mol_out_p = self.model(p, pw)\n # index parents by their corresponding actions\n qsa_p = self.model.index_output_by_action(\n p, stem_out_p, mol_out_p[:, 0], a)\n # then sum the parents' contribution, this is the inflow\n exp_inflow = (torch.zeros((ntransitions,), device=qsa_p.device, dtype=qsa_p.dtype)\n .index_add_(0, pb, torch.exp(qsa_p))) # pb is the parents' batch index\n inflow = torch.log(exp_inflow + self.log_reg_c)\n # sum the state's Q(s,a), this is the outflow\n exp_outflow = self.model.sum_output(s, torch.exp(\n stem_out_s), torch.exp(mol_out_s[:, 0]))\n # include reward and done multiplier, then take the log\n # we're guarenteed that r > 0 iff d = 1, so the log always works\n outflow_plus_r = torch.log(self.log_reg_c + r + exp_outflow * (1-d))\n if self.do_nblocks_reg:\n losses = _losses = ((inflow - outflow_plus_r) /\n (s.nblocks * self.max_blocks)).pow(2)\n else:\n losses = _losses = (inflow - outflow_plus_r).pow(2)\n\n term_loss = (losses * d).sum() / (d.sum() + 1e-20) # terminal nodes\n flow_loss = (losses * (1-d)).sum() / \\\n ((1-d).sum() + 1e-20) # non-terminal nodes\n \n if self.balanced_loss:\n loss = term_loss * self.leaf_coef + flow_loss\n else:\n loss = losses.mean()\n\n return loss, term_loss, flow_loss" }, { "identifier": "TBGFlowNet", "path": "generator/gfn.py", "snippet": "class TBGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n self.model = make_model(args, self.mdp, is_proxy=False)\n self.Z = nn.Sequential(nn.Linear(len(args.objectives), args.nemb//2), nn.LeakyReLU(),\n nn.Linear(args.nemb//2, 1))\n self.Z.to(args.device)\n self.opt = torch.optim.Adam(self.model.parameters(), args.learning_rate, weight_decay=args.weight_decay)\n self.opt_Z = torch.optim.Adam(self.Z.parameters(), args.Z_learning_rate, weight_decay=args.weight_decay)\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss = self.TBLoss(p, a, w, r, d, mols)\n self.opt.zero_grad()\n self.opt_Z.zero_grad()\n loss.backward()\n if self.args.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.args.clip_grad)\n self.opt.step()\n self.opt_Z.step()\n\n return (loss.item(),)\n\n @property\n def Z(self):\n return self.model.Z\n\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], 
device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n backward_ll = scatter(torch.log(1/n), b, reduce='sum')\n\n losses = ((self.Z(w[d==1.]) + forward_ll) - (torch.log(r[d == 1.]) + backward_ll)).pow(2) \n loss = losses.mean()\n\n return loss" }, { "identifier": "circle_points", "path": "utils/metrics.py", "snippet": "def circle_points(K, min_angle=None, max_angle=None):\n # generate evenly distributed preference vector\n ang0 = 1e-6 if min_angle is None else min_angle\n ang1 = np.pi / 2 - ang0 if max_angle is None else max_angle\n angles = np.linspace(ang0, ang1, K, endpoint=True)\n x = np.cos(angles)\n y = np.sin(angles)\n weights = np.c_[x, y]\n normalized_weights = weights/weights.sum(1, keepdims=True)\n\n return normalized_weights.astype(np.float32)" }, { "identifier": "compute_success", "path": "utils/metrics.py", "snippet": "def compute_success(mols, scores, objectives, score_succ):\n print(\"Computing successful rate...\")\n positive_mols = []\n success_dict = {k: 0. for k in objectives}\n\n for mol, score in zip(mols, scores):\n all_success = True\n for k, v in score.items():\n if v >= score_succ[k]:\n success_dict[k] += 1\n else:\n all_success = False\n if all_success:\n positive_mols.append(mol)\n\n success = 1.*len(positive_mols)/len(mols)\n\n return success, positive_mols" }, { "identifier": "compute_diversity", "path": "utils/metrics.py", "snippet": "def compute_diversity(mols):\n print(\"Computing diversity...\")\n\n if len(mols) == 0:\n return 0\n\n sims = []\n fps = [AllChem.GetMorganFingerprintAsBitVect(x.mol, 3, 2048) for x in mols]\n for i in range(len(fps)):\n sims += DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])\n\n return 1 - np.mean(sims)" }, { "identifier": "compute_novelty", "path": "utils/metrics.py", "snippet": "def compute_novelty(mols, ref_mols):\n print(\"Computing novelty...\")\n positive_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x.mol, 3, 2048) for x in mols]\n ref_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x, 3, 2048) for x in ref_mols]\n\n n_sim = 0.\n for i in range(len(positive_fps)):\n sims = DataStructs.BulkTanimotoSimilarity(positive_fps[i], ref_fps)\n if max(sims) >= 0.4:\n n_sim += 1\n novelty = 1. - 1. 
* n_sim / (len(positive_fps)+1e-6)\n\n return novelty" }, { "identifier": "evaluate", "path": "utils/metrics.py", "snippet": "def evaluate(args, generator, rollout_worker, k):\n time_start = time.time()\n print(f\"Sampling molecules and evaluating...\")\n test_weights = rollout_worker.test_weights\n picked_mols = []\n all_scores = []\n # top_scores = []\n top_scores = defaultdict(list)\n mean_scores = []\n hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n \n for weights in test_weights:\n sampled_mols = []\n rewards = []\n scores = []\n for i in range(args.num_samples):\n rollout_worker.rollout(\n generator, use_rand_policy=False, weights=weights.unsqueeze(0))\n (raw_r, _, m, _, _) = rollout_worker.sampled_mols[-1]\n sampled_mols.append(m)\n rewards.append(raw_r[0])\n scores.append(raw_r[1])\n\n idx_pick = np.argsort(rewards)[::-1][:k] \n picked_mols += np.array(sampled_mols)[idx_pick].tolist()\n top_rewards = np.array(rewards)[idx_pick]\n mean_scores.append(np.array(scores).mean(0))\n \n picked_scores = np.array(scores)[idx_pick]\n weight_specific_volume = hypervolume.compute(torch.tensor(picked_scores))\n print(f'Hypervolume w.r.t test weights {weights}: {weight_specific_volume}')\n \n for K in [10, 100]:\n scores_np = np.array(scores)\n top_scores_weight = [scores_np[np.argsort(scores_np[:,i])[::-1][:K], i].mean() for i in range(len(args.objectives))]\n top_scores[K].append(top_scores_weight)\n print(f'Top {K} scores w.r.t test weights {weights}: {top_scores_weight}')\n \n all_scores += scores\n print('Top_rewards: {}'.format(top_rewards.mean())) # Top-100 rewards\n \n volume = hypervolume.compute(torch.tensor(all_scores))\n diversity = compute_diversity(picked_mols) # Top-100\n\n print('Hypervolume: {}, Diversity: {}, Time: {}'.format(\n volume, diversity, time.time()-time_start))\n\n return volume, diversity" }, { "identifier": "compute_correlation", "path": "utils/metrics.py", "snippet": "def compute_correlation(args, model, rollout_worker, test_mols):\n\n mdp = rollout_worker.mdp\n device = args.device\n def tf(x): return torch.tensor(x, device=device).to(torch.float)\n def tint(x): return torch.tensor(x, device=device).long()\n\n # test_mols = pickle.load(gzip.open('data/some_mols_U_1k.pkl.gz'))\n logsoftmax = nn.LogSoftmax(0)\n corrs = []\n numblocks = []\n\n start_time = time.time()\n if args.n_objectives == 3:\n test_weights = rollout_worker.test_weights[::2]\n elif args.n_objectives == 4:\n test_weights = rollout_worker.test_weights[1:-2:4]\n else:\n test_weights = rollout_worker.test_weights\n \n for weights in test_weights:\n print(\"Computing correlation w.r.t test weights {}\".format(weights))\n weights = torch.tensor(weights).to(args.device)\n logp = []\n rewards = []\n for m in tqdm(test_mols):\n try:\n agraph = get_mol_path_graph(m, mdp)\n except:\n continue\n # rewards.append(np.log(moli[0][0]))\n reward = rollout_worker._get_reward(m, weights)[0].item()\n rewards.append(np.log(reward))\n s = mdp.mols2batch([mdp.mol2repr(agraph.nodes[i]['mol'])\n for i in agraph.nodes])\n numblocks.append(len(m.blocks))\n with torch.no_grad():\n # get the mols_out_s for ALL molecules not just the end one.\n if args.condition_type == 'Hyper_scorepred':\n stem_out_s, mol_out_s, _ = model(\n s, weights.repeat(s.num_graphs, 1))\n else:\n stem_out_s, mol_out_s = model(\n s, weights.repeat(s.num_graphs, 1))\n per_mol_out = []\n # Compute pi(a|s)\n for j in range(len(agraph.nodes)):\n a, b = s._slice_dict['stems'][j:j+2]\n\n stop_allowed = len(\n 
agraph.nodes[j]['mol'].blocks) >= args.min_blocks\n mp = logsoftmax(torch.cat([\n stem_out_s[a:b].reshape(-1),\n # If num_blocks < min_blocks, the model is not allowed to stop\n mol_out_s[j, :1] if stop_allowed else tf([-1000])]))\n per_mol_out.append(\n (mp[:-1].reshape((-1, stem_out_s.shape[1])), mp[-1]))\n\n # When the model reaches 8 blocks, it is stopped automatically. If instead it stops before\n # that, we need to take into account the STOP action's logprob\n if len(m.blocks) < 8:\n if args.condition_type == 'Hyper_scorepred':\n stem_out_last, mol_out_last, _ = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0))\n else:\n stem_out_last, mol_out_last = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0)) \n mplast = logsoftmax(\n torch.cat([stem_out_last.reshape(-1), mol_out_last[0, :1]]))\n MSTOP = mplast[-1]\n\n # assign logprob to edges\n for u, v in agraph.edges:\n a = agraph.edges[u, v]['action']\n if a[0] == -1:\n agraph.edges[u, v]['logprob'] = per_mol_out[v][1]\n else:\n agraph.edges[u,\n v]['logprob'] = per_mol_out[v][0][a[1], a[0]]\n\n # propagate logprobs through the graph\n for n in list(nx.topological_sort(agraph))[::-1]:\n for c in agraph.predecessors(n):\n if len(m.blocks) < 8 and c == 0:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0) + MSTOP)\n else:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0))\n\n # add the first item\n # logp.append((moli, agraph.nodes[n]['logprob'].item()))\n logp.append(agraph.nodes[n]['logprob'].item())\n corrs.append(stats.spearmanr(rewards, logp).correlation)\n\n print('Spearmanr: {}, mean: {}, Time: {}'.format(corrs, np.mean(corrs), time.time()-start_time))\n return corrs" }, { "identifier": "set_random_seed", "path": "utils/utils.py", "snippet": "def set_random_seed(seed, deterministic=True):\n \"\"\"Set random seed.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "get_logger", "path": "utils/logging.py", "snippet": "def get_logger(args):\n if args.enable_tensorboard:\n return TensorboardLogger(args)\n else:\n return Logger(args)" } ]
from curses import raw
from dataset import Dataset
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from oracle.oracle import Oracle
from proxy import get_proxy
from generator import TBGFlowNet, FMGFlowNet
from utils.metrics import circle_points, compute_success, compute_diversity, compute_novelty, evaluate, compute_correlation
from utils.utils import set_random_seed
from utils.logging import get_logger
from datetime import datetime
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.utils.sampling import sample_simplex
from botorch.utils.transforms import normalize, unnormalize
from torch.distributions.dirichlet import Dirichlet
from rdkit.Chem import AllChem
from rdkit import DataStructs
from pymoo.util.ref_dirs import get_reference_directions
import os
import argparse
import json
import time
import threading
import pdb
import pickle
import gzip
import warnings
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch
import pandas as pd
import numpy as np
12,247
args.logger.add_scalar( 'Loss/train', train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, diversity = evaluate(args, generator, rollout_worker, 100) corrs = compute_correlation(args, generator, rollout_worker, rollout_worker.test_mols) args.logger.add_scalar( 'Top-100-sampled/volumes', volume, use_context=False) args.logger.add_scalar( 'Top-100-sampled/dists', diversity, use_context=False) args.logger.add_scalar( 'Top-100-sampled/corr', np.mean(corrs), use_context=False) if do_save: save_stuff(i) if volume > best_hv: best_hv = volume if do_save: save_stuff('volume') if np.mean(corrs) > best_corr: best_corr = np.mean(corrs) if do_save: save_stuff('corr') stop_everything() if do_save: save_stuff(i) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def get_test_mols(args, mdp, num): samples = [] fps = [] early_stops = [] while len(samples) < num: if len(samples) % 5000 == 0: print(f'{len(samples)}/{num} mols have been sampled') m = BlockMoleculeDataExtended() min_blocks = args.min_blocks max_blocks = args.max_blocks early_stop_at = np.random.randint(min_blocks, max_blocks + 1) early_stops.append(early_stop_at) for t in range(max_blocks): if t == 0: length = mdp.num_blocks+1 else: length = len(m.stems)*mdp.num_blocks+1 action = np.random.randint(1, length) if t == early_stop_at: action = 0 if t >= min_blocks and action == 0: fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break else: action = max(0, action-1) action = (action % mdp.num_blocks, action // mdp.num_blocks) #print('..', action) m = mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. Note that this node's parent isn't just m, # because this is a sink for all parent transitions fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break return samples def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" # Initialization: oracle and dataset oracle = Oracle(args) args.n_objectives = len(args.objectives) if args.n_objectives == 2: test_weights = circle_points(K=5, min_angle=0.1, max_angle=np.pi/2-0.1) else: test_weights = get_test_rays() if args.criterion == 'TB': generator = TBGFlowNet(args, bpath) elif args.criterion == 'FM':
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug', action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/synthetic') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_samples", default=1000, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3') parser.add_argument("--scalar", default='WeightedSum', type=str) #TODO: other scalars parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1', type=str) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=30000, type=int) # 30k parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=16, type=int) parser.add_argument("--offline_mbsize", default=0, type=int) parser.add_argument("--hindsight_mbsize", default=0, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=0.8, type=float) parser.add_argument("--reward_exp", default=6, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=30, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8) ** 4, type=float) # (0.1/8)**8 parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--condition_type", default='HN', type=str) # 'HN', 'FiLM', 'concat' parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--ray_hidden_dim", default=100, type=int) parser.add_argument("--logit_clipping", default=0., type=float) return parser.parse_args() class RolloutWorker: def __init__(self, args, bpath, proxy, device): self.args = args self.test_split_rng = np.random.RandomState(142857) self.train_rng = np.random.RandomState(int(time.time())) self.mdp = MolMDPExtended(bpath) self.mdp.post_init(device, args.repr_type, include_nblocks=args.include_nblocks) self.mdp.build_translation_table() if args.floatX == 'float64': self.mdp.floatX = self.floatX = torch.double else: 
self.mdp.floatX = self.floatX = torch.float self.proxy = proxy self._device = device self.seen_molecules = set() self.stop_event = threading.Event() ####### # This is the "result", here a list of (reward, BlockMolDataExt, info...) tuples self.sampled_mols = [] self.online_mols = [] self.hindsight_mols = [] self.max_online_mols = 1000 self.max_hindsight_mols = 1000 self.min_blocks = args.min_blocks self.max_blocks = args.max_blocks self.mdp._cue_max_blocks = self.max_blocks self.reward_exp = args.reward_exp self.reward_min = args.reward_min self.reward_norm = args.reward_norm self.reward_exp_ramping = args.reward_exp_ramping self.random_action_prob = args.random_action_prob # If True this basically implements Buesing et al's TreeSample Q, # samples uniformly from it though, no MTCS involved if args.criterion == 'TB' or args.criterion == "Reinforce": self.ignore_parents = True elif args.criterion == 'FM': self.ignore_parents = False def rollout(self, generator, use_rand_policy=True, weights=None, replay=False): weights = Dirichlet(torch.ones(len(self.args.objectives))*self.args.alpha).sample_n(1).to( self.args.device) if weights is None else weights m = BlockMoleculeDataExtended() samples = [] max_blocks = self.max_blocks trajectory_stats = [] for t in range(max_blocks): s = self.mdp.mols2batch([self.mdp.mol2repr(m)]) s_o, m_o = generator(s, vec_data=weights, do_stems=True) # fix from run 330 onwards if t < self.min_blocks: m_o = m_o*0 - 1000 # prevent assigning prob to stop # when we can't stop ## logits = torch.cat([m_o.reshape(-1), s_o.reshape(-1)]) cat = torch.distributions.Categorical( logits=logits) action = cat.sample().item() if use_rand_policy and self.random_action_prob > 0: # just for training if self.train_rng.uniform() < self.random_action_prob: action = self.train_rng.randint( int(t < self.min_blocks), logits.shape[0]) q = torch.cat([m_o.reshape(-1), s_o.reshape(-1)]) trajectory_stats.append( (q[action].item(), action, torch.logsumexp(q, 0).item())) if t >= self.min_blocks and action == 0: r, raw_r = self._get_reward(m, weights) # r: reward, raw_r: scores for the objectives samples.append(((m,), ((-1, 0),), weights, weights, r, m, 1)) break else: action = max(0, action-1) action = (action % self.mdp.num_blocks, action // self.mdp.num_blocks) m_old = m m = self.mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. 
Note that this node's parent isn't just m, # because this is a sink for all parent transitions r, raw_r = self._get_reward(m, weights) if self.ignore_parents: samples.append( ((m_old,), (action,), weights, weights, r, m, 1)) else: parents, actions = zip(*self.mdp.parents(m)) samples.append((parents, actions, weights.repeat( len(parents), 1), weights, r, m, 1)) break else: if self.ignore_parents: samples.append( ((m_old,), (action,), weights, weights, 0, m, 0)) else: parents, actions = zip(*self.mdp.parents(m)) samples.append( (parents, actions, weights.repeat(len(parents), 1), weights, 0, m, 0)) p = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in samples[-1][0]]) qp = generator(p, weights.repeat(p.num_graphs, 1)) qsa_p = generator.model.index_output_by_action( p, qp[0], qp[1][:, 0], torch.tensor(samples[-1][1], device=self._device).long()) inflow = torch.logsumexp(qsa_p.flatten(), 0).item() self.sampled_mols.append( ([i.cpu().numpy() for i in raw_r], weights.cpu().numpy(), m, trajectory_stats, inflow)) if replay and self.args.hindsight_prob > 0.0: self._add_mol_to_replay(m) return samples def _get_reward(self, m, weights=None): rdmol = m.mol if rdmol is None: return self.reward_min # get scores from oracle score = self.proxy.get_score([m]) score = torch.tensor(list(score.values())).to(self.args.device) if self.args.scalar == 'WeightedSum': raw_reward = (weights*score).sum() elif self.args.scalar == 'Tchebycheff': raw_reward = (weights*score).min() + 0.1 * (weights*score).sum() reward = self.l2r(raw_reward.clip(self.reward_min)) return reward, (raw_reward, score) def execute_train_episode_batch(self, generator, dataset=None, use_rand_policy=True): if self.args.condition_type is None: weights = self.test_weights # train specific model else: weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better samples = sum((self.rollout(generator, use_rand_policy, weights) for i in range(self.args.trajectories_mbsize)), []) return zip(*samples) def sample2batch(self, mb): p, a, p_weights, weights, r, s, d, *o = mb mols = (p, s) # The batch index of each parent p_batch = torch.tensor(sum([[i]*len(p) for i, p in enumerate(p)], []), device=self._device).long() # Convert all parents and states to repr. 
Note that this # concatenates all the parent lists, which is why we need # p_batch p = self.mdp.mols2batch(list(map(self.mdp.mol2repr, sum(p, ())))) s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s]) # Concatenate all the actions (one per parent per sample) a = torch.tensor(sum(a, ()), device=self._device).long() # rewards and dones r = torch.tensor(r, device=self._device).to(self.floatX) d = torch.tensor(d, device=self._device).to(self.floatX) # weights p_w = torch.cat(p_weights, 0) w = torch.cat(weights, 0) return (p, p_batch, a, p_w, w, r, s, d, mols, *o) def l2r(self, raw_reward, t=0): if self.reward_exp_ramping > 0: reward_exp = 1 + (self.reward_exp - 1) * \ (1 - 1/(1 + t / self.reward_exp_ramping)) # when t=0, exp = 1; t->∞, exp = self.reward_exp else: reward_exp = self.reward_exp reward = (raw_reward/self.reward_norm)**reward_exp return reward def start_samplers(self, generator, n, dataset): self.ready_events = [threading.Event() for i in range(n)] self.resume_events = [threading.Event() for i in range(n)] self.results = [None] * n def f(idx): while not self.stop_event.is_set(): try: self.results[idx] = self.sample2batch( self.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) except Exception as e: print("Exception while sampling:") print(e) self.sampler_threads[idx].failed = True self.sampler_threads[idx].exception = e self.ready_events[idx].set() break self.ready_events[idx].set() self.resume_events[idx].clear() self.resume_events[idx].wait() self.sampler_threads = [threading.Thread( target=f, args=(i,)) for i in range(n)] [setattr(i, 'failed', False) for i in self.sampler_threads] [i.start() for i in self.sampler_threads] round_robin_idx = [0] def get(): while True: idx = round_robin_idx[0] round_robin_idx[0] = (round_robin_idx[0] + 1) % n if self.ready_events[idx].is_set(): r = self.results[idx] self.ready_events[idx].clear() self.resume_events[idx].set() return r elif round_robin_idx[0] == 0: time.sleep(0.001) return get def stop_samplers_and_join(self): self.stop_event.set() if hasattr(self, 'sampler_threads'): while any([i.is_alive() for i in self.sampler_threads]): [i.set() for i in self.resume_events] [i.join(0.05) for i in self.sampler_threads] def train_generative_model_with_oracle(args, generator, bpath, oracle, test_weights, dataset=None, do_save=False): print("Training generator...") device = args.device rollout_worker = RolloutWorker(args, bpath, oracle, device) if args.condition_type is None: rollout_worker.test_weights = torch.tensor(test_weights).to(device)[args.run :args.run+1] else: rollout_worker.test_weights = torch.tensor(test_weights).to(device) rollout_worker.test_mols = pickle.load(gzip.open('./data/test_mols_6062.pkl.gz', 'rb')) def save_stuff(iter): torch.save(generator.state_dict(), os.path.join( args.log_dir, '{}_generator_checkpoint.pth'.format(iter))) pickle.dump(rollout_worker.sampled_mols, gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb')) multi_thread = not args.debug if multi_thread: sampler = rollout_worker.start_samplers(generator, 8, dataset) def stop_everything(): print('joining') rollout_worker.stop_samplers_and_join() last_losses = [] train_losses = [] test_losses = [] test_infos = [] train_infos = [] best_hv = 0 best_corr = 0 time_last_check = time.time() for i in range(args.num_iterations + 1): rollout_worker.reward_exp = 1 + (args.reward_exp-1) * (1-1/(1+i/20)) if multi_thread: r = sampler() for thread in rollout_worker.sampler_threads: if thread.failed: stop_everything() 
pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(loss), 3) for loss in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/train', train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, diversity = evaluate(args, generator, rollout_worker, 100) corrs = compute_correlation(args, generator, rollout_worker, rollout_worker.test_mols) args.logger.add_scalar( 'Top-100-sampled/volumes', volume, use_context=False) args.logger.add_scalar( 'Top-100-sampled/dists', diversity, use_context=False) args.logger.add_scalar( 'Top-100-sampled/corr', np.mean(corrs), use_context=False) if do_save: save_stuff(i) if volume > best_hv: best_hv = volume if do_save: save_stuff('volume') if np.mean(corrs) > best_corr: best_corr = np.mean(corrs) if do_save: save_stuff('corr') stop_everything() if do_save: save_stuff(i) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def get_test_mols(args, mdp, num): samples = [] fps = [] early_stops = [] while len(samples) < num: if len(samples) % 5000 == 0: print(f'{len(samples)}/{num} mols have been sampled') m = BlockMoleculeDataExtended() min_blocks = args.min_blocks max_blocks = args.max_blocks early_stop_at = np.random.randint(min_blocks, max_blocks + 1) early_stops.append(early_stop_at) for t in range(max_blocks): if t == 0: length = mdp.num_blocks+1 else: length = len(m.stems)*mdp.num_blocks+1 action = np.random.randint(1, length) if t == early_stop_at: action = 0 if t >= min_blocks and action == 0: fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break else: action = max(0, action-1) action = (action % mdp.num_blocks, action // mdp.num_blocks) #print('..', action) m = mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. 
Note that this node's parent isn't just m, # because this is a sink for all parent transitions fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break return samples def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args): set_random_seed(args.seed) args.logger.set_context('iter_0') bpath = "./data/blocks_105.json" # Initialization: oracle and dataset oracle = Oracle(args) args.n_objectives = len(args.objectives) if args.n_objectives == 2: test_weights = circle_points(K=5, min_angle=0.1, max_angle=np.pi/2-0.1) else: test_weights = get_test_rays() if args.criterion == 'TB': generator = TBGFlowNet(args, bpath) elif args.criterion == 'FM':
generator = FMGFlowNet(args, bpath)
5
2023-10-24 14:10:35+00:00
16k
SALT-NLP/Efficient_Unlearning
src/models/transformers/parameter-efficient-finetuning/heads/base.py
[ { "identifier": "ImageClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class ImageClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of image classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each stage) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states\n (also called feature maps) of the model at the output of each stage.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, patch_size,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "MultipleChoiceModelOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class MultipleChoiceModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of multiple choice models.\n\n Args:\n loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided):\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`):\n *num_choices* is the second dimension of the input tensors. 
(see *input_ids* above).\n\n Classification scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "QuestionAnsweringModelOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class QuestionAnsweringModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of question answering models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n start_logits: torch.FloatTensor = None\n end_logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "Seq2SeqModelOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class Seq2SeqModelOutput(ModelOutput):\n \"\"\"\n Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential\n decoding.\n\n Args:\n last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the decoder of the model.\n\n If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1,\n hidden_size)` is output.\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, 
returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n \"\"\"\n\n last_hidden_state: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "Seq2SeqQuestionAnsweringModelOutput", "path": 
"src/models/transformers/modeling_outputs.py", "snippet": "class Seq2SeqQuestionAnsweringModelOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sequence-to-sequence question answering models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.\n start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-start scores (before SoftMax).\n end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Span-end scores (before SoftMax).\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the 
attention softmax, used to compute the weighted average in the\n self-attention heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n start_logits: torch.FloatTensor = None\n end_logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "Seq2SeqSequenceClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class Seq2SeqSequenceClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sequence-to-sequence sentence classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention\n blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.\n decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.\n decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the\n weighted average in the cross-attention heads.\n encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Sequence of hidden-states at the output of the last layer of the encoder of the model.\n encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when 
`config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.\n encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the\n self-attention heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "SequenceClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class SequenceClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of sentence classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):\n Classification (or regression if config.num_labels==1) loss.\n logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):\n Classification (or regression if config.num_labels==1) scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "TokenClassifierOutput", "path": "src/models/transformers/modeling_outputs.py", "snippet": "class TokenClassifierOutput(ModelOutput):\n \"\"\"\n Base class for outputs of token classification models.\n\n Args:\n loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) :\n Classification loss.\n logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`):\n Classification scores (before SoftMax).\n hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):\n Tuple of 
`torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +\n one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.\n attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):\n Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,\n sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n \"\"\"\n\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None" }, { "identifier": "ModelOutput", "path": "src/models/transformers/utils/generic.py", "snippet": "class ModelOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n if not all(field.default is None for field in class_fields[1:]):\n raise ValueError(f\"{self.__class__.__name__} should not have more than one required field.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and not is_tensor(first_field):\n if isinstance(first_field, dict):\n iterator = first_field.items()\n first_field_iterator = True\n else:\n try:\n iterator = iter(first_field)\n first_field_iterator = True\n except TypeError:\n first_field_iterator = False\n\n # if we provided an iterator as first field and the iterator is a (key, value) iterator\n # set the associated fields\n if first_field_iterator:\n for element in iterator:\n if (\n not isinstance(element, (list, tuple))\n or not len(element) == 2\n or not isinstance(element[0], str)\n ):\n break\n setattr(self, element[0], element[1])\n if element[1] is not None:\n self[element[0]] = element[1]\n elif first_field is not None:\n self[class_fields[0].name] = first_field\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = {k: v for (k, v) in self.items()}\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, 
name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "AdapterCompositionBlock", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class AdapterCompositionBlock(Sequence):\n def __init__(self, *children):\n self.children = [parse_composition(b, None) for b in children]\n\n def __getitem__(self, key):\n return self.children[key]\n\n def __len__(self):\n return len(self.children)\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, type(self)):\n return all([c1 == c2 for c1, c2 in zip(self.children, o.children)])\n else:\n return False\n\n def __repr__(self):\n child_repr = \", \".join(map(str, self.children))\n return f\"{self.__class__.__name__}[{child_repr}]\"\n\n def first(self):\n if not isinstance(self.children[0], AdapterCompositionBlock):\n return self.children[0]\n else:\n return self.children[0].first()\n\n def last(self):\n if not isinstance(self.children[-1], AdapterCompositionBlock):\n return self.children[-1]\n else:\n return self.children[-1].last()\n\n @property\n def parallel_channels(self):\n return max([b.parallel_channels if isinstance(b, AdapterCompositionBlock) else 1 for b in self.children])\n\n def flatten(self) -> Set[str]:\n return set(itertools.chain(*[[b] if isinstance(b, str) else b.flatten() for b in self.children]))" }, { "identifier": "BatchSplit", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class BatchSplit(AdapterCompositionBlock):\n def __init__(self, *split_adapters: List[Union[AdapterCompositionBlock, str]], batch_sizes: Union[List[int], int]):\n super().__init__(*split_adapters)\n self.batch_sizes = batch_sizes if isinstance(batch_sizes, list) else [batch_sizes] * len(split_adapters)" }, { "identifier": "Parallel", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "class Parallel(AdapterCompositionBlock):\n def __init__(self, *parallel_adapters: List[str]):\n \"\"\"\n Can be used to perform inference for multiple tasks (i.e., adapters) in parallel (for the same input).\n\n See AdapterDrop https://arxiv.org/abs/2010.11918\n \"\"\"\n super().__init__(*parallel_adapters)\n\n @property\n def parallel_channels(self):\n return len(self.children)" }, { "identifier": "parse_heads_from_composition", "path": "src/models/transformers/parameter-efficient-finetuning/composition.py", "snippet": "def parse_heads_from_composition(adapter_composition, reference_heads: list = None):\n \"\"\"\n Parses a potential head configuration from a setup of adapters.\n\n Args:\n adapter_composition: The adapter setup to be parsed.\n reference_heads: The list of available to validate the retrieved head configuration against.\n \"\"\"\n final_block = adapter_composition\n if isinstance(final_block, Stack):\n final_block = final_block.children[-1]\n\n if isinstance(final_block, str) and (reference_heads is None or final_block in reference_heads):\n return final_block\n elif isinstance(final_block, Parallel):\n 
return [a if isinstance(a, str) else a.last() for a in final_block.children]\n elif isinstance(final_block, BatchSplit):\n # Convert BatchSplit of adapters to a BatchSplit of heads.\n blocks = [block.last() if isinstance(block, AdapterCompositionBlock) else block for block in final_block]\n head_setup = BatchSplit(*blocks, batch_sizes=final_block.batch_sizes)\n if reference_heads is None or all(head in reference_heads for head in head_setup):\n return head_setup\n else:\n raise ValueError(\n \"Missing at least one head for the given BatchSplit setup. Expected heads: {}\".format(blocks)\n )\n else:\n return None" }, { "identifier": "AdapterSetup", "path": "src/models/transformers/parameter-efficient-finetuning/context.py", "snippet": "class AdapterSetup:\n \"\"\"\n Represents an adapter setup of a model including active adapters and active heads. This class is intended to be\n used as a context manager using the ``with`` statement. The setup defined by the ``AdapterSetup`` context will\n override static adapter setups defined in a model (i.e. setups specified via ``active_adapters``).\n\n Example::\n\n with AdapterSetup(Stack(\"a\", \"b\")):\n # will use the adapter stack \"a\" and \"b\" outputs = model(**inputs)\n\n Note that the context manager is thread-local, i.e. it can be used with different setups in a multi-threaded\n environment.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n def __init__(self, adapter_setup, head_setup=None, ignore_empty: bool = False):\n self.adapter_setup = parse_composition(adapter_setup)\n if head_setup:\n self.head_setup = head_setup\n else:\n self.head_setup = parse_heads_from_composition(self.adapter_setup)\n self._empty = ignore_empty and self.adapter_setup is None and self.head_setup is None\n\n def __enter__(self):\n if not self._empty:\n AdapterSetup.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n if not self._empty:\n AdapterSetup.get_contexts().pop()\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None\n\n @classmethod\n def get_context_adapter_setup(cls):\n context = cls.get_context()\n if context:\n return context.adapter_setup\n return None\n\n @classmethod\n def get_context_head_setup(cls):\n context = cls.get_context()\n if context:\n return context.head_setup\n return None" }, { "identifier": "ForwardContext", "path": "src/models/transformers/parameter-efficient-finetuning/context.py", "snippet": "class ForwardContext:\n \"\"\"\n Holds context information during a forward pass through a model. 
This class should be used via the\n ``ForwardContext.wrap()`` method.\n\n Note that the context is thread-local.\n \"\"\"\n\n # thread-local storage that holds a stack of active contexts\n storage = threading.local()\n\n context_attributes = [\"adapter_gating_scores\", \"adapter_fusion_attentions\", \"adapter_input_parallelized\"]\n\n def __init__(self, model, *args, **kwargs):\n # If the model has a method ``forward_context()``, use it to create the context.\n if hasattr(model, \"forward_context\"):\n model.forward_context(self, *args, **kwargs)\n\n def __enter__(self):\n ForwardContext.get_contexts().append(self)\n return self\n\n def __exit__(self, type, value, traceback):\n ForwardContext.get_contexts().pop()\n\n @classmethod\n def wrap(cls, f):\n \"\"\"\n Decorator method that wraps a ``forward()`` function of a model class.\n \"\"\"\n\n @functools.wraps(f)\n def wrapper_func(self, *args, **kwargs):\n if self.config.adapters is not None:\n with cls(self, *args, **kwargs) as ctx:\n kwargs = {\n k: v for k, v in kwargs.items() if k.replace(\"output_\", \"\") not in cls.context_attributes\n }\n results = f(self, *args, **kwargs)\n\n # append output attributes\n if isinstance(results, tuple):\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results = results + (dict(getattr(ctx, attr)),)\n else:\n for attr in cls.context_attributes:\n if getattr(ctx, \"output_\" + attr, False):\n results[attr] = dict(getattr(ctx, attr))\n return results\n else:\n return f(self, *args, **kwargs)\n\n return wrapper_func\n\n @classmethod\n def get_contexts(cls):\n if not hasattr(cls.storage, \"contexts\"):\n cls.storage.contexts = []\n return cls.storage.contexts\n\n @classmethod\n def get_context(cls):\n try:\n return cls.get_contexts()[-1]\n except IndexError:\n return None" }, { "identifier": "ModelWithHeadsAdaptersMixin", "path": "src/models/transformers/parameter-efficient-finetuning/model_mixin.py", "snippet": "class ModelWithHeadsAdaptersMixin(ModelAdaptersMixin):\n \"\"\"\n Mixin adding support for loading/ saving adapters to transformer models with head(s).\n \"\"\"\n\n def __init__(self, config, *args, **kwargs):\n super().__init__(config, *args, **kwargs)\n self._convert_to_flex_head = False\n\n def iter_layers(self) -> Iterable[Tuple[int, nn.Module]]:\n \"\"\"\n Iterates over all layers of the model.\n \"\"\"\n if self.base_model is self:\n return super().iter_layers()\n else:\n return self.base_model.iter_layers()\n\n def add_adapter(self, adapter_name: str, config=None, overwrite_ok: bool = False, set_active: bool = False):\n \"\"\"\n Adds a new adapter module of the specified type to the model.\n\n Args:\n adapter_name (str): The name of the adapter module to be added.\n config (str or dict, optional): The adapter configuration, can be either:\n\n - the string identifier of a pre-defined configuration dictionary\n - a configuration dictionary specifying the full config\n - if not given, the default configuration for this adapter type will be used\n overwrite_ok (bool, optional):\n Overwrite an adapter with the same name if it exists. By default (False), an exception is thrown.\n set_active (bool, optional):\n Set the adapter to be the active one. 
By default (False), the adapter is added but not activated.\n\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n super().add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n else:\n self.base_model.add_adapter(adapter_name, config, overwrite_ok=overwrite_ok, set_active=set_active)\n\n def train_adapter(self, adapter_setup: Union[list, AdapterCompositionBlock], train_embeddings=False):\n \"\"\"\n Sets the model into mode for training the given adapters. If self.base_model is self, must inherit from a class\n that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter(adapter_setup, train_embeddings)\n else:\n self.base_model.train_adapter(adapter_setup, train_embeddings)\n\n def train_adapter_fusion(self, adapter_setup: Union[list, AdapterCompositionBlock], unfreeze_adapters=False):\n \"\"\"\n Sets the model into mode for training of adapter fusion determined by a list of adapter names. If\n self.base_model is self, must inherit from a class that implements this method, to preclude infinite recursion\n \"\"\"\n if self.base_model is self:\n super().train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n else:\n self.base_model.train_adapter_fusion(adapter_setup, unfreeze_adapters=unfreeze_adapters)\n\n def save_head(self, save_directory: str, head_name: str = None):\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, name=head_name)\n\n def load_head(self, save_directory, load_as=None, id2label=None, **kwargs):\n loader = PredictionHeadLoader(self, convert_to_flex_head=self._convert_to_flex_head)\n return loader.load(save_directory, load_as=load_as, id2label=id2label, **kwargs)\n\n def save_adapter(\n self,\n save_directory: str,\n adapter_name: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().save_adapter(\n save_directory,\n adapter_name,\n meta_dict=meta_dict,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def load_adapter(\n self,\n adapter_name_or_path: str,\n config: Union[dict, str] = None,\n version: str = None,\n model_name: str = None,\n load_as: str = None,\n source: str = None,\n with_head: bool = True,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n leave_out: Optional[List[int]] = None,\n id2label=None,\n set_active: bool = False,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(\n PredictionHeadLoader(\n self,\n error_on_missing=False,\n convert_to_flex_head=self._convert_to_flex_head,\n )\n )\n # Support passing a num_labels for compatibility reasons. 
Convert to label map here.\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None:\n id2label = {i: \"LABEL_\" + str(i) for i in range(num_labels)}\n return super().load_adapter(\n adapter_name_or_path,\n config=config,\n version=version,\n model_name=model_name,\n load_as=load_as,\n source=source,\n custom_weights_loaders=custom_weights_loaders,\n leave_out=leave_out,\n id2label=id2label,\n set_active=set_active,\n **kwargs,\n )\n\n def save_all_adapters(\n self,\n save_directory: str,\n with_head: bool = True,\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n ):\n os.makedirs(save_directory, exist_ok=True)\n for name in self.config.adapters:\n adapter_config = self.config.adapters.get(name)\n h = get_adapter_config_hash(adapter_config)\n save_path = join(save_directory, name)\n if meta_dict:\n meta_dict.update({\"config_id\": h})\n else:\n meta_dict = {\"config_id\": h}\n self.save_adapter(\n save_path,\n name,\n meta_dict=meta_dict,\n with_head=with_head,\n custom_weights_loaders=custom_weights_loaders,\n )\n\n def save_adapter_fusion(\n self,\n save_directory: str,\n adapter_names: Union[Fuse, list, str],\n meta_dict: dict = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n with_head: Union[bool, str] = False,\n ):\n \"\"\"\n Saves an AdapterFusion layer and its configuration file to a directory so that it can be shared or reloaded\n using `load_adapter_fusion()`.\n\n Args:\n save_directory (str): Path to a directory where the AdapterFusion should be saved.\n adapter_names (Union[Fuse, list, str]): AdapterFusion to be saved.\n with_head (Union[bool, str]):\n If True, will save a head with the same name as the AdapterFusionLayer. If a string, this will be used\n as the name of the head to be saved.\n\n Raises:\n ValueError: If the given AdapterFusion name is invalid.\n \"\"\"\n super().save_adapter_fusion(save_directory, adapter_names, meta_dict, custom_weights_loaders)\n\n if with_head:\n # Make sure to cover the different options for adapter_names\n if isinstance(with_head, str):\n head_name = with_head\n elif isinstance(adapter_names, Fuse):\n head_name = adapter_names.name\n elif isinstance(adapter_names, list):\n head_name = \",\".join(adapter_names)\n else:\n head_name = adapter_names\n if head_name not in self.heads:\n raise ValueError(\"No head with name {} found\".format(head_name))\n loader = PredictionHeadLoader(self)\n loader.save(save_directory, head_name)\n\n def load_adapter_fusion(\n self,\n adapter_fusion_name_or_path: str,\n load_as: str = None,\n custom_weights_loaders: Optional[List[WeightsLoader]] = None,\n set_active: bool = False,\n with_head: bool = True,\n **kwargs\n ) -> str:\n if with_head:\n if custom_weights_loaders is None:\n custom_weights_loaders = []\n custom_weights_loaders.append(PredictionHeadLoader(self, error_on_missing=False))\n super().load_adapter_fusion(adapter_fusion_name_or_path, load_as, custom_weights_loaders, set_active)\n\n def save_all_heads(self, save_directory):\n os.makedirs(save_directory, exist_ok=True)\n for head_name in self.heads:\n save_path = join(save_directory, head_name)\n self.save_head(save_path, head_name)\n\n def get_labels(self):\n return list(self.config.id2label.values())\n\n def get_labels_dict(self):\n return self.config.id2label\n\n def get_adapter(self, name):\n \"\"\"\n If self.base_model is self, must inherit from a class that implements this method, to preclude infinite\n recursion\n \"\"\"\n if self.base_model is self:\n return 
super().get_adapter(name)\n else:\n return self.base_model.get_adapter(name)" }, { "identifier": "Activation_Function_Class", "path": "src/models/transformers/parameter-efficient-finetuning/modeling.py", "snippet": "class Activation_Function_Class(nn.Module):\n \"\"\"\n Implementation of various activation function.\n \"\"\"\n\n def __init__(self, hidden_act):\n super().__init__()\n if hidden_act.lower() == \"leakyrelu\":\n self.f = nn.functional.leaky_relu\n else:\n self.f = get_activation(hidden_act.lower())\n\n def forward(self, x):\n return self.f(x)" } ]
import logging
import torch
from dataclasses import dataclass
from typing import List, Optional, Union
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...modeling_outputs import (
    ImageClassifierOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    Seq2SeqModelOutput,
    Seq2SeqQuestionAnsweringModelOutput,
    Seq2SeqSequenceClassifierOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...utils import ModelOutput
from ..composition import AdapterCompositionBlock, BatchSplit, Parallel, parse_heads_from_composition
from ..context import AdapterSetup, ForwardContext
from ..model_mixin import ModelWithHeadsAdaptersMixin
from ..modeling import Activation_Function_Class
11,320
        # with number indices the head output at that position is accessed
        # e.g output[1] is equivalent to output.head_outputs[1]
        if isinstance(k, int):
            return self.head_outputs[k]
        # with strings the attribute in the underlying dict can be adressed
        # e.g output["loss"] is equivalent to output.loss
        else:
            return super().__getitem__(k)

    def __setitem__(self, k, v):
        if isinstance(k, int):
            self.head_outputs[k] = v
        else:
            return super().__setitem__(k, v)

    def __iter__(self):
        # iterates over the head outputs
        return iter(self.head_outputs)

    def __len__(self):
        return len(self.head_outputs)


# Let this class inherit from nn.Sequential to provide iterable access as before
class PredictionHead(nn.Sequential):
    def __init__(self, name):
        super().__init__()
        self.config = {}
        self.name = name

    def build(self, model):
        model_config = model.config
        pred_head = []
        dropout_prob = self.config.get("dropout_prob", model_config.hidden_dropout_prob)
        bias = self.config.get("bias", True)
        for l_id in range(self.config["layers"]):
            if dropout_prob > 0:
                pred_head.append(nn.Dropout(dropout_prob))
            if l_id < self.config["layers"] - 1:
                pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size))
                if self.config["activation_function"]:
                    pred_head.append(Activation_Function_Class(self.config["activation_function"]))
            else:
                if "num_labels" in self.config:
                    pred_head.append(nn.Linear(model_config.hidden_size, self.config["num_labels"], bias=bias))
                elif "num_choices" in self.config:
                    # used for multiple_choice head
                    pred_head.append(nn.Linear(model_config.hidden_size, 1, bias=bias))
                else:
                    pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size, bias=bias))
                    if self.config["activation_function"]:
                        pred_head.append(Activation_Function_Class(self.config["activation_function"]))
        for i, module in enumerate(pred_head):
            self.add_module(str(i), module)

        self.apply(model._init_weights)
        self.train(model.training)  # make sure training mode is consistent

    def get_output_embeddings(self):
        return None  # override for heads with output embeddings

    def get_label_names(self):
        return ["labels"]


class ClassificationHead(PredictionHead):
    def __init__(
        self,
        model,
        head_name,
        num_labels=2,
        layers=2,
        activation_function="tanh",
        id2label=None,
        use_pooler=False,
        bias=True,
    ):
        super().__init__(head_name)
        self.config = {
            "head_type": "classification",
            "num_labels": num_labels,
            "layers": layers,
            "activation_function": activation_function,
            "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None,
            "use_pooler": use_pooler,
            "bias": bias,
        }
        self.build(model)

    def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs):
        if cls_output is None:
            if self.config["use_pooler"]:
                cls_output = kwargs.pop("pooled_output")
            else:
                cls_output = outputs[0][:, 0]
        logits = super().forward(cls_output)
        loss = None
        labels = kwargs.pop("labels", None)
        if labels is not None:
            if self.config["num_labels"] == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1))
        if return_dict:
            if isinstance(outputs, Seq2SeqModelOutput):
                return Seq2SeqSequenceClassifierOutput(
                    loss=loss,
                    logits=logits,
                    past_key_values=outputs.past_key_values,
                    decoder_hidden_states=outputs.decoder_hidden_states,
                    decoder_attentions=outputs.decoder_attentions,
                    cross_attentions=outputs.cross_attentions,
                    encoder_last_hidden_state=outputs.encoder_last_hidden_state,
                    encoder_hidden_states=outputs.encoder_hidden_states,
                    encoder_attentions=outputs.encoder_attentions,
                )
            else:
logger = logging.getLogger(__name__)


@dataclass
class MultiHeadOutput(ModelOutput):
    head_outputs: List[ModelOutput] = None
    loss: Optional[torch.FloatTensor] = None

    @property
    def logits(self):
        return torch.vstack([outputs["logits"] for outputs in self.head_outputs])

    def __getitem__(self, k):
        # with number indices the head output at that position is accessed
        # e.g output[1] is equivalent to output.head_outputs[1]
        if isinstance(k, int):
            return self.head_outputs[k]
        # with strings the attribute in the underlying dict can be adressed
        # e.g output["loss"] is equivalent to output.loss
        else:
            return super().__getitem__(k)

    def __setitem__(self, k, v):
        if isinstance(k, int):
            self.head_outputs[k] = v
        else:
            return super().__setitem__(k, v)

    def __iter__(self):
        # iterates over the head outputs
        return iter(self.head_outputs)

    def __len__(self):
        return len(self.head_outputs)


# Let this class inherit from nn.Sequential to provide iterable access as before
class PredictionHead(nn.Sequential):
    def __init__(self, name):
        super().__init__()
        self.config = {}
        self.name = name

    def build(self, model):
        model_config = model.config
        pred_head = []
        dropout_prob = self.config.get("dropout_prob", model_config.hidden_dropout_prob)
        bias = self.config.get("bias", True)
        for l_id in range(self.config["layers"]):
            if dropout_prob > 0:
                pred_head.append(nn.Dropout(dropout_prob))
            if l_id < self.config["layers"] - 1:
                pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size))
                if self.config["activation_function"]:
                    pred_head.append(Activation_Function_Class(self.config["activation_function"]))
            else:
                if "num_labels" in self.config:
                    pred_head.append(nn.Linear(model_config.hidden_size, self.config["num_labels"], bias=bias))
                elif "num_choices" in self.config:
                    # used for multiple_choice head
                    pred_head.append(nn.Linear(model_config.hidden_size, 1, bias=bias))
                else:
                    pred_head.append(nn.Linear(model_config.hidden_size, model_config.hidden_size, bias=bias))
                    if self.config["activation_function"]:
                        pred_head.append(Activation_Function_Class(self.config["activation_function"]))
        for i, module in enumerate(pred_head):
            self.add_module(str(i), module)

        self.apply(model._init_weights)
        self.train(model.training)  # make sure training mode is consistent

    def get_output_embeddings(self):
        return None  # override for heads with output embeddings

    def get_label_names(self):
        return ["labels"]


class ClassificationHead(PredictionHead):
    def __init__(
        self,
        model,
        head_name,
        num_labels=2,
        layers=2,
        activation_function="tanh",
        id2label=None,
        use_pooler=False,
        bias=True,
    ):
        super().__init__(head_name)
        self.config = {
            "head_type": "classification",
            "num_labels": num_labels,
            "layers": layers,
            "activation_function": activation_function,
            "label2id": {label: id_ for id_, label in id2label.items()} if id2label is not None else None,
            "use_pooler": use_pooler,
            "bias": bias,
        }
        self.build(model)

    def forward(self, outputs, cls_output=None, attention_mask=None, return_dict=False, **kwargs):
        if cls_output is None:
            if self.config["use_pooler"]:
                cls_output = kwargs.pop("pooled_output")
            else:
                cls_output = outputs[0][:, 0]
        logits = super().forward(cls_output)
        loss = None
        labels = kwargs.pop("labels", None)
        if labels is not None:
            if self.config["num_labels"] == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.config["num_labels"]), labels.view(-1))
        if return_dict:
            if isinstance(outputs, Seq2SeqModelOutput):
                return Seq2SeqSequenceClassifierOutput(
                    loss=loss,
                    logits=logits,
                    past_key_values=outputs.past_key_values,
                    decoder_hidden_states=outputs.decoder_hidden_states,
                    decoder_attentions=outputs.decoder_attentions,
                    cross_attentions=outputs.cross_attentions,
                    encoder_last_hidden_state=outputs.encoder_last_hidden_state,
                    encoder_hidden_states=outputs.encoder_hidden_states,
                    encoder_attentions=outputs.encoder_attentions,
                )
            else:
return SequenceClassifierOutput(
6
2023-10-18 18:05:54+00:00
16k
upiterbarg/hihack
models/utils.py
[ { "identifier": "CDGPT5", "path": "models/cdgpt5.py", "snippet": "class CDGPT5(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(CDGPT5, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n if flags.cdgpt5_xxl_policy:\n self.hidden_dim = 1024\n else:\n self.hidden_dim = 512\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n if flags.cdgpt5_xxl_decoder:\n self.policy_hidden_dim = 1024\n self.policy = nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions)\n )\n else:\n self.policy = nn.Linear(self.hidden_dim, self.num_actions)\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n def initial_state(self, batch_size=1):\n return tuple(\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)\n for _ in range(2)\n )\n\n def forward(self, inputs, core_state=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.view(T, B, -1)\n core_output_list = []\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in zip(core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n core_state = tuple(nd * t for t in core_state)\n output, core_state = self.core(input.unsqueeze(0), core_state)\n core_output_list.append(output)\n\n core_output = torch.flatten(torch.cat(core_output_list), 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n\n return (output, core_state)" }, { "identifier": 
"CleavedHierarchicalPolicy", "path": "models/cleaved_hierarchical_policy.py", "snippet": "class CleavedHierarchicalPolicy(nn.Module):\n def __init__(self, \n flags,\n high_level_model, \n low_level_model):\n super(CleavedHierarchicalPolicy, self).__init__()\n self.high_level_model = high_level_model\n self.low_level_model = low_level_model\n self.num_strategies = self.high_level_model.num_strategies\n\n self.gumbel_softmax_tau = 1\n if 'gumbel_softmax_tau' in flags:\n self.gumbel_softmax_tau = flags.gumbel_softmax_tau\n\n self.disable_high_level_policy_gradients = flags.disable_high_level_policy_gradients\n self.disable_low_level_policy_gradients = flags.disable_low_level_policy_gradients\n self.version = 0\n self.eps_greedy = flags.eps_greedy if 'eps_greedy' in flags else 1\n\n\n def initial_state(self, batch_size=1):\n high_level_core_state = self.high_level_model.initial_state(batch_size)\n low_level_core_state = self.low_level_model.initial_state(batch_size)\n return high_level_core_state + low_level_core_state\n\n def parameters(self):\n if self.disable_high_level_policy_gradients:\n return self.low_level_model.parameters()\n elif self.disable_low_level_policy_gradients:\n return self.high_level_model.parameters()\n return list(self.low_level_model.parameters()) + list(self.high_level_model.parameters())\n\n def buffers(self):\n if self.disable_high_level_policy_gradients:\n return self.low_level_model.buffers()\n elif self.disable_low_level_policy_gradients:\n return self.high_level_model.buffers()\n return list(self.low_level_model.buffers()) + list(self.high_level_model.buffers())\n\n def forward(self, inputs, core_state, last_ttyrec_data=None):\n high_level_core_state, low_level_core_state = core_state[:2], core_state[2:]\n\n if not last_ttyrec_data is None:\n low_level_out, low_level_core_state = self.low_level_model(inputs, low_level_core_state, return_strategywise_logits=True, last_ttyrec_data=last_ttyrec_data)\n else:\n low_level_out, low_level_core_state = self.low_level_model(inputs, low_level_core_state, return_strategywise_logits=True)\n high_level_out, high_level_core_state = self.high_level_model(inputs, high_level_core_state)\n\n policy_logits = low_level_out['strategywise_policy_logits']\n strategy_logits = high_level_out['strategy_logits']\n\n if isinstance(self.low_level_model, HierarchicalTransformerLSTM):\n strategy_logits = torch.cat([strategy_logits[..., -1].unsqueeze(-1), strategy_logits[..., :-1]], axis=-1)\n\n T, B, _ = strategy_logits.shape\n\n sample = True\n\n if self.eps_greedy < 1:\n sample = bool(np.random.binomial(1, self.eps_greedy))\n\n if sample:\n strategies = F.gumbel_softmax(strategy_logits.reshape(T * B, -1), tau=self.gumbel_softmax_tau, hard=True).bool().unsqueeze(-1).expand((-1, -1, policy_logits.shape[-1]))\n sdim = strategy_logits.size(-1)\n out_policy_logits = torch.sum(torch.mul(policy_logits[:sdim], torch.swapaxes(strategies, 0, 1)), axis=0).view(T, B, -1)\n else:\n strategies = torch.argmax(strategy_logits.reshape(T * B, -1), axis=-1)\n out_policy_logits = policy_logits[strategies, torch.arange(strategies.size(0))].view(T, B, -1)\n\n\n out_action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long().view(T, B)\n\n version = torch.ones_like(out_action) * self.version\n\n if self.disable_high_level_policy_gradients:\n baseline = low_level_out['baseline']\n else:\n baseline = high_level_out['baseline']\n\n output = dict(\n policy_logits=out_policy_logits,\n baseline=baseline,\n action=out_action,\n 
version=version,\n strategy_logits=strategy_logits.view(T, B, -1),\n all_policy_logits=torch.swapaxes(torch.swapaxes(policy_logits, 0, 1), 1, 2),\n )\n\n core_state = high_level_core_state + low_level_core_state\n return (output, core_state)" }, { "identifier": "FlatTransformer", "path": "models/flat_transformer.py", "snippet": "class FlatTransformer(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(FlatTransformer, self).__init__()\n \n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.num_attention_heads = flags.num_attention_heads \n self.num_transformer_encoder_layers = flags.num_transformer_layers\n core_layer = nn.TransformerEncoderLayer(d_model=self.h_dim, nhead=self.num_attention_heads)\n self.core = nn.TransformerEncoder(core_layer, num_layers=self.num_transformer_encoder_layers)\n self.positional_encoder = PositionalEncoding(self.h_dim)\n\n self.policy_hidden_dim = 1024\n self.policy = nn.Sequential(nn.Linear(self.h_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions)\n )\n self.baseline = nn.Linear(self.h_dim, 1)\n\n self.version = 0\n self.inference_unroll_length = 1\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length),\n torch.rand(self.inference_unroll_length, batch_size, self.h_dim)\n )\n\n def forward(self, inputs, core_state=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.reshape(T, B, -1)\n notdone = (~inputs[\"done\"]).float()\n if not self.training:\n prev_mask, prev_encodings = core_state\n prev_mask = prev_mask.squeeze(0)\n core_input = torch.cat([prev_encodings[1:], core_input], axis=0)\n core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(core_input.device)], axis=1) for i in range(B)]\n )\n core_mask[:, -1, -1] = 1\n core_state = (core_mask.detach().clone().unsqueeze(0), \n core_input.detach().clone()\n )\n for i in range(B):\n core_mask[i].fill_diagonal_(1)\n core_mask = (core_mask.float().masked_fill(core_mask == 0, 
float(\"-inf\")).masked_fill(core_mask == 1, float(0.0))).to(device=core_input.device)\n\n core_mask = torch.repeat_interleave(core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = core_input.shape[0]\n else:\n core_mask = generate_square_subsequent_mask(T, core_input.device)\n\n core_input = self.positional_encoder(core_input)\n core_output = self.core(core_input, core_mask)\n core_output = torch.flatten(core_output, 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n policy_logits = policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n \n return (output, core_state)" }, { "identifier": "HierarchicalLSTM", "path": "models/hierarchical_lstm.py", "snippet": "class HierarchicalLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device, num_strategies=13):\n super(HierarchicalLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.num_strategies = num_strategies\n\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n self.prev_actions_dim = self.num_actions if self.use_prev_action else 0\n\n self.strategy_dim = self.num_strategies\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.policy_hidden_dim = 256\n self.strategy_hidden_dim = 128\n self.hidden_dim = 512\n\n self.strategy_encoder = nn.Linear(self.hidden_dim, self.num_strategies)\n\n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n self.policies = nn.ModuleDict(\n [[f'{i}', nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions))] for i in range(self.num_strategies)]\n )\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.action_masks = {}\n\n self.gumbel_softmax_tau = 1\n if 'gumbel_softmax_tau' in flags:\n self.gumbel_softmax_tau = flags.gumbel_softmax_tau\n\n def initial_state(self, batch_size=1):\n return tuple(\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size)\n for _ in range(2)\n )\n\n def forward(self, inputs, core_state, last_ttyrec_data=None, return_strategywise_logits=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n st = [\n self.topline_encoder( topline.float(memory_format=torch.contiguous_format).view(T * B, -1)),\n self.bottomline_encoder(bottom_line.float(memory_format=torch.contiguous_format).view(T * B, 
-1)),\n self.screen_encoder(inputs[\"screen_image\"].float(memory_format=torch.contiguous_format).view(T * B, C, H, W)),\n ]\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.num_actions).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n\n core_input = st.view(T, B, -1)\n core_output_list = []\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in zip(core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n core_state = tuple(nd * t for t in core_state)\n output, core_state = self.core(input.unsqueeze(0), core_state)\n core_output_list.append(output)\n\n core_output = torch.flatten(torch.cat(core_output_list), 0, 1)\n strategy_logits = self.strategy_encoder(core_output).view(T * B, -1)\n\n all_policy_logits = torch.stack([self.policies[str(i)](core_output) for i in range(self.num_strategies)], axis=0)\n strategies = F.gumbel_softmax(strategy_logits, tau=self.gumbel_softmax_tau, hard=True).bool().unsqueeze(-1).expand((-1, -1, all_policy_logits.shape[-1]))\n out_policy_logits = torch.sum(torch.mul(all_policy_logits, torch.swapaxes(strategies, 0, 1)), axis=0).view(T, B, -1)\n out_action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long().view(T, B)\n\n\n # -- [B' x 1]\n baseline = self.baseline(core_output)\n baseline = baseline.view(T, B)\n strategy_logits = strategy_logits.view(T, B, -1)\n\n version = torch.ones_like(out_action) * self.version\n\n output = dict(\n policy_logits=out_policy_logits,\n all_policy_logits=torch.swapaxes(torch.swapaxes(all_policy_logits, 0, 1), 1, 2),\n baseline=baseline,\n action=out_action,\n version=version,\n strategy_logits=strategy_logits,\n )\n\n if return_strategywise_logits:\n output['strategywise_policy_logits'] = all_policy_logits\n\n return (output, core_state)" }, { "identifier": "HierarchicalTransformerLSTM", "path": "models/hierarchical_transformer_lstm.py", "snippet": "class HierarchicalTransformerLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device, num_strategies=20):\n super(HierarchicalTransformerLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n ## second copy of encoders\n self.topline_encoder2 = TopLineEncoder()\n self.bottomline_encoder2 = torch.jit.script(BottomLinesEncoder())\n self.screen_encoder2 = torch.jit.script(ScreenEncoder(screen_shape))\n ###\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim\n ]\n )\n\n self.hidden_dim = 512\n self.policy_hidden_dim = 256\n self.strategy_dim = num_strategies\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n self.num_attention_heads = flags.num_attention_heads\n self.num_transformer_encoder_layers = flags.num_transformer_layers\n \n self.hidden_dim = self.h_dim + self.hidden_dim\n core_trnsfrmr_layer = 
nn.TransformerEncoderLayer(d_model=self.hidden_dim, nhead=self.num_attention_heads, norm_first=True, activation='gelu')\n self.core_trnsfrmr = nn.TransformerEncoder(core_trnsfrmr_layer, num_layers=self.num_transformer_encoder_layers)\n self.positional_encoder = PositionalEncoding(self.hidden_dim)\n\n self.strategy_encoder = nn.Linear(self.hidden_dim, self.strategy_dim)\n\n self.policies = nn.ModuleDict(\n [[f'{i}', nn.Sequential(nn.Linear(self.hidden_dim, self.policy_hidden_dim),\n nn.ELU(),\n nn.Linear(self.policy_hidden_dim, self.num_actions))] for i in range(self.strategy_dim)]\n )\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n self.wrapped = False\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0\n torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1\n \n \n )\n\n def get_encodings(self, inputs, for_lstm=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n if for_lstm or not hasattr(self, 'topline_encoder2'):\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n else:\n st = [\n self.topline_encoder2(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder2(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder2(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n return st\n\n\n\n def forward(self, inputs, core_state=None, last_ttyrec_data=None, return_strategywise_logits=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n st_lstm = self.get_encodings(inputs, for_lstm=True)\n st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)\n\n T_eff = T\n\n if not last_ttyrec_data is None and self.training:\n last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)\n last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)\n T_eff = T * 2 \n st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n self.wrapped = True\n\n c0, c1, c2, c3 = core_state\n trnsfrmr_core_state = c0, c1\n lstm_core_state = c2, c3\n\n lstm_core_input = st_lstm.view(T_eff, B, -1)\n lstm_core_output_list = []\n \n if self.wrapped:\n notdone = torch.cat([(~last_ttyrec_data[\"done\"]).float(), (~inputs[\"done\"]).float()], axis=0)\n else:\n notdone = (~inputs[\"done\"]).float()\n\n for input, nd in 
zip(lstm_core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n lstm_core_state = tuple(nd * t for t in lstm_core_state)\n output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)\n lstm_core_output_list.append(output)\n\n lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)\n\n st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)\n\n trnsfrmr_core_input = st.reshape(T_eff, B, -1)\n if not self.training:\n prev_mask, prev_encodings = trnsfrmr_core_state\n prev_mask = prev_mask.squeeze(0)\n trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)\n trnsfrmr_core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]\n )\n trnsfrmr_core_mask[:, -1, -1] = 1\n trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0), \n trnsfrmr_core_input.detach().clone()\n )\n for i in range(B):\n trnsfrmr_core_mask[i].fill_diagonal_(1)\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = trnsfrmr_core_input.shape[0]\n elif self.wrapped:\n mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)\n mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))\n trnsfrmr_core_mask = mask1.long() + mask2.long()\n trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n else:\n trnsfrmr_core_mask = generate_square_subsequent_mask(T, trnsfrmr_core_input.device)\n\n trnsfrmr_core_input = self.positional_encoder(trnsfrmr_core_input)\n trnsfrmr_core_output = self.core_trnsfrmr(trnsfrmr_core_input, trnsfrmr_core_mask)\n\n trnsfrmr_core_output = torch.flatten(trnsfrmr_core_output, 0, 1)\n\n if self.wrapped:\n strategy_logits = self.strategy_encoder(trnsfrmr_core_output).view(2 * T * B, -1)\n else:\n strategy_logits = self.strategy_encoder(trnsfrmr_core_output).view(T * B, -1)\n\n\n all_policy_logits = torch.stack([self.policies[str(i)](trnsfrmr_core_output) for i in range(self.strategy_dim)], axis=0)\n\n\n # -- [B' x 1]\n baseline = self.baseline(trnsfrmr_core_output)\n\n strategy_sample = F.gumbel_softmax(strategy_logits, tau=1.0, hard=True)\n strategies = strategy_sample.bool().unsqueeze(-1).expand((-1, -1, all_policy_logits.shape[-1]))\n\n out_policy_logits = torch.sum(torch.mul(all_policy_logits, torch.swapaxes(strategies, 0, 1)), axis=0)\n action = torch.multinomial(F.softmax(out_policy_logits.reshape(T * B, -1), dim=1), num_samples=1).long()\n\n if self.wrapped:\n out_policy_logits = out_policy_logits.view(2*T, B, -1)[-T:].view(T * B, -1)\n baseline = baseline.view(2*T, B, -1)[-T:].view(T * B, -1)\n strategy_logits = strategy_logits.view(2 * T, B, -1)[-T:].view(T * B, -1)\n all_policy_logits = all_policy_logits.view(self.strategy_dim, 2 * T, B, -1)[:, 
-T:].view(self.strategy_dim, T * B, -1)\n\n out_policy_logits = out_policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n strategy_logits = strategy_logits.view(T, B, -1)\n strategy = torch.argmax(strategy_logits, axis=-1).long()\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n out_policy_logits = out_policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n strategy_logits = strategy_logits[-1].unsqueeze(0)\n strategy = strategy[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=out_policy_logits,\n baseline=baseline,\n action=action,\n strategy=strategy,\n version=version,\n strategy_logits=strategy_logits\n )\n\n if return_strategywise_logits:\n output['strategywise_policy_logits'] = all_policy_logits\n\n c0, c1 = trnsfrmr_core_state\n c2, c3 = lstm_core_state\n\n core_state = (c0, c1, c2, c3)\n\n self.wrapped = False\n return (output, core_state)" }, { "identifier": "TransformerLSTM", "path": "models/transformer_lstm.py", "snippet": "class TransformerLSTM(nn.Module):\n def __init__(self, shape, action_space, flags, device):\n super(TransformerLSTM, self).__init__()\n\n self.flags = flags\n self.num_actions = len(action_space)\n self.use_prev_action = flags.use_prev_action\n\n self.topline_encoder = TopLineEncoder()\n self.bottomline_encoder = torch.jit.script(BottomLinesEncoder())\n\n pixel_size = flags.pixel_size\n if flags.crop_dim == 0:\n screen_shape = (24 * pixel_size, 80 * pixel_size)\n else:\n screen_shape = (flags.crop_dim * pixel_size, flags.crop_dim * pixel_size)\n\n self.screen_encoder = torch.jit.script(ScreenEncoder(screen_shape))\n\n ## second copy of encoders\n self.topline_encoder2 = TopLineEncoder()\n self.bottomline_encoder2 = torch.jit.script(BottomLinesEncoder())\n self.screen_encoder2 = torch.jit.script(ScreenEncoder(screen_shape))\n ###\n\n self.prev_actions_dim = 128 if self.use_prev_action else 0\n\n self.h_dim = sum(\n [\n self.topline_encoder.hidden_dim,\n self.bottomline_encoder.hidden_dim,\n self.screen_encoder.hidden_dim,\n self.prev_actions_dim,\n ]\n )\n\n self.hidden_dim = 512\n \n self.core = nn.LSTM(self.h_dim, self.hidden_dim, num_layers=1)\n\n self.num_attention_heads = flags.num_attention_heads\n self.num_transformer_encoder_layers = flags.num_transformer_layers\n self.hidden_dim = self.h_dim + self.hidden_dim\n core_trnsfrmr_layer = nn.TransformerEncoderLayer(d_model=self.hidden_dim, nhead=self.num_attention_heads, norm_first=True, activation='gelu')\n self.core_trnsfrmr = nn.TransformerEncoder(core_trnsfrmr_layer, num_layers=self.num_transformer_encoder_layers) # test round 1 uses 4 layers\n self.positional_encoder = PositionalEncoding(self.hidden_dim)\n \n self.policy = nn.Linear(self.hidden_dim, self.num_actions)\n\n self.baseline = nn.Linear(self.hidden_dim, 1)\n self.version = 0\n self.inference_unroll_length = flags.unroll_length if not 'inference_unroll_length' in flags else flags.inference_unroll_length\n\n self.wrapped = False\n\n def initial_state(self, batch_size=1):\n return (\n torch.zeros(1, batch_size, self.inference_unroll_length, self.inference_unroll_length), # transformer portion 0\n torch.rand(self.inference_unroll_length, batch_size, self.hidden_dim), # transformer portion 1\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size), # lstm portion 0\n torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) # lstm portion 1\n \n \n )\n\n def 
get_encodings(self, inputs, for_lstm=False):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n\n topline = inputs[\"tty_chars\"][..., 0, :]\n bottom_line = inputs[\"tty_chars\"][..., -2:, :]\n\n if for_lstm or not hasattr(self, 'topline_encoder2'):\n st = [\n self.topline_encoder(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n else:\n st = [\n self.topline_encoder2(\n topline.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.bottomline_encoder2(\n bottom_line.float(memory_format=torch.contiguous_format).view(T * B, -1)\n ),\n self.screen_encoder2(\n inputs[\"screen_image\"]\n .float(memory_format=torch.contiguous_format)\n .view(T * B, C, H, W)\n ),\n ]\n\n if self.use_prev_action:\n st.append(torch.nn.functional.one_hot(inputs[\"prev_action\"], self.prev_actions_dim).view(T * B, -1))\n\n st = torch.cat(st, dim=1)\n return st\n\n\n\n def forward(self, inputs, core_state=None, last_ttyrec_data=None):\n T, B, C, H, W = inputs[\"screen_image\"].shape\n st_lstm = self.get_encodings(inputs, for_lstm=True)\n st_trnsfrmr = self.get_encodings(inputs, for_lstm=False)\n\n T_eff = T\n\n if not last_ttyrec_data is None and self.training:\n last_st_lstm = self.get_encodings(last_ttyrec_data, for_lstm=True)\n last_st_trnsfrmr = self.get_encodings(last_ttyrec_data, for_lstm=False)\n T_eff = T * 2 \n st_lstm = torch.cat([last_st_lstm.reshape(T, B, -1), st_lstm.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n st_trnsfrmr = torch.cat([last_st_trnsfrmr.reshape(T, B, -1), st_trnsfrmr.reshape(T, B, -1)], axis=0).reshape(T_eff * B, -1)\n self.wrapped = True\n\n c0, c1, c2, c3 = core_state\n trnsfrmr_core_state = c0, c1\n lstm_core_state = c2, c3\n\n lstm_core_input = st_lstm.view(T_eff, B, -1)\n lstm_core_output_list = []\n \n if self.wrapped:\n notdone = torch.cat([(~last_ttyrec_data[\"done\"]).float(), (~inputs[\"done\"]).float()], axis=0)\n else:\n notdone = (~inputs[\"done\"]).float()\n\n notdone_mask = torch.ones((T_eff, T_eff)).repeat(B, 1, 1).to(lstm_core_input.device)\n\n i = 0\n for input, nd in zip(lstm_core_input.unbind(), notdone.unbind()):\n # Reset core state to zero whenever an episode ended.\n # Make `done` broadcastable with (num_layers, B, hidden_size)\n nd = nd.view(1, -1, 1)\n lstm_core_state = tuple(nd * t for t in lstm_core_state)\n output, lstm_core_state = self.core(input.unsqueeze(0), lstm_core_state)\n lstm_core_output_list.append(output)\n\n if i < T_eff-1:\n nd = notdone[i].view(-1, 1, 1)\n notdone_mask[:, i+1:, :i+1] *= nd\n\n i += 1\n\n lstm_core_output = torch.flatten(torch.cat(lstm_core_output_list), 0, 1)\n\n st = torch.cat([st_trnsfrmr, lstm_core_output], dim=1)\n\n trnsfrmr_core_input = st.reshape(T_eff, B, -1)\n if not self.training:\n prev_mask, prev_encodings = trnsfrmr_core_state\n prev_mask = prev_mask.squeeze(0)\n trnsfrmr_core_input = torch.cat([prev_encodings[1:], trnsfrmr_core_input], axis=0)\n trnsfrmr_core_mask = torch.stack(\n [torch.cat([torch.cat([prev_mask[i, 1:, 1:], prev_mask[i, -1, 1:].unsqueeze(0)], axis=0) * notdone[-1, i], torch.zeros((self.inference_unroll_length, 1)).to(trnsfrmr_core_input.device)], axis=1) for i in range(B)]\n )\n trnsfrmr_core_mask[:, -1, -1] = 1\n trnsfrmr_core_state = (trnsfrmr_core_mask.detach().clone().unsqueeze(0), \n 
trnsfrmr_core_input.detach().clone()\n )\n for i in range(B):\n trnsfrmr_core_mask[i].fill_diagonal_(1)\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n trnsfrmr_core_mask = torch.repeat_interleave(trnsfrmr_core_mask, self.num_attention_heads, dim=1).reshape(B * self.num_attention_heads, self.inference_unroll_length, self.inference_unroll_length)\n T = trnsfrmr_core_input.shape[0]\n elif self.wrapped: \n mask1 = (torch.triu(torch.ones(T_eff, T_eff)) == 1).transpose(0, 1)\n mask2 = F.pad((torch.triu(torch.ones(T, T)) == 1).transpose(0, 1), (0, T, T, 0))\n trnsfrmr_core_mask = mask1.long() + mask2.long()\n trnsfrmr_core_mask[trnsfrmr_core_mask != 1] = 0\n trnsfrmr_core_mask = (trnsfrmr_core_mask.float().masked_fill(trnsfrmr_core_mask == 0, float(\"-inf\")).masked_fill(trnsfrmr_core_mask == 1, float(0.0))).to(device=trnsfrmr_core_input.device)\n else:\n trnsfrmr_core_mask = generate_square_subsequent_mask(T, trnsfrmr_core_input.device)\n\n\n trnsfrmr_core_input = self.positional_encoder(trnsfrmr_core_input)\n trnsfrmr_core_output = self.core_trnsfrmr(trnsfrmr_core_input, trnsfrmr_core_mask)\n trnsfrmr_core_output = torch.flatten(trnsfrmr_core_output, 0, 1)\n\n # -- [B' x A]\n policy_logits = self.policy(trnsfrmr_core_output)\n\n # -- [B' x 1]\n baseline = self.baseline(trnsfrmr_core_output)\n\n if self.wrapped:\n policy_logits = policy_logits.view(2*T, B, -1)[-T:].view(T * B, -1)\n baseline = baseline.view(2*T, B, -1)[-T:].view(T * B, -1)\n\n action = torch.multinomial(F.softmax(policy_logits + 1e-5, dim=1), num_samples=1)\n\n policy_logits = policy_logits.view(T, B, -1)\n baseline = baseline.view(T, B)\n action = action.view(T, B)\n version = torch.ones_like(action) * self.version\n\n\n if not self.training:\n action = action[-1].unsqueeze(0)\n baseline = baseline[-1].unsqueeze(0)\n policy_logits = policy_logits[-1].unsqueeze(0)\n version = version[-1].unsqueeze(0)\n\n output = dict(\n policy_logits=policy_logits,\n baseline=baseline,\n action=action,\n version=version,\n )\n\n c0, c1 = trnsfrmr_core_state\n c2, c3 = lstm_core_state\n\n core_state = (c0, c1, c2, c3)\n\n self.wrapped = False\n return (output, core_state)" } ]
import omegaconf
import os
import pathlib
import pdb
import sys
import torch
from .cdgpt5 import CDGPT5
from .cleaved_hierarchical_policy import CleavedHierarchicalPolicy
from .flat_transformer import FlatTransformer
from .hierarchical_lstm import HierarchicalLSTM
from .hierarchical_transformer_lstm import HierarchicalTransformerLSTM
from .transformer_lstm import TransformerLSTM
from nle.env.base import DUNGEON_SHAPE
from omegaconf import OmegaConf
from tasks import ENVS
11,179
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))

MODELS = [
    CDGPT5,
    HierarchicalLSTM,
    HierarchicalTransformerLSTM,
    TransformerLSTM,
    FlatTransformer
]

MODELS_LOOKUP = {c.__name__: c for c in MODELS}


def initialize_weights(flags, model):
    def _initialize_weights(layer):
        if hasattr(layer, "bias") and isinstance(
            layer.bias, torch.nn.parameter.Parameter
        ):
            layer.bias.data.fill_(0)
        if flags.initialisation == "orthogonal":
            if type(layer) in [torch.nn.Conv2d, torch.nn.Linear]:
                torch.nn.init.orthogonal_(layer.weight.data, gain=1.0)
        elif flags.initialisation == "xavier_uniform":
            if type(layer) in [torch.nn.Conv2d, torch.nn.Linear]:
                torch.nn.init.xavier_uniform_(layer.weight.data, gain=1.0)
            else:
                pass
        else:
            pass

    model.apply(_initialize_weights)


def load_flags(load_path):
    out = torch.load(load_path)
    return omegaconf.OmegaConf.create(out['flags'])


def create_model(flags, device, model_type=None):
    model_type = model_type if not model_type is None else flags.model
    try:
        model_cls = MODELS_LOOKUP[model_type]
    except KeyError:
        raise NotImplementedError("model=%s" % flags.model) from None
    action_space = ENVS[flags.env.name](savedir=None).actions
    model = model_cls(DUNGEON_SHAPE, action_space, flags, device)
    model.to(device=device)
    initialize_weights(flags, model)
    return model


def load_pt_model_and_flags(load_path, device):
    out = torch.load(load_path, map_location=device)
    flags = omegaconf.OmegaConf.create(out['flags'])
    if flags.model == 'CleavedHierarchicalPolicy':
        assert len(out['submodule_flags']) > 0
        submodule_flags = omegaconf.OmegaConf.create(out['submodule_flags'])
        high_level_model = create_model(submodule_flags, device, model_type=flags.high_level_model)
        low_level_model = create_model(flags, device, model_type=flags.low_level_model)
base_path = str(pathlib.Path().resolve())
hihack_path = os.path.join(base_path[:base_path.find('hihack')], 'hihack')
sys.path.insert(0, os.path.join(hihack_path, 'dungeonsdata-neurips2022/experiment_code/hackrl'))

MODELS = [
    CDGPT5,
    HierarchicalLSTM,
    HierarchicalTransformerLSTM,
    TransformerLSTM,
    FlatTransformer
]

MODELS_LOOKUP = {c.__name__: c for c in MODELS}


def initialize_weights(flags, model):
    def _initialize_weights(layer):
        if hasattr(layer, "bias") and isinstance(
            layer.bias, torch.nn.parameter.Parameter
        ):
            layer.bias.data.fill_(0)
        if flags.initialisation == "orthogonal":
            if type(layer) in [torch.nn.Conv2d, torch.nn.Linear]:
                torch.nn.init.orthogonal_(layer.weight.data, gain=1.0)
        elif flags.initialisation == "xavier_uniform":
            if type(layer) in [torch.nn.Conv2d, torch.nn.Linear]:
                torch.nn.init.xavier_uniform_(layer.weight.data, gain=1.0)
            else:
                pass
        else:
            pass

    model.apply(_initialize_weights)


def load_flags(load_path):
    out = torch.load(load_path)
    return omegaconf.OmegaConf.create(out['flags'])


def create_model(flags, device, model_type=None):
    model_type = model_type if not model_type is None else flags.model
    try:
        model_cls = MODELS_LOOKUP[model_type]
    except KeyError:
        raise NotImplementedError("model=%s" % flags.model) from None
    action_space = ENVS[flags.env.name](savedir=None).actions
    model = model_cls(DUNGEON_SHAPE, action_space, flags, device)
    model.to(device=device)
    initialize_weights(flags, model)
    return model


def load_pt_model_and_flags(load_path, device):
    out = torch.load(load_path, map_location=device)
    flags = omegaconf.OmegaConf.create(out['flags'])
    if flags.model == 'CleavedHierarchicalPolicy':
        assert len(out['submodule_flags']) > 0
        submodule_flags = omegaconf.OmegaConf.create(out['submodule_flags'])
        high_level_model = create_model(submodule_flags, device, model_type=flags.high_level_model)
        low_level_model = create_model(flags, device, model_type=flags.low_level_model)
model = CleavedHierarchicalPolicy(flags, high_level_model, low_level_model)
1
2023-10-23 15:44:32+00:00
16k
nchen909/Pass-Tuning
main.py
[ { "identifier": "add_args", "path": "configs.py", "snippet": "def add_args(parser):\n parser.add_argument(\"--task\", type=str, required=True,\n choices=['summarize', 'translate', 'refine', 'generate', 'defect', 'clone'])# without complete\n parser.add_argument(\"--sub_task\", type=str, default='')\n parser.add_argument(\"--add_lang_ids\", action='store_true')\n # plbart unfinished\n parser.add_argument(\"--model_name\", default=\"roberta\",\n type=str, choices=['roberta', 'codebert', 'graphcodebert', 'bart', 'plbart', 't5', 'codet5','unixcoder'])\n parser.add_argument('--seed', type=int, default=1234,\n help=\"random seed for initialization\") # previous one 42\n parser.add_argument(\"--local_rank\", type=int, default=-1,\n help=\"For distributed training: local_rank\")\n parser.add_argument(\"--no_cuda\", action='store_true',\n help=\"Avoid using CUDA when available\")\n parser.add_argument('--huggingface_locals', type=str, default='data/huggingface_locals',\n help=\"directory to save huggingface models\")\n parser.add_argument(\"--cache_path\", type=str, default='cache_data')\n parser.add_argument(\"--res_dir\", type=str, default='results',\n help='directory to save fine-tuning results')\n parser.add_argument(\"--res_fn\", type=str, default='')\n parser.add_argument(\"--model_dir\", type=str, default='saved_models',\n help='directory to save fine-tuned models')\n parser.add_argument(\"--summary_dir\", type=str, default='tensorboard',\n help='directory to save tensorboard summary')\n parser.add_argument(\"--data_num\", type=int, default=-1,\n help='number of data instances to use, -1 for full data')\n parser.add_argument(\"--gpu\", type=int, default=0,\n help='index of the gpu to use in a cluster')\n parser.add_argument(\"--data_dir\", default='data', type=str)\n parser.add_argument(\"--output_dir\", default='outputs', type=str,\n help=\"The output directory where the model predictions and checkpoints will be written.\")\n parser.add_argument(\"--do_train\", action='store_true',\n help=\"Whether to run eval on the train set.\")\n parser.add_argument(\"--do_eval\", action='store_true',\n help=\"Whether to run eval on the dev set.\")\n parser.add_argument(\"--do_test\", action='store_true',\n help=\"Whether to run eval on the test set.\")\n parser.add_argument(\"--add_task_prefix\", action='store_true',\n help=\"Whether to add task prefix for t5 and codet5\")\n parser.add_argument(\"--save_last_checkpoints\", action='store_true')\n parser.add_argument(\"--always_save_model\", action='store_true')\n parser.add_argument(\"--do_eval_bleu\", action='store_true',\n help=\"Whether to evaluate bleu on dev set.\")\n parser.add_argument(\"--start_epoch\", default=0, type=int)\n parser.add_argument(\"--num_train_epochs\", default=100, type=int)\n parser.add_argument(\"--patience\", default=5, type=int)\n parser.add_argument('--gradient_accumulation_steps', type=int, default=1,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--lr\", default=5e-5, type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--beam_size\", default=10, type=int,\n help=\"beam size for beam search\")\n parser.add_argument(\"--weight_decay\", default=0.0, type=float,\n help=\"Weight deay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-8, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n 
parser.add_argument(\"--warmup_steps\", default=100, type=int,\n help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\"--batch_size\", default=8, type=int,\n help=\"Batch size per GPU/CPU for training.\")\n parser.add_argument(\"--dev_batch_size\", default=32, type=int,\n help=\"Batch size per GPU/CPU for validating.\")\n parser.add_argument(\"--test_batch_size\", default=32, type=int,\n help=\"Batch size per GPU/CPU for testing.\")\n parser.add_argument(\"--attention_batch_size\", default=100, type=int,\n help=\"Batch size per GPU/CPU for computing attention.\")\n parser.add_argument(\"--is_clone_sample\", default=0, type=int,\n help=\"clone&defect data is large, 0 for not sample and 1 for sample\") \n # parser.add_argument('--layer_num', type=int, default=-1,\n # help=\"layer which attention is concerned, -1 for last layer, else for all 0-11 layers\")\n # parser.add_argument('--quantile_threshold', type=float, default=0.75,\n # help=\"threshold of quantile which we concern attention should be gt and distance should be lt\")\n # parser.add_argument('--frequent_type', default=1, type=int, choices=[0,1],\n # help=\"whether only use frequent_type\")\n # parser.add_argument('--upgraded_ast', default=1, type=int, choices=[0,1],\n # help=\"whether to use upgraded ast\")\n parser.add_argument('--few_shot', default=-1, type=int,\n help=\"use k shot, -1 for full data\")\n\n parser.add_argument(\"--prefix_tuning\", default=False, type=str,\n help=\"parameter-efficient prefix tuning, pass_tuning refers to GAT prefix,\\\n GCN refers to GCN prefix,prefix_tuning refers to MLP prefix\",\\\n choices=['pass_tuning','GCN' ,'prefix_tuning', 'False'])\n parser.add_argument(\"--adapter_tuning\", default=0, type=int,\n help=\"parameter-efficient adapter tuning, 0 for not tuning, 1 for tuning\")#only support codet5 currently\n parser.add_argument(\"--bitfit\", default=0, type=int,\n help=\"parameter-efficient bitfit, 0 for not tuning, 1 for tuning\")\n \n\n parser.add_argument(\"--old_prefix_dir\", default='old_data_prefix', type=str,\n help=\"directory to score graphmetadata.txt\")\n parser.add_argument(\"--prefix_dir\", default='data_prefix', type=str,\n help=\"directory to score graphmetadata.txt\")\n parser.add_argument(\"--prefix_token_level\", default='token', type=str,\n help=\"how to parse initial prefix code, choose 'token' or 'subtoken' level of ids/init_dist_weight\")\n parser.add_argument(\"--gat_token_num\", default=32, type=int,\n help=\"number of tokens to use for gat, must be divided with max_source_length in encoder2decoder with no remainder\")\n parser.add_argument(\"--fix_model_param\", default=1, type=int,\n help=\"when prefix_tuning, fix model param or not \")\n \n parser.add_argument(\"--knowledge_usage\", default='separate', type=str,\n help=\"for t5&bart, how knowledge prefix use: separate or concatenate\")\n parser.add_argument(\"--use_description\", default=0, type=int,\n help=\"use_description or not \")\n parser.add_argument(\"--concatenate_description\", default=0, type=int,\n help=\"concatenate_description or not \")\n parser.add_argument(\"--map_description\", default=0, type=int,\n help=\"map_description or not \")\n parser.add_argument(\"--prefix_dropout\", default=0.0, type=float,\n help=\"prefix_dropout.\")\n parser.add_argument(\"--retriever_mode\", default='retrieve', type=str,\n help=\"how to retrieve code piece to init GAT, choose from random or retrieve\",\n choices=['random', 'retrieve','old'])\n parser.add_argument(\"--qiangtamadeka\", default=0, 
type=int,\n help=\"qiangtamadeka or not \")\n parser.add_argument(\"--adjcency_mode\", default='sast', type=str,\n help=\"how code distance matrix input as GAT adjcency matrix\",choices=['fully-connected','sast'])\n #######################注意改成真随机!!!!!!\n args = parser.parse_args()\n return args" }, { "identifier": "set_dist", "path": "configs.py", "snippet": "def set_dist(args):\n # Setup CUDA, GPU & distributed training\n if args.local_rank == -1 or args.no_cuda:\n device = torch.device(\n \"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n args.n_gpu = torch.cuda.device_count()\n else:\n # Setup for distributed data parallel\n torch.cuda.set_device(args.local_rank)\n device = torch.device(\"cuda\", args.local_rank)\n torch.distributed.init_process_group(backend='nccl')\n args.n_gpu = 1\n cpu_count = multiprocessing.cpu_count()\n logger.warning(\"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, cpu count: %d\",\n args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), cpu_count)\n args.device = device\n args.cpu_count = cpu_count" }, { "identifier": "set_seed", "path": "configs.py", "snippet": "def set_seed(args):\n \"\"\"set random seed.\"\"\"\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.n_gpu > 0:\n torch.cuda.manual_seed_all(args.seed)" }, { "identifier": "set_hyperparas", "path": "configs.py", "snippet": "def set_hyperparas(args):\n if args.qiangtamadeka:\n args.few_shot = -1\n\n args.adam_epsilon = 1e-8\n args.beam_size = 10\n args.gradient_accumulation_steps = 1\n args.weight_decay = 0.0\n if args.model_name in ['t5', 'codet5']:\n lr=2e-5\n elif args.model_name in ['bart', 'plbart','unixcoder']:\n lr=2e-5# 5e-5\n elif args.model_name in ['graphcodebert']:\n lr=2e-5# 5e-5(in repo)#1e-4(in plbartpaper)\n if args.task == 'summarize':\n args.data_num = args.few_shot if args.few_shot > 0 else -1\n args.lr = lr if not args.prefix_tuning else 1e-4#2e-3\n args.max_source_length = 256\n args.max_target_length = 128\n elif args.task == 'translate':\n args.data_num = args.few_shot if args.few_shot > 0 else -1\n if args.model_name in ['t5', 'codet5'] and args.sub_task == 'java-cs':\n args.lr = lr if not args.prefix_tuning else 5e-4#0224#2e-3\n else:\n args.lr = lr if not args.prefix_tuning else 1e-4#0224#2e-3\n args.max_source_length = 320\n args.max_target_length = 256\n elif args.task == 'refine':\n args.data_num = args.few_shot if args.few_shot > 0 else -1\n args.lr = lr if not args.prefix_tuning else 1e-4#0224#2e-3\n if args.sub_task == 'small':\n args.max_source_length = 130\n args.max_target_length = 120\n else:\n args.max_source_length = 240\n args.max_target_length = 240\n elif args.task == 'generate':\n args.data_num = args.few_shot if args.few_shot > 0 else -1\n args.lr = lr if not args.prefix_tuning else 5e-4#0224#2e-3\n args.max_source_length = 320\n args.max_target_length = 150\n elif args.task == 'complete':\n args.data_num = args.few_shot if args.few_shot > 0 else -1\n args.lr = 1e-5 if not args.prefix_tuning else 1e-4#1e-3\n args.max_source_length = 256\n args.max_target_length = 256\n elif args.task == 'defect':\n args.data_num = args.few_shot * 2 if args.few_shot > 0 else -1\n args.lr = 8e-6 if not args.prefix_tuning else 5e-4#0224 #8e-6#8e-4\n args.max_source_length = 512\n args.max_target_length = 3 # as do not need to add lang ids\n elif args.task == 'clone':\n args.data_num = args.few_shot * 2 if args.few_shot > 0 else -1 \n args.lr = lr if not args.prefix_tuning else 1e-4\n 
args.max_source_length = 512#512#400\n args.max_target_length = 512#512#400\n\n if args.few_shot == -1:\n if args.task in ['clone']:\n args.num_train_epochs = 2 if not args.prefix_tuning else 1# if not torch.cuda.is_available() else 2*torch.cuda.device_count()//2\n #for clone BCB full data!!!\n if args.is_clone_sample:\n args.num_train_epochs = args.num_train_epochs * 10\n args.patience = args.num_train_epochs*1000#min( 10, args.num_train_epochs//5*5)\n elif args.task in ['defect']:\n args.num_train_epochs = 120 if not args.prefix_tuning else 120 #old40 #if not torch.cuda.is_available() else 10*torch.cuda.device_count()//2*2\n # if args.is_clone_sample:\n # args.num_train_epochs = args.num_train_epochs * 10\n args.patience = args.num_train_epochs*1000#min( 10, args.num_train_epochs//5*5)\n elif args.task in ['generate','translate','summarize']:\n args.num_train_epochs = 30 if not torch.cuda.is_available() else 50*torch.cuda.device_count()#60\n args.patience = min( 10, args.num_train_epochs//5*2)\n else:#refine\n args.num_train_epochs = 30 if not torch.cuda.is_available() else 30*torch.cuda.device_count()#60\n args.patience = min( 10, args.num_train_epochs//5*2)\n if args.model_name in ['t5', 'codet5']:\n args.batch_size = 16 if not torch.cuda.is_available() else 16 * torch.cuda.device_count()\n elif args.model_name in ['bart', 'plbart']:\n args.batch_size = 32 if not torch.cuda.is_available() else 32 * torch.cuda.device_count()\n else:\n args.batch_size = 32 if not torch.cuda.is_available() else 32 * torch.cuda.device_count()\n if args.task=='refine' or args.task=='generate':\n args.batch_size *= 2\n if args.task in ['clone']:#old\n if args.prefix_tuning:\n args.batch_size = args.batch_size // 2 #4\n else:\n args.batch_size = args.batch_size // 2\n if args.task in ['summarize','translate','generate']:#3090\n if args.prefix_tuning:\n args.batch_size = args.batch_size // 2 #4\n else:\n args.batch_size = args.batch_size // 2\n if args.task in ['translate'] and args.model_name in ['bart', 'plbart']:#3090\n if args.prefix_tuning:\n args.batch_size = args.batch_size // 2 #4\n # args.batch_size //= 4\n # args.batch_size = 2#####################################################\n if args.qiangtamadeka:\n args.batch_size = 8\n args.num_train_epochs = 10000\n\n # args.batch_size = 128 if args.model_name not in ['t5', 'codet5'] else 16\n args.warmup_steps = 1000\n args.dev_batch_size = args.batch_size * 1 if not torch.cuda.is_available() else args.batch_size//torch.cuda.device_count()*1\n args.test_batch_size = args.batch_size * 1 if not torch.cuda.is_available() else args.batch_size//torch.cuda.device_count()*1\n if args.task in ['refine','generate'] and args.model_name in ['bart', 'plbart']:#3090\n if args.prefix_tuning:\n args.dev_batch_size = args.dev_batch_size // 2 #4\n args.test_batch_size = args.test_batch_size // 2\n else:\n args.dev_batch_size = args.dev_batch_size // 2\n args.test_batch_size = args.test_batch_size // 2\n\n elif args.few_shot < 128: #16,32,64\n args.num_train_epochs = 64\n # args.lr =5e-8\n args.batch_size = 2\n args.dev_batch_size = args.batch_size\n args.test_batch_size = args.batch_size\n elif args.few_shot < 512: #128,256\n args.num_train_epochs = 48\n args.batch_size = 4 if args.model_name not in ['t5', 'codet5'] else 4\n args.dev_batch_size = args.batch_size\n args.test_batch_size = args.batch_size\n elif args.few_shot < 2048: #512,1024\n args.num_train_epochs = 32\n args.batch_size = 8 if args.model_name not in ['t5', 'codet5'] else 4\n args.dev_batch_size = 
args.batch_size\n args.test_batch_size = args.batch_size" }, { "identifier": "bulid_or_load_gen_model", "path": "models.py", "snippet": "def bulid_or_load_gen_model(args):\n # checkpoint = MODEL_CHECKPOINTS[args.model_name]\n checkpoint = os.path.join(args.huggingface_locals, MODEL_LOCALS[args.model_name])\n if args.prefix_tuning:\n config_class, model_class, tokenizer_class = MODEL_CLASSES_PLG_PREFIX[args.model_name]\n elif args.adapter_tuning:\n config_class, model_class, tokenizer_class = MODEL_CLASSES_PLG_ADAPTER[args.model_name]\n elif args.bitfit:\n config_class, model_class, tokenizer_class = MODEL_CLASSES_PLG_BITFIT[args.model_name]\n else:\n config_class, model_class, tokenizer_class = MODEL_CLASSES_PLG[args.model_name]\n \n config = config_class.from_pretrained(checkpoint)\n tokenizer = tokenizer_class.from_pretrained(checkpoint)\n print(config.model_type)\n if args.model_name in ['roberta', 'codebert', 'graphcodebert']:\n encoder = model_class.from_pretrained(checkpoint, output_attentions=True)\n decoder_layer = nn.TransformerDecoderLayer(\n d_model=config.hidden_size, nhead=config.num_attention_heads)\n decoder = nn.TransformerDecoder(decoder_layer, num_layers=6)\n model = Seq2Seq(encoder=encoder, decoder=decoder, tokenizer=tokenizer, args=args,\n config=config, beam_size=args.beam_size, max_length=args.max_target_length,\n sos_id=tokenizer.cls_token_id, eos_id=tokenizer.sep_token_id)\n elif args.model_name in ['unixcoder']:\n # import!!!you must set is_decoder as True for generation in unixcoder!!!\n config.is_decoder = True\n encoder = model_class.from_pretrained(checkpoint, config=config)\n if args.task in ['complete']:\n if args.sub_task == \"python\":\n eos_ids = [tokenizer.sep_token_id]\n else:\n eos_ids = [tokenizer.convert_tokens_to_ids('Ġ;'), tokenizer.convert_tokens_to_ids('Ġ}'), tokenizer.convert_tokens_to_ids('Ġ{')]\n model=Seq2Seq4UniXcoder_completion(encoder=encoder,decoder=encoder,config=config, tokenizer=tokenizer, args=args,\n beam_size=args.beam_size,max_length=args.max_target_length,\n sos_id=tokenizer.cls_token_id,eos_id=eos_ids)\n elif args.task in ['generate']:\n model = Seq2Seq4UniXcoder_generation(encoder=encoder,decoder=encoder,config=config, tokenizer=tokenizer, args=args,\n beam_size=args.beam_size,max_length=args.max_target_length,\n sos_id=tokenizer.convert_tokens_to_ids([\"<mask0>\"])[0],eos_id=tokenizer.sep_token_id)\n elif args.task in ['summarize','translate','refine']:\n model = Seq2Seq4UniXcoder_e2d(encoder=encoder,decoder=encoder,config=config, tokenizer=tokenizer, args=args,\n beam_size=args.beam_size,max_length=args.max_target_length,\n sos_id=tokenizer.convert_tokens_to_ids([\"<mask0>\"])[0],eos_id=tokenizer.sep_token_id)\n \n elif args.model_name in ['t5', 'codet5','bart','plbart']:\n if hasattr(model_class,'from_pretrained'):\n model = model_class.from_pretrained(checkpoint, output_attentions=True)#, args=args, tokenizer=tokenizer\n else:# a wrapper model class\n args.pretrained_model_name_or_path= checkpoint\n model = model_class(args=args)\n if args.prefix_tuning:\n if hasattr(model,'code_prefix'):\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model.code_prefix.gat_layer), args.model_name)\n elif hasattr(model,'knowledge_trans'):\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model.knowledge_trans), args.model_name)\n elif hasattr(model.pretrain_model,'adapter'):\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n 
model.pretrain_model.adapter), args.model_name)\n else:\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model), args.model_name)\n # if hasattr(model,'pretrain_model'):\n # model = model.pretrain_model\n # # print(hasattr(model,'parameters'))\n elif args.adapter_tuning or args.bitfit:\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model), args.model_name)\n else:\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model), args.model_name)\n\n return config, model, tokenizer" }, { "identifier": "bulid_or_load_cls_model", "path": "models.py", "snippet": "def bulid_or_load_cls_model(args):\n # checkpoint = MODEL_CHECKPOINTS[args.model_name]\n checkpoint = os.path.join(args.huggingface_locals, MODEL_LOCALS[args.model_name])\n if args.prefix_tuning:\n # config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_name]\n config_class, model_class, tokenizer_class = MODEL_CLASSES_PLG_PREFIX[args.model_name]\n elif args.adapter_tuning:\n config_class, model_class, tokenizer_class = MODEL_CLASSES_PLG_ADAPTER[args.model_name]\n elif args.bitfit:\n config_class, model_class, tokenizer_class = MODEL_CLASSES_PLG_BITFIT[args.model_name]\n else:\n config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_name]\n config = config_class.from_pretrained(checkpoint)\n tokenizer = tokenizer_class.from_pretrained(checkpoint)\n # if args.model_name in ['unixcoder']:\n # model = model_class.from_pretrained(checkpoint, output_attentions=True)\n # model = Model4UniXcoder(model,config,tokenizer,args)\n if args.model_name not in ['t5', 'codet5','bart','plbart']:\n model = model_class.from_pretrained(checkpoint, output_attentions=True)\n else:\n if hasattr(model_class,'from_pretrained'):\n model = model_class.from_pretrained(checkpoint, output_attentions=True)\n else:# a wrapper model class\n args.pretrained_model_name_or_path= checkpoint\n model = model_class(args=args)\n # if not args.adapter_tuning and not args.bitfit:\n if args.task == 'defect':\n model = DefectModel(model, config, tokenizer, args)\n elif args.task == 'clone':\n # model.resize_token_embeddings(32000)\n model = CloneModel(model, config, tokenizer, args)\n\n if args.prefix_tuning:\n if hasattr(model,'code_prefix'):\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model.code_prefix.gat_layer), args.model_name)\n elif hasattr(model,'knowledge_trans'):\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model.knowledge_trans), args.model_name)\n elif hasattr(model,'pretrain_model') and hasattr(model.pretrain_model,'adapter'):\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model.pretrain_model.adapter), args.model_name)\n else:\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model), args.model_name)\n else:\n logger.info(\"Finish loading model [%s] parameters from %s\", get_model_size(\n model), args.model_name)\n\n return config, model, tokenizer" }, { "identifier": "get_filenames", "path": "utils.py", "snippet": "def get_filenames(data_root, task, sub_task, split=''):\n if task == 'generate':\n data_dir = '{}/{}'.format(data_root, task)\n train_fn = '{}/train.json'.format(data_dir)\n dev_fn = '{}/dev.json'.format(data_dir)\n test_fn = '{}/test.json'.format(data_dir)\n prefix_fn = '{}/prefix.json'.format(data_dir)\n elif task == 'summarize':\n data_dir = '{}/{}/{}'.format(data_root, task, sub_task)\n 
train_fn = '{}/train.jsonl'.format(data_dir)\n dev_fn = '{}/valid.jsonl'.format(data_dir)\n test_fn = '{}/test.jsonl'.format(data_dir)\n prefix_fn = '{}/prefix.jsonl'.format(data_dir)\n elif task == 'refine':\n data_dir = '{}/{}/{}'.format(data_root, task, sub_task)\n train_fn = '{}/train.buggy-fixed.buggy,{}/train.buggy-fixed.fixed'.format(\n data_dir, data_dir)\n dev_fn = '{}/valid.buggy-fixed.buggy,{}/valid.buggy-fixed.fixed'.format(\n data_dir, data_dir)\n test_fn = '{}/test.buggy-fixed.buggy,{}/test.buggy-fixed.fixed'.format(\n data_dir, data_dir)\n prefix_fn = '{}/prefix.java'.format(\n data_dir)\n elif task == 'translate':\n data_dir = '{}/{}'.format(data_root, task)\n if sub_task == 'cs-java':\n train_fn = '{}/train.java-cs.txt.cs,{}/train.java-cs.txt.java'.format(\n data_dir, data_dir)\n dev_fn = '{}/valid.java-cs.txt.cs,{}/valid.java-cs.txt.java'.format(\n data_dir, data_dir)\n test_fn = '{}/test.java-cs.txt.cs,{}/test.java-cs.txt.java'.format(\n data_dir, data_dir)\n prefix_fn = '{}/prefix.txt.java'.format(\n data_dir)\n else:\n train_fn = '{}/train.java-cs.txt.java,{}/train.java-cs.txt.cs'.format(\n data_dir, data_dir)\n dev_fn = '{}/valid.java-cs.txt.java,{}/valid.java-cs.txt.cs'.format(\n data_dir, data_dir)\n test_fn = '{}/test.java-cs.txt.java,{}/test.java-cs.txt.cs'.format(\n data_dir, data_dir)\n prefix_fn = '{}/prefix.txt.cs'.format(\n data_dir)\n elif task == 'clone':\n data_dir = '{}/{}'.format(data_root, task)\n train_fn = '{}/train.txt'.format(data_dir)\n dev_fn = '{}/valid.txt'.format(data_dir)\n test_fn = '{}/test.txt'.format(data_dir)\n prefix_fn = '{}/prefix.txt'.format(data_dir)\n elif task == 'defect':\n data_dir = '{}/{}'.format(data_root, task)\n train_fn = '{}/train.jsonl'.format(data_dir)\n dev_fn = '{}/valid.jsonl'.format(data_dir)\n test_fn = '{}/test.jsonl'.format(data_dir)\n prefix_fn = '{}/prefix.jsonl'.format(data_dir)\n if split == 'train':\n return train_fn\n elif split == 'dev':\n return dev_fn\n elif split == 'test':\n return test_fn\n elif split == 'prefix':\n return prefix_fn\n else:\n return train_fn, dev_fn, test_fn" }, { "identifier": "get_elapse_time", "path": "utils.py", "snippet": "def get_elapse_time(t0):\n elapse_time = time.time() - t0\n if elapse_time > 3600:\n hour = int(elapse_time // 3600)\n minute = int((elapse_time % 3600) // 60)\n return \"{}h{}m\".format(hour, minute)\n else:\n minute = int((elapse_time % 3600) // 60)\n return \"{}m\".format(minute)" }, { "identifier": "load_and_cache_gen_data", "path": "utils.py", "snippet": "def load_and_cache_gen_data(args, filename, pool, tokenizer, split_tag, only_src=False, is_sample=False):\n # cache the data into args.cache_path except it is sampled\n # only_src: control whether to return only source ids for bleu evaluating (dev/test)\n # return: examples (Example object), data (TensorDataset)\n data_tag = '_all' if args.data_num == -1 else '_%d' % args.data_num\n cache_fn = '{}/{}.pt'.format(args.cache_path,\n split_tag + ('_src' if only_src else '') + data_tag)\n\n examples = read_examples(filename, -1, args.task)\n \n # if is_sample and is_attention:\n # if args.few_shot <= len(examples):\n # examples = random.sample(examples, min(3000, len(examples)) if args.few_shot == -1 else args.few_shot)\n # else:\n # # for CodeTrans dataset, dev&test example len = 500, may smaller than few-shot case\n # # we compensate some examples from train set to fill examples to args.few_shot\n # examples_train = read_examples(args.train_filename, -1, args.task)\n # examples += 
random.sample(examples_train, args.few_shot - len(examples))\n # assert len(examples) == args.few_shot\n \n # args.warmup_steps = len(examples) / 100\n if split_tag!='test' and is_sample or args.few_shot != -1 :\n if args.few_shot <= len(examples):\n sample_num = min(5000, len(examples))\n # if args.task=='generate':#evalnum_before2000\n # sample_num = min(1500, len(examples)//2)\n # elif args.task=='refine':#evalnum_before5000\n # sample_num = min(1500, len(examples)//4)\n if split_tag=='train':\n examples = random.sample(examples, sample_num if args.few_shot == -1 else args.few_shot)\n else:\n examples = random.sample(examples, sample_num if args.few_shot == -1 else args.few_shot)\n else:\n # for CodeTrans dataset, dev&test example len = 500, may smaller than few-shot case\n # we compensate some examples from train set to fill examples to args.few_shot\n examples_train = read_examples(args.train_filename, -1, args.task)\n examples += random.sample(examples_train, args.few_shot - len(examples))\n assert len(examples) == args.few_shot\n args.warmup_steps = len(examples) / 100\n if split_tag == 'train':\n calc_stats(examples, tokenizer, is_tokenize=True)\n else:\n calc_stats(examples)\n if os.path.exists(cache_fn) and not is_sample and args.few_shot == -1:\n logger.info(\"Load cache data from %s\", cache_fn)\n data = torch.load(cache_fn)\n else:\n if is_sample:\n logger.info(\n \"Sample %d data for computing bleu/attention from %s\", len(examples),filename)\n elif args.data_num == -1:\n logger.info(\"Create cache data into %s\", cache_fn)\n tuple_examples = [(example, idx, tokenizer, args, split_tag)\n for idx, example in enumerate(examples)]\n f_=partial(convert_examples_to_features,args)\n features = pool.map(f_, tqdm(\n tuple_examples, total=len(tuple_examples),desc=\"Convert examples to features\"))\n all_source_ids = torch.tensor(\n [f.source_ids for f in features], dtype=torch.long)\n if split_tag == 'test' or only_src:\n data = TensorDataset(all_source_ids)\n else:\n all_target_ids = torch.tensor(\n [f.target_ids for f in features], dtype=torch.long)\n data = TensorDataset(all_source_ids, all_target_ids)\n if args.local_rank in [-1, 0] and not is_sample and args.few_shot == -1:\n torch.save(data, cache_fn)\n return examples, data" }, { "identifier": "load_and_cache_defect_data", "path": "utils.py", "snippet": "def load_and_cache_defect_data(args, filename, pool, tokenizer, split_tag, is_sample=False):\n cache_fn = os.path.join(args.cache_path, split_tag)\n examples = read_examples(filename, -1, args.task)\n if is_sample:\n sample_num = min(5000, len(examples))\n examples = random.sample(examples, sample_num)\n elif split_tag!='test' and args.few_shot != -1:\n examples_True = [e for e in examples if e.target == 1]\n examples_False = [e for e in examples if e.target == 0]\n examples_True = random.sample(examples_True,args.few_shot)\n examples_False = random.sample(examples_False,args.few_shot)\n examples = examples_True + examples_False\n calc_stats(examples, tokenizer, is_tokenize=True)\n if os.path.exists(cache_fn) and args.few_shot == -1:\n logger.info(\"Load cache data from %s\", cache_fn)\n data = torch.load(cache_fn)\n else:\n if split_tag!='test' and is_sample:\n logger.info(\"Sample min(5000, len(examples)) of data from %s\", filename)\n elif args.data_num == -1:\n logger.info(\"Create cache data into %s\", cache_fn)\n tuple_examples = [(example, idx, tokenizer, args) for idx, example in enumerate(examples)]\n f_=partial(convert_defect_examples_to_features,args)\n features = 
pool.map(f_, tqdm(tuple_examples, total=len(tuple_examples),desc=\"Convert examples to features\"))\n # if args.sub_task == \"POJ\":\n # train_dataset = TextDataset_POJ104(features, args)\n # return train_dataset, train_dataset\n if args.model_name in ['unixcoder']:\n train_dataset = TextDataset_BCB(features, args)\n return examples, train_dataset\n # features = [convert_clone_examples_to_features(x) for x in tuple_examples]\n all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)\n all_labels = torch.tensor([f.label for f in features], dtype=torch.long)\n data = TensorDataset(all_source_ids, all_labels)\n\n if args.local_rank in [-1, 0] and args.data_num == -1 and args.few_shot == -1:\n torch.save(data, cache_fn)\n return examples, data" }, { "identifier": "load_and_cache_clone_data", "path": "utils.py", "snippet": "def load_and_cache_clone_data(args, filename, pool, tokenizer, split_tag, is_sample=False):\n\n cache_fn = '{}/{}.pt'.format(args.cache_path, split_tag +\n '_all' if args.data_num == -1 else '_%d' % args.data_num)\n examples = read_examples(filename, -1, args.task)\n if is_sample or args.is_clone_sample:\n examples = random.sample(examples, int(len(examples) * 0.1))\n if split_tag!='test' and args.few_shot!=-1:\n examples_True = [e for e in examples if e.label == 1]\n examples_False = [e for e in examples if e.label == 0]\n examples_True = random.sample(examples_True,args.few_shot)\n examples_False = random.sample(examples_False,args.few_shot)\n examples = examples_True + examples_False\n\n if split_tag!='test' and args.few_shot != -1:\n calc_stats(examples, tokenizer, is_tokenize=True)\n\n if os.path.exists(cache_fn) and args.few_shot == -1:\n logger.info(\"Load cache data from %s\", cache_fn)\n data = torch.load(cache_fn)\n else:\n if split_tag!='test' and args.few_shot == -1:\n logger.info(\"Sample 10 percent of data from %s\", filename)\n elif args.data_num == -1:\n logger.info(\"Create cache data into %s\", cache_fn)\n tuple_examples = [(example, idx, tokenizer, args)\n for idx, example in enumerate(examples)]\n f_=partial(convert_clone_examples_to_features,args)\n features = pool.map(f_, tqdm(\n tuple_examples, total=len(tuple_examples),desc=\"Convert examples to features\"))\n \n # if args.sub_task == \"POJ\":\n # train_dataset = TextDataset_POJ104(features, args)\n # return train_dataset, train_dataset\n # features = [convert_clone_examples_to_features(x) for x in tuple_examples]\n if args.model_name in ['unixcoder']:\n train_dataset = TextDataset_BCB(features, args)\n return examples, train_dataset\n all_source_ids = torch.tensor(\n [f.source_ids for f in features], dtype=torch.long)\n all_labels = torch.tensor(\n [f.label for f in features], dtype=torch.long)\n data = TensorDataset(all_source_ids, all_labels)\n\n if args.local_rank in [-1, 0] and args.data_num == -1 and args.few_shot == -1:\n torch.save(data, cache_fn)\n return examples, data" }, { "identifier": "get_lang_by_task", "path": "utils.py", "snippet": "def get_lang_by_task(task, sub_task):\n if task in ['summarize','complete']:\n return sub_task\n elif task in ['refine','generate','clone']:\n return 'java'\n elif task == 'translate':\n if sub_task == 'cs-java':\n return 'c_sharp'\n else:\n return 'java'\n elif task == 'defect':\n return 'c'\n else:\n raise 'java'" }, { "identifier": "smooth_bleu", "path": "evaluator/smooth_bleu.py", "snippet": "def normalize(s):\ndef count_ngrams(words, n=4):\ndef cook_refs(refs, n=4):\ndef cook_test(test, item, n=4):\ndef score_cooked(allcomps, 
n=4, ground=0, smooth=1):\ndef bleu(refs, candidate, ground=0, smooth=1):\ndef splitPuncts(line):\ndef computeMaps(predictions, goldfile):\ndef bleuFromMaps(m1, m2):" }, { "identifier": "calc_code_bleu", "path": "evaluator/CodeBLEU/calc_code_bleu.py", "snippet": "def get_codebleu(refs, hyp, lang, params='0.25,0.25,0.25,0.25',args=None):\r\n def make_weights(reference_tokens, key_word_list):\r" }, { "identifier": "_bleu", "path": "evaluator/bleu.py", "snippet": "def _bleu(ref_file, trans_file, subword_option=None):\n max_order = 4\n smooth = True\n ref_files = [ref_file]\n reference_text = []\n for reference_filename in ref_files:\n with open(reference_filename,encoding='utf-8') as fh:\n reference_text.append(fh.readlines())\n per_segment_references = []\n for references in zip(*reference_text):\n reference_list = []\n for reference in references:\n reference_list.append(reference.strip().split())\n per_segment_references.append(reference_list)\n translations = []\n with open(trans_file,encoding='utf-8') as fh:\n for line in fh:\n translations.append(line.strip().split())\n bleu_score, _, _, _, _, _ = compute_bleu(per_segment_references, translations, max_order, smooth)\n return round(100 * bleu_score,2)" }, { "identifier": "retrieve2file", "path": "utils.py", "snippet": "def retrieve2file(args, examples, data, k=1):\n print('\\n Retrieving...')\n if args.retriever_mode == 'random':\n retrieve_id_list = np.random.choice(len(examples), k, replace=False)\n elif args.retriever_mode == 'retrieve':\n retrieve_id_list = get_retrieve_id_list(args, examples, data, k)\n filename = get_filenames(\n args.old_prefix_dir, args.task, args.sub_task, 'prefix')\n with open(filename,'w',encoding=\"utf-8\") as f:\n for id_ in retrieve_id_list:\n print(\"retrieve case:\\n\")\n print(examples[id_].source,'\\n')\n print('Writing retrieve file...')\n f.write(examples[id_].raw_line+'\\n')" } ]
import logging
import torch
import argparse
import time
import multiprocessing
import os
import numpy as np
import math
import sys
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
from configs import add_args, set_dist, set_seed, set_hyperparas
from models import bulid_or_load_gen_model,bulid_or_load_cls_model
from utils import get_filenames, get_elapse_time, load_and_cache_gen_data, load_and_cache_defect_data,load_and_cache_clone_data, get_lang_by_task
from evaluator import smooth_bleu
from evaluator.CodeBLEU import calc_code_bleu
from evaluator.bleu import _bleu
from sklearn.metrics import recall_score, precision_score, f1_score
from tree_sitter import Language, Parser
from utils import retrieve2file
13,043
elif args.model_name in ['unixcoder']: preds = model(source_ids=source_ids) # preds shape: [batch_size, self.beam_size, max_target_len] top_preds = [pred[0].cpu().numpy() for pred in preds]# top_preds shape: batch_size * [max_target_len] else: preds = model.generate(source_ids, attention_mask=source_mask, use_cache=True, num_beams=args.beam_size, early_stopping=args.task == 'summarize', max_length=args.max_target_length) top_preds = list(preds.cpu().numpy()) pred_ids.extend(top_preds) # pdb.set_trace() pred_nls = [tokenizer.decode( id, skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in pred_ids] # unixcoder in fewshot will generate '\n' in small batch, and gradually disappear if args.model_name in ['unixcoder']: pred_nls = [id.replace('\n',' ').replace(" "," ").replace(" "," ").replace("\t"," ") for id in pred_nls] output_fn = os.path.join(args.res_dir, "test_{}.output".format(criteria)) gold_fn = os.path.join(args.res_dir, "test_{}.gold".format(criteria)) src_fn = os.path.join(args.res_dir, "test_{}.src".format(criteria)) if args.task in ['defect']: target_dict = {0: 'false', 1: 'true'} golds = [target_dict[ex.target] for ex in eval_examples] eval_acc = np.mean([int(p == g) for p, g in zip(pred_nls, golds)]) result = {'em': eval_acc, 'bleu': 0, 'codebleu': 0} with open(output_fn, 'w',encoding='utf-8') as f, open(gold_fn, 'w',encoding='utf-8') as f1, open(src_fn, 'w',encoding='utf-8') as f2: for pred_nl, gold in zip(pred_nls, eval_examples): f.write(pred_nl.strip() + '\n') f1.write(target_dict[gold.target] + '\n') f2.write(gold.source.strip() + '\n') logger.info("Save the predictions into %s", output_fn) else: dev_accs, predictions = [], [] with open(output_fn, 'w',encoding='utf-8') as f, open(gold_fn, 'w',encoding='utf-8') as f1, open(src_fn, 'w',encoding='utf-8') as f2: for pred_nl, gold in zip(pred_nls, eval_examples): dev_accs.append(pred_nl.strip() == gold.target.strip()) if args.task in ['summarize']: predictions.append(str(gold.idx) + '\t' + pred_nl) f.write(str(gold.idx) + '\t' + pred_nl.strip().encode('utf8').decode() + '\n') f1.write(str(gold.idx) + '\t' + gold.target.strip().encode('utf8').decode() + '\n') f2.write(str(gold.idx) + '\t' + gold.source.strip().encode('utf8').decode() + '\n') else: f.write(pred_nl.strip().encode('utf8').decode() + '\n') f1.write(gold.target.strip().encode( 'utf8').decode() + '\n') f2.write(gold.source.strip().encode( 'utf8').decode() + '\n') if args.task in ['summarize']: (goldMap, predictionMap) = smooth_bleu.computeMaps(predictions, gold_fn) bleu = round(smooth_bleu.bleuFromMaps( goldMap, predictionMap)[0], 2) else: bleu = round(_bleu(gold_fn, output_fn), 2) if split_tag == 'test' and args.task in ['refine', 'translate', 'generate' , 'clone']: args.lang = get_lang_by_task(args.task, args.sub_task) codebleu = calc_code_bleu.get_codebleu( gold_fn, output_fn, args.lang,args=args) # except: # bleu = 0.0 # codebleu = 0.0 em = np.mean(dev_accs) * 100 result = {'em': em, 'bleu': bleu} if not args.task == 'summarize' and split_tag == 'test': result['codebleu'] = codebleu * 100 logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(round(result[key], 4))) return result def main(): parser = argparse.ArgumentParser() args = add_args(parser) t0 = time.time() set_dist(args) set_seed(args) set_hyperparas(args) logger.info(args) logger.info("************* args: *************") logger.info("args.model_name: "+str(args.model_name)) logger.info("args.few_shot: "+str(args.few_shot)) 
logger.info("args.task: "+str(args.task)) logger.info("args.sub_task: "+str(args.sub_task)) logger.info("*********************************") if args.task in ['summarize', 'translate', 'refine', 'generate','complete']: config, model, tokenizer = bulid_or_load_gen_model(args) elif args.task in ['defect','clone']: config, model, tokenizer = bulid_or_load_cls_model(args) model.to(args.device) if args.n_gpu > 1: model = torch.nn.DataParallel(model) pool = multiprocessing.Pool(args.cpu_count) args.train_filename, args.dev_filename, args.test_filename = get_filenames( args.data_dir, args.task, args.sub_task) fa = open(os.path.join(args.output_dir, 'summary.log'), 'a+',encoding='utf-8') if args.do_train: if args.local_rank in [-1, 0] and args.data_num == -1: summary_fn = '{}/{}'.format(args.summary_dir, '/'.join(args.output_dir.split('/')[1:])) tb_writer = SummaryWriter(summary_fn) # Prepare training data loader if args.task in ['summarize', 'translate', 'refine', 'generate','complete']: train_examples, train_data = load_and_cache_gen_data( args, args.train_filename, pool, tokenizer, 'train') elif args.task in ['defect']: train_examples, train_data = load_and_cache_defect_data(args, args.train_filename, pool, tokenizer, 'train') elif args.task in ['clone']:
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) def evaluate_cls(args, model, eval_examples, eval_data, write_to_pred=False): eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.dev_batch_size) # Eval! if write_to_pred == False: logger.info("***** Running evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Num batches = %d", len(eval_dataloader)) logger.info(" Batch size = %d", args.dev_batch_size) eval_loss = 0.0 nb_eval_steps = 0 model.eval() logits = [] labels = [] for batch in tqdm(eval_dataloader, total=len(eval_dataloader), desc="Evaluating"): if args.sub_task == "POJ": inputs = batch[0].to(args.device) p_inputs = batch[1].to(args.device) n_inputs = batch[2].to(args.device) label = batch[3].to(args.device) else: inputs = batch[0].to(args.device) # inputs shape: [batch_size, args.max_source_length+args.max_target_length] label = batch[1].to(args.device) # label shape: [batch_size] with torch.no_grad(): if args.sub_task == "POJ": lm_loss, logit = model(inputs,p_inputs,n_inputs,label) else: lm_loss, logit = model(inputs, label) # logit shape:[batch_size] eval_loss += lm_loss.mean().item() logits.append(logit.cpu().numpy())#logit shape: [batch_size] labels.append(label.cpu().numpy())#label shape: [batch_size] nb_eval_steps += 1 logits = np.concatenate(logits, 0) labels = np.concatenate(labels, 0) if args.model_name == "unixcoder" and args.task == "clone": preds = logits > 0.5 else: preds = logits[:, 1] > 0.5 if args.task == 'defect': eval_acc = np.mean(labels == preds) eval_loss = eval_loss / nb_eval_steps perplexity = torch.tensor(eval_loss) result = { "eval_loss": float(perplexity), "eval_acc": round(eval_acc, 4) * 100, } elif args.task == 'clone': recall = recall_score(labels, preds) precision = precision_score(labels, preds) f1 = f1_score(labels, preds) result = { "eval_recall": float(recall) * 100, "eval_precision": float(precision) * 100, "eval_f1": float(f1) * 100, "eval_threshold": 0.5, } if write_to_pred == False: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(round(result[key], 4))) if write_to_pred: with open(os.path.join(args.output_dir, "predictions.txt"), 'w') as f: for example, pred in zip(eval_examples, preds): if args.task == 'defect': if args.model_name == "unixcoder": if pred: f.write(str(example.idx) + '\t1\n') else: f.write(str(example.idx) + '\t0\n') else: if pred: f.write(str(example.idx) + '\t1\n') else: f.write(str(example.idx) + '\t0\n') elif args.task == 'clone': if pred: f.write(example.url1 + '\t' + example.url2 + '\t' + '1' + '\n') else: f.write(example.url1 + '\t' + example.url2 + '\t' + '0' + '\n') return result def eval_ppl_epoch(args, eval_data, eval_examples, model, tokenizer): eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.dev_batch_size, num_workers=4, pin_memory=True) # Start evaluating model logger.info(" " + "***** Running ppl evaluation *****") logger.info(" Num examples = %d", len(eval_examples)) logger.info(" Batch size = %d", args.dev_batch_size) model.eval() eval_loss, batch_num = 0, 0 for batch in tqdm(eval_dataloader, total=len(eval_dataloader), desc="Eval ppl"): batch = tuple(t.to(args.device) for t in batch) source_ids, target_ids = batch source_mask = 
source_ids.ne(tokenizer.pad_token_id) target_mask = target_ids.ne(tokenizer.pad_token_id) with torch.no_grad(): if args.model_name in ['roberta', 'codebert', 'graphcodebert']: # loss, _, _, attention = model(source_ids=source_ids, source_mask=source_mask, # target_ids=target_ids, target_mask=target_mask) loss, _, _, _ = model(source_ids=source_ids, source_mask=source_mask, target_ids=target_ids, target_mask=target_mask) if args.n_gpu > 1: loss = loss.mean() eval_loss += loss.item() batch_num += 1 elif args.model_name in ['unixcoder']: _,loss,num = model(source_ids=source_ids,target_ids=target_ids) if args.n_gpu > 1: loss = loss.mean() eval_loss += loss.sum().item() batch_num += num.sum().item() else: outputs = model(input_ids=source_ids, attention_mask=source_mask, labels=target_ids, decoder_attention_mask=target_mask) if isinstance(outputs,dict): loss=outputs['loss'] else: loss = outputs.loss if args.n_gpu > 1: loss = loss.mean() eval_loss += loss.item() batch_num += 1 eval_loss = eval_loss / batch_num eval_ppl = round(np.exp(eval_loss), 5) return eval_ppl def eval_bleu_epoch(args, eval_data, eval_examples, model, tokenizer, split_tag, criteria): logger.info( " ***** Running bleu evaluation on {} data*****".format(split_tag)) logger.info(" Num examples = %d", len(eval_examples)) if split_tag == 'dev': batch_size = args.dev_batch_size elif split_tag == 'test': batch_size = args.test_batch_size else: batch_size = args.batch_size logger.info(" Batch size = %d", batch_size) eval_sampler = SequentialSampler(eval_data) if args.data_num == -1: eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=batch_size, num_workers=4, pin_memory=True) else: eval_dataloader = DataLoader( eval_data, sampler=eval_sampler, batch_size=batch_size) model.eval() pred_ids = [] bleu, codebleu = 0.0, 0.0 for batch in tqdm(eval_dataloader, total=len(eval_dataloader), desc="Eval bleu for {} set".format(split_tag)): source_ids = batch[0].to(args.device) #shape: (batch_size, max_source_len) source_mask = source_ids.ne(tokenizer.pad_token_id) if hasattr(model, 'module'): model = model.module # extract the model from the DataParallel wrapper with torch.no_grad(): if args.model_name in ['roberta', 'codebert', 'graphcodebert']: preds, _ = model(source_ids=source_ids, source_mask=source_mask) top_preds = [pred[0].cpu().numpy() for pred in preds] elif args.model_name in ['unixcoder']: preds = model(source_ids=source_ids) # preds shape: [batch_size, self.beam_size, max_target_len] top_preds = [pred[0].cpu().numpy() for pred in preds]# top_preds shape: batch_size * [max_target_len] else: preds = model.generate(source_ids, attention_mask=source_mask, use_cache=True, num_beams=args.beam_size, early_stopping=args.task == 'summarize', max_length=args.max_target_length) top_preds = list(preds.cpu().numpy()) pred_ids.extend(top_preds) # pdb.set_trace() pred_nls = [tokenizer.decode( id, skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in pred_ids] # unixcoder in fewshot will generate '\n' in small batch, and gradually disappear if args.model_name in ['unixcoder']: pred_nls = [id.replace('\n',' ').replace(" "," ").replace(" "," ").replace("\t"," ") for id in pred_nls] output_fn = os.path.join(args.res_dir, "test_{}.output".format(criteria)) gold_fn = os.path.join(args.res_dir, "test_{}.gold".format(criteria)) src_fn = os.path.join(args.res_dir, "test_{}.src".format(criteria)) if args.task in ['defect']: target_dict = {0: 'false', 1: 'true'} golds = [target_dict[ex.target] for ex in eval_examples] 
eval_acc = np.mean([int(p == g) for p, g in zip(pred_nls, golds)]) result = {'em': eval_acc, 'bleu': 0, 'codebleu': 0} with open(output_fn, 'w',encoding='utf-8') as f, open(gold_fn, 'w',encoding='utf-8') as f1, open(src_fn, 'w',encoding='utf-8') as f2: for pred_nl, gold in zip(pred_nls, eval_examples): f.write(pred_nl.strip() + '\n') f1.write(target_dict[gold.target] + '\n') f2.write(gold.source.strip() + '\n') logger.info("Save the predictions into %s", output_fn) else: dev_accs, predictions = [], [] with open(output_fn, 'w',encoding='utf-8') as f, open(gold_fn, 'w',encoding='utf-8') as f1, open(src_fn, 'w',encoding='utf-8') as f2: for pred_nl, gold in zip(pred_nls, eval_examples): dev_accs.append(pred_nl.strip() == gold.target.strip()) if args.task in ['summarize']: predictions.append(str(gold.idx) + '\t' + pred_nl) f.write(str(gold.idx) + '\t' + pred_nl.strip().encode('utf8').decode() + '\n') f1.write(str(gold.idx) + '\t' + gold.target.strip().encode('utf8').decode() + '\n') f2.write(str(gold.idx) + '\t' + gold.source.strip().encode('utf8').decode() + '\n') else: f.write(pred_nl.strip().encode('utf8').decode() + '\n') f1.write(gold.target.strip().encode( 'utf8').decode() + '\n') f2.write(gold.source.strip().encode( 'utf8').decode() + '\n') if args.task in ['summarize']: (goldMap, predictionMap) = smooth_bleu.computeMaps(predictions, gold_fn) bleu = round(smooth_bleu.bleuFromMaps( goldMap, predictionMap)[0], 2) else: bleu = round(_bleu(gold_fn, output_fn), 2) if split_tag == 'test' and args.task in ['refine', 'translate', 'generate' , 'clone']: args.lang = get_lang_by_task(args.task, args.sub_task) codebleu = calc_code_bleu.get_codebleu( gold_fn, output_fn, args.lang,args=args) # except: # bleu = 0.0 # codebleu = 0.0 em = np.mean(dev_accs) * 100 result = {'em': em, 'bleu': bleu} if not args.task == 'summarize' and split_tag == 'test': result['codebleu'] = codebleu * 100 logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info(" %s = %s", key, str(round(result[key], 4))) return result def main(): parser = argparse.ArgumentParser() args = add_args(parser) t0 = time.time() set_dist(args) set_seed(args) set_hyperparas(args) logger.info(args) logger.info("************* args: *************") logger.info("args.model_name: "+str(args.model_name)) logger.info("args.few_shot: "+str(args.few_shot)) logger.info("args.task: "+str(args.task)) logger.info("args.sub_task: "+str(args.sub_task)) logger.info("*********************************") if args.task in ['summarize', 'translate', 'refine', 'generate','complete']: config, model, tokenizer = bulid_or_load_gen_model(args) elif args.task in ['defect','clone']: config, model, tokenizer = bulid_or_load_cls_model(args) model.to(args.device) if args.n_gpu > 1: model = torch.nn.DataParallel(model) pool = multiprocessing.Pool(args.cpu_count) args.train_filename, args.dev_filename, args.test_filename = get_filenames( args.data_dir, args.task, args.sub_task) fa = open(os.path.join(args.output_dir, 'summary.log'), 'a+',encoding='utf-8') if args.do_train: if args.local_rank in [-1, 0] and args.data_num == -1: summary_fn = '{}/{}'.format(args.summary_dir, '/'.join(args.output_dir.split('/')[1:])) tb_writer = SummaryWriter(summary_fn) # Prepare training data loader if args.task in ['summarize', 'translate', 'refine', 'generate','complete']: train_examples, train_data = load_and_cache_gen_data( args, args.train_filename, pool, tokenizer, 'train') elif args.task in ['defect']: train_examples, train_data = load_and_cache_defect_data(args, 
args.train_filename, pool, tokenizer, 'train') elif args.task in ['clone']:
train_examples, train_data = load_and_cache_clone_data(args, args.train_filename, pool, tokenizer, 'train')
10
2023-10-20 09:24:44+00:00
16k
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES = getattr(settings, 'DJANGO_LEDGER_USE_CLOSING_ENTRIES', False)\nDJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT = getattr(settings,\n 'DJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT', 3600)\nDJANGO_LEDGER_LOGIN_URL = getattr(settings, 'DJANGO_LEDGER_LOGIN_URL', settings.LOGIN_URL)\nDJANGO_LEDGER_BILL_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_INVOICE_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_FORM_INPUT_CLASSES = getattr(settings, 'DJANGO_LEDGER_FORM_INPUT_CLASSES', 'input')\nDJANGO_LEDGER_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_CURRENCY_SYMBOL', '$')\nDJANGO_LEDGER_SPACED_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_SPACED_CURRENCY_SYMBOL', False)\nDJANGO_LEDGER_SHOW_FEEDBACK_BUTTON = getattr(settings, 'DJANGO_LEDGER_SHOW_FEEDBACK_BUTTON', False)\nDJANGO_LEDGER_FEEDBACK_EMAIL_LIST = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_EMAIL_LIST', [])\nDJANGO_LEDGER_FEEDBACK_FROM_EMAIL = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_FROM_EMAIL', None)\nDJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME = getattr(settings, 'DJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME', False)\nDJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE', Decimal('0.02'))\nDJANGO_LEDGER_TRANSACTION_CORRECTION = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_CORRECTION', Decimal('0.01'))\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE', True)\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', 5)\nDJANGO_LEDGER_ACCOUNT_CODE_USE_PREFIX = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', True)\nDJANGO_LEDGER_JE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_PREFIX', 'JE')\nDJANGO_LEDGER_PO_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PO_NUMBER_PREFIX', 'PO')\nDJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX', 'E')\nDJANGO_LEDGER_INVOICE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_PREFIX', 'I')\nDJANGO_LEDGER_BILL_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_PREFIX', 'B')\nDJANGO_LEDGER_VENDOR_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_VENDOR_NUMBER_PREFIX', 'V')\nDJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX', 'C')\nDJANGO_LEDGER_EXPENSE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX', 'IEX')\nDJANGO_LEDGER_INVENTORY_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX', 'INV')\nDJANGO_LEDGER_PRODUCT_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX', 'IPR')\nDJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)\nDJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX', '000')\nDJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS',\n 'django_ledger.models.bill.BillModelAbstract')\nDJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS',\n 
'django_ledger.models.invoice.InvoiceModelAbstract')\nDJANGO_LEDGER_DEFAULT_COA = getattr(settings, 'DJANGO_LEDGER_DEFAULT_COA', None)\nDJANGO_LEDGER_FINANCIAL_ANALYSIS = {\n 'ratios': {\n 'current_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'quick_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'debt_to_equity': {\n 'good_incremental': False,\n 'ranges': {\n 'healthy': 0,\n 'watch': .25,\n 'warning': .5,\n 'critical': 1\n }\n },\n 'return_on_equity': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .07,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'return_on_assets': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'net_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'gross_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n }\n}" }, { "identifier": "InvalidDateInputError", "path": "django_ledger/exceptions.py", "snippet": "class InvalidDateInputError(ValidationError):\n pass" }, { "identifier": "TransactionNotInBalanceError", "path": "django_ledger/exceptions.py", "snippet": "class TransactionNotInBalanceError(ValidationError):\n pass" }, { "identifier": "roles", "path": "django_ledger/io/roles.py", "snippet": "DEBIT = 'debit'\nCREDIT = 'credit'\nASSET_CA_CASH = 'asset_ca_cash'\nASSET_CA_MKT_SECURITIES = 'asset_ca_mkt_sec'\nASSET_CA_RECEIVABLES = 'asset_ca_recv'\nASSET_CA_INVENTORY = 'asset_ca_inv'\nASSET_CA_UNCOLLECTIBLES = 'asset_ca_uncoll'\nASSET_CA_PREPAID = 'asset_ca_prepaid'\nASSET_CA_OTHER = 'asset_ca_other'\nASSET_LTI_NOTES_RECEIVABLE = 'asset_lti_notes'\nASSET_LTI_LAND = 'asset_lti_land'\nASSET_LTI_SECURITIES = 'asset_lti_sec'\nASSET_PPE_BUILDINGS = 'asset_ppe_build'\nASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION = 'asset_ppe_build_accum_depr'\nASSET_PPE_EQUIPMENT = 'asset_ppe_equip'\nASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION = 'asset_ppe_equip_accum_depr'\nASSET_PPE_PLANT = 'asset_ppe_plant'\nASSET_PPE_PLANT_ACCUM_DEPRECIATION = 'asset_ppe_plant_depr'\nASSET_INTANGIBLE_ASSETS = 'asset_ia'\nASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION = 'asset_ia_accum_amort'\nASSET_ADJUSTMENTS = 'asset_adjustment'\nLIABILITY_CL_ACC_PAYABLE = 'lia_cl_acc_payable'\nLIABILITY_CL_WAGES_PAYABLE = 'lia_cl_wages_payable'\nLIABILITY_CL_TAXES_PAYABLE = 'lia_cl_taxes_payable'\nLIABILITY_CL_INTEREST_PAYABLE = 'lia_cl_int_payable'\nLIABILITY_CL_ST_NOTES_PAYABLE = 'lia_cl_st_notes_payable'\nLIABILITY_CL_LTD_MATURITIES = 'lia_cl_ltd_mat'\nLIABILITY_CL_DEFERRED_REVENUE = 'lia_cl_def_rev'\nLIABILITY_CL_OTHER = 'lia_cl_other'\nLIABILITY_LTL_NOTES_PAYABLE = 'lia_ltl_notes'\nLIABILITY_LTL_BONDS_PAYABLE = 'lia_ltl_bonds'\nLIABILITY_LTL_MORTGAGE_PAYABLE = 'lia_ltl_mortgage'\nEQUITY_CAPITAL = 'eq_capital'\nEQUITY_ADJUSTMENT = 'eq_adjustment'\nEQUITY_COMMON_STOCK = 'eq_stock_common'\nEQUITY_PREFERRED_STOCK = 'eq_stock_preferred'\nEQUITY_DIVIDENDS = 'eq_dividends'\nINCOME_OPERATIONAL = 'in_operational'\nINCOME_PASSIVE = 'in_passive'\nINCOME_CAPITAL_GAIN_LOSS = 'in_gain_loss'\nINCOME_INTEREST = 'in_interest'\nINCOME_OTHER = 'in_other'\nCOGS = 'cogs_regular'\nEXPENSE_OPERATIONAL = 'ex_regular'\nEXPENSE_CAPITAL = 'ex_capital'\nEXPENSE_DEPRECIATION = 'ex_depreciation'\nEXPENSE_AMORTIZATION = 
'ex_amortization'\nEXPENSE_TAXES = 'ex_taxes'\nEXPENSE_INTEREST_ST = 'ex_interest_st'\nEXPENSE_INTEREST_LT = 'ex_interest'\nEXPENSE_OTHER = 'ex_other'\nROOT_COA = 'root_coa'\nROOT_ASSETS = 'root_assets'\nROOT_LIABILITIES = 'root_liabilities'\nROOT_CAPITAL = 'root_capital'\nROOT_INCOME = 'root_income'\nROOT_COGS = 'root_cogs'\nROOT_EXPENSES = 'root_expenses'\nROOT_GROUP = [\n ROOT_COA,\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_LEVEL_2 = [\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_META = {\n ROOT_COA: {\n 'code': '00000',\n 'title': 'CoA Root Node',\n 'balance_type': DEBIT\n },\n ROOT_ASSETS: {\n 'code': '01000',\n 'title': 'Asset Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_LIABILITIES: {\n 'code': '02000',\n 'title': 'Liability Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_CAPITAL: {\n 'code': '03000',\n 'title': 'Capital Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_INCOME: {\n 'code': '04000',\n 'title': 'Income Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_COGS: {\n 'code': '05000',\n 'title': 'COGS Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_EXPENSES: {\n 'code': '06000',\n 'title': 'Expense Accounts Root Node',\n 'balance_type': DEBIT\n },\n}\nGROUP_QUICK_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES\n]\nGROUP_CURRENT_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES,\n ASSET_CA_INVENTORY,\n ASSET_CA_RECEIVABLES,\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_NON_CURRENT_ASSETS = [\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_LAND,\n ASSET_LTI_SECURITIES,\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION,\n ASSET_INTANGIBLE_ASSETS,\n ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION,\n ASSET_ADJUSTMENTS\n]\nGROUP_ASSETS = GROUP_CURRENT_ASSETS + GROUP_NON_CURRENT_ASSETS\nGROUP_CURRENT_LIABILITIES = [\n LIABILITY_CL_ACC_PAYABLE,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_OTHER,\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE\n]\nGROUP_LT_LIABILITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n]\nGROUP_LIABILITIES = GROUP_CURRENT_LIABILITIES + GROUP_LT_LIABILITIES\nGROUP_CAPITAL = [\n EQUITY_CAPITAL,\n EQUITY_COMMON_STOCK,\n EQUITY_PREFERRED_STOCK,\n EQUITY_DIVIDENDS,\n EQUITY_ADJUSTMENT\n]\nGROUP_INCOME = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_COGS = [\n COGS\n]\nGROUP_EXPENSES = [\n EXPENSE_OPERATIONAL,\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_NET_PROFIT = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER,\n COGS\n]\nGROUP_GROSS_PROFIT = [\n INCOME_OPERATIONAL,\n COGS\n]\nGROUP_NET_SALES = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE\n]\nGROUP_PPE_ACCUM_DEPRECIATION = [\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION\n]\nGROUP_EXPENSE_DEP_AND_AMT = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_EARNINGS = GROUP_INCOME + GROUP_COGS + 
GROUP_EXPENSES\nGROUP_EQUITY = GROUP_CAPITAL + GROUP_EARNINGS\nGROUP_LIABILITIES_EQUITY = GROUP_LIABILITIES + GROUP_EQUITY\nGROUP_INVOICE = [ASSET_CA_CASH, ASSET_CA_RECEIVABLES, LIABILITY_CL_DEFERRED_REVENUE]\nGROUP_BILL = [ASSET_CA_CASH, ASSET_CA_PREPAID, LIABILITY_CL_ACC_PAYABLE]\nGROUP_IC_OPERATING_REVENUES = [INCOME_OPERATIONAL]\nGROUP_IC_OPERATING_COGS = [COGS]\nGROUP_IC_OPERATING_EXPENSES = [EXPENSE_OPERATIONAL]\nGROUP_IC_OTHER_REVENUES = [\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_IC_OTHER_EXPENSES = [\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_CFS_NET_INCOME = GROUP_EARNINGS\nGROUP_CFS_OP_DEPRECIATION_AMORTIZATION = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_CFS_OP_INVESTMENT_GAINS = [\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_OP_ACCOUNTS_RECEIVABLE = [\n ASSET_CA_RECEIVABLES\n]\nGROUP_CFS_OP_INVENTORY = [\n ASSET_CA_INVENTORY\n]\nGROUP_CFS_OP_ACCOUNTS_PAYABLE = [\n LIABILITY_CL_ACC_PAYABLE\n]\nGROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT = [\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT = [\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_OTHER,\n]\nGROUP_CFS_OPERATING = list(chain.from_iterable([\n GROUP_CFS_NET_INCOME,\n GROUP_CFS_OP_DEPRECIATION_AMORTIZATION,\n GROUP_CFS_OP_INVESTMENT_GAINS,\n GROUP_CFS_OP_ACCOUNTS_RECEIVABLE,\n GROUP_CFS_OP_INVENTORY,\n GROUP_CFS_OP_ACCOUNTS_PAYABLE,\n GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT,\n GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT\n]))\nGROUP_CFS_FIN_ISSUING_EQUITY = [EQUITY_CAPITAL, EQUITY_COMMON_STOCK, EQUITY_PREFERRED_STOCK]\nGROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]\nGROUP_CFS_FIN_ST_DEBT_PAYMENTS = [\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_ACC_PAYABLE,\n EXPENSE_INTEREST_ST\n]\nGROUP_CFS_FIN_LT_DEBT_PAYMENTS = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n EXPENSE_INTEREST_LT\n]\nGROUP_CFS_FINANCING = GROUP_CFS_FIN_ISSUING_EQUITY + GROUP_CFS_FIN_DIVIDENDS\nGROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE = [\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_PLANT,\n ASSET_PPE_EQUIPMENT,\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_INV_LTD_OF_PPE = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n]\nGROUP_CFS_INVESTING_PPE = GROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE + GROUP_CFS_INV_LTD_OF_PPE\nGROUP_CFS_INV_PURCHASE_OF_SECURITIES = [\n ASSET_CA_MKT_SECURITIES,\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_SECURITIES,\n INCOME_INTEREST,\n INCOME_PASSIVE,\n]\nGROUP_CFS_INV_LTD_OF_SECURITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE\n]\nGROUP_CFS_INVESTING_SECURITIES = GROUP_CFS_INV_PURCHASE_OF_SECURITIES + GROUP_CFS_INV_LTD_OF_SECURITIES\nGROUP_CFS_INVESTING = GROUP_CFS_INVESTING_PPE + GROUP_CFS_INVESTING_SECURITIES\nGROUP_CFS_INVESTING_AND_FINANCING = GROUP_CFS_INVESTING + GROUP_CFS_FINANCING\nBS_ASSET_ROLE = 'assets'\nBS_LIABILITIES_ROLE = 'liabilities'\nBS_EQUITY_ROLE = 'equity'\nACCOUNT_ROLE_CHOICES = [\n (BS_ASSET_ROLE.capitalize(), (\n # CURRENT ASSETS ----\n (ASSET_CA_CASH, _('Current Asset')),\n (ASSET_CA_MKT_SECURITIES, _('Marketable Securities')),\n (ASSET_CA_RECEIVABLES, _('Receivables')),\n (ASSET_CA_INVENTORY, _('Inventory')),\n 
(ASSET_CA_UNCOLLECTIBLES, _('Uncollectibles')),\n (ASSET_CA_PREPAID, _('Prepaid')),\n (ASSET_CA_OTHER, _('Other Liquid Assets')),\n\n # LONG TERM INVESTMENTS ---\n (ASSET_LTI_NOTES_RECEIVABLE, _('Notes Receivable')),\n (ASSET_LTI_LAND, _('Land')),\n (ASSET_LTI_SECURITIES, _('Securities')),\n\n # PPE ...\n (ASSET_PPE_BUILDINGS, _('Buildings')),\n (ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION, _('Buildings - Accum. Depreciation')),\n (ASSET_PPE_PLANT, _('Plant')),\n (ASSET_PPE_PLANT_ACCUM_DEPRECIATION, _('Plant - Accum. Depreciation')),\n (ASSET_PPE_EQUIPMENT, _('Equipment')),\n (ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION, _('Equipment - Accum. Depreciation')),\n\n # Other Assets ...\n (ASSET_INTANGIBLE_ASSETS, _('Intangible Assets')),\n (ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION, _('Intangible Assets - Accum. Amortization')),\n (ASSET_ADJUSTMENTS, _('Other Assets')),\n )),\n (BS_LIABILITIES_ROLE.capitalize(), (\n\n # CURRENT LIABILITIES ---\n (LIABILITY_CL_ACC_PAYABLE, _('Accounts Payable')),\n (LIABILITY_CL_WAGES_PAYABLE, _('Wages Payable')),\n (LIABILITY_CL_INTEREST_PAYABLE, _('Interest Payable')),\n (LIABILITY_CL_TAXES_PAYABLE, _('Taxes Payable')),\n (LIABILITY_CL_ST_NOTES_PAYABLE, _('Short Term Notes Payable')),\n (LIABILITY_CL_LTD_MATURITIES, _('Current Maturities of Long Tern Debt')),\n (LIABILITY_CL_DEFERRED_REVENUE, _('Deferred Revenue')),\n (LIABILITY_CL_OTHER, _('Other Liabilities')),\n\n # LONG TERM LIABILITIES ----\n (LIABILITY_LTL_NOTES_PAYABLE, _('Long Term Notes Payable')),\n (LIABILITY_LTL_BONDS_PAYABLE, _('Bonds Payable')),\n (LIABILITY_LTL_MORTGAGE_PAYABLE, _('Mortgage Payable')),\n )),\n (BS_EQUITY_ROLE.capitalize(), (\n\n # EQUITY ---\n (EQUITY_CAPITAL, _('Capital')),\n (EQUITY_COMMON_STOCK, _('Common Stock')),\n (EQUITY_PREFERRED_STOCK, _('Preferred Stock')),\n (EQUITY_ADJUSTMENT, _('Other Equity Adjustments')),\n (EQUITY_DIVIDENDS, _('Dividends & Distributions to Shareholders')),\n\n # INCOME ---\n (INCOME_OPERATIONAL, _('Operational Income')),\n (INCOME_PASSIVE, _('Investing/Passive Income')),\n (INCOME_INTEREST, _('Interest Income')),\n (INCOME_CAPITAL_GAIN_LOSS, _('Capital Gain/Loss Income')),\n (INCOME_OTHER, _('Other Income')),\n\n # COGS ----\n (COGS, _('Cost of Goods Sold')),\n\n # EXPENSES ----\n (EXPENSE_OPERATIONAL, _('Regular Expense')),\n (EXPENSE_INTEREST_ST, _('Interest Expense - Short Term Debt')),\n (EXPENSE_INTEREST_LT, _('Interest Expense - Long Term Debt')),\n (EXPENSE_TAXES, _('Tax Expense')),\n (EXPENSE_CAPITAL, _('Capital Expense')),\n (EXPENSE_DEPRECIATION, _('Depreciation Expense')),\n (EXPENSE_AMORTIZATION, _('Amortization Expense')),\n (EXPENSE_OTHER, _('Other Expense')),\n )),\n ('Root', (\n (ROOT_COA, 'CoA Root Account'),\n (ROOT_ASSETS, 'Assets Root Account'),\n (ROOT_LIABILITIES, 'Liabilities Root Account'),\n (ROOT_CAPITAL, 'Capital Root Account'),\n (ROOT_INCOME, 'Income Root Account'),\n (ROOT_COGS, 'COGS Root Account'),\n (ROOT_EXPENSES, 'Expenses Root Account'),\n ))\n]\nACCOUNT_CHOICES_NO_ROOT = [c for c in ACCOUNT_ROLE_CHOICES if c[0] != 'Root']\nROLES_ORDER_ASSETS = [a[0] for a in ACCOUNT_ROLE_CHOICES[0][1]]\nROLES_ORDER_LIABILITIES = [a[0] for a in ACCOUNT_ROLE_CHOICES[1][1]]\nROLES_ORDER_CAPITAL = [a[0] for a in ACCOUNT_ROLE_CHOICES[2][1]]\nROLES_ORDER_ALL = list(chain.from_iterable([ROLES_ORDER_ASSETS, ROLES_ORDER_LIABILITIES, ROLES_ORDER_CAPITAL]))\nACCOUNT_LIST_ROLE_ORDER = list(r[0] for r in chain.from_iterable([i[1] for i in ACCOUNT_CHOICES_NO_ROOT]))\nACCOUNT_LIST_ROLE_VERBOSE = {r[0]: r[1] for r in chain.from_iterable([i[1] for i 
in ACCOUNT_CHOICES_NO_ROOT])}\nROLE_TUPLES = sum([[(r[0].lower(), s[0]) for s in r[1]] for r in ACCOUNT_ROLE_CHOICES], list())\nROLE_DICT = dict([(t[0].lower(), [r[0] for r in t[1]]) for t in ACCOUNT_ROLE_CHOICES])\nVALID_ROLES = [r[1] for r in ROLE_TUPLES]\nBS_ROLES = dict((r[1], r[0]) for r in ROLE_TUPLES)\nBS_BUCKETS = {\n '0': 'Root',\n '1': 'Asset',\n '2': 'Liability',\n '3': 'Capital',\n '4': 'Income',\n '5': 'COGS',\n '6': 'Expenses'\n}\nBS_BUCKETS_ORDER = [v for _, v in BS_BUCKETS.items() if v != 'Root']\nROLES_VARS = locals().keys()\nROLES_DIRECTORY = dict()\nROLES_CATEGORIES = ['ASSET', 'LIABILITY', 'EQUITY', 'INCOME', 'COGS', 'EXPENSE']\nROLES_GROUPS = [g for g in ROLES_VARS if g.split('_')[0] == 'GROUP']\nGROUPS_DIRECTORY = dict()\ndef validate_roles(roles: Union[str, List[str]], raise_exception: bool = True) -> Set[str]:" }, { "identifier": "RoleContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class RoleContextManager:\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.DIGEST = io_data\n self.DIGEST['role_account'] = None\n self.DIGEST['role_balance'] = None\n\n self.ACCOUNTS = io_data['accounts']\n\n self.ROLES_ACCOUNTS = dict()\n self.ROLES_BALANCES = dict()\n self.ROLES_BALANCE_SHEET = dict()\n\n if self.BY_PERIOD:\n self.ROLES_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_period'] = None\n if self.BY_UNIT:\n self.ROLES_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_unit'] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_roles()\n self.DIGEST['role_account'] = self.ROLES_ACCOUNTS\n self.DIGEST['role_balance'] = self.ROLES_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['role_balance_by_period'] = self.ROLES_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['role_balance_by_unit'] = self.ROLES_BALANCES_BY_UNIT\n\n return self.DIGEST\n\n def process_roles(self):\n\n for c, l in roles_module.ROLES_DIRECTORY.items():\n for r in l:\n acc_list = list(acc for acc in self.ACCOUNTS if acc['role'] == getattr(roles_module, r))\n\n self.ROLES_ACCOUNTS[r] = acc_list\n self.ROLES_BALANCES[r] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ROLES_BALANCES_BY_PERIOD[key][r] = sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ROLES_BALANCES_BY_UNIT[key][r] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "GroupContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class GroupContextManager:\n GROUP_ACCOUNTS_KEY = 'group_account'\n GROUP_BALANCE_KEY = 'group_balance'\n GROUP_BALANCE_BY_UNIT_KEY = 'group_balance_by_unit'\n GROUP_BALANCE_BY_PERIOD_KEY = 'group_balance_by_period'\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.IO_DIGEST = io_data\n\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = None\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = None\n\n self.DIGEST_ACCOUNTS = io_data['accounts']\n\n self.GROUPS_ACCOUNTS = dict()\n self.GROUPS_BALANCES = dict()\n\n if 
self.BY_PERIOD:\n self.GROUPS_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n if self.BY_UNIT:\n self.GROUPS_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.GROUPS_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n def digest(self):\n\n self.process_groups()\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = self.GROUPS_ACCOUNTS\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = self.GROUPS_BALANCES\n\n if self.BY_PERIOD:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = self.GROUPS_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = self.GROUPS_BALANCES_BY_UNIT\n return self.IO_DIGEST\n\n def get_accounts_generator(self, mod, g):\n return (acc for acc in self.DIGEST_ACCOUNTS if acc['role'] in getattr(mod, g))\n\n def process_groups(self):\n for g in roles_module.ROLES_GROUPS:\n acc_list = list(self.get_accounts_generator(roles_module, g))\n self.GROUPS_ACCOUNTS[g] = acc_list\n self.GROUPS_BALANCES[g] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.GROUPS_BALANCES_BY_PERIOD[key][g] = sum(\n acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.GROUPS_BALANCES_BY_UNIT[key][g] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0]\n )" }, { "identifier": "ActivityContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class ActivityContextManager:\n\n def __init__(self,\n io_data: dict,\n by_unit: bool = False,\n by_period: bool = False):\n\n self.DIGEST = io_data\n self.DIGEST['activity_account'] = None\n self.DIGEST['activity_balance'] = None\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.ACCOUNTS = io_data['accounts']\n self.ACTIVITY_ACCOUNTS = dict()\n self.ACTIVITY_BALANCES = dict()\n\n if self.BY_PERIOD:\n self.ACTIVITY_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_period'] = None\n if self.BY_UNIT:\n self.ACTIVITY_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_unit'] = None\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_activity()\n self.DIGEST['activity_account'] = self.ACTIVITY_ACCOUNTS\n self.DIGEST['activity_balance'] = self.ACTIVITY_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['activity_balance_by_period'] = self.ACTIVITY_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['activity_balance_by_unit'] = self.ACTIVITY_BALANCES_BY_PERIOD\n\n def get_accounts_generator(self, activity: str):\n return (acc for acc in self.ACCOUNTS if acc['activity'] == activity)\n\n def process_activity(self):\n JournalEntryModel = lazy_importer.get_journal_entry_model()\n for act in JournalEntryModel.VALID_ACTIVITIES:\n acc_list = list(self.get_accounts_generator(act))\n self.ACTIVITY_ACCOUNTS[act] = acc_list\n self.ACTIVITY_BALANCES[act] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ACTIVITY_BALANCES_BY_PERIOD[key][act] = 
sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ACTIVITY_BALANCES_BY_UNIT[key][act] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "BalanceSheetStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class BalanceSheetStatementContextManager:\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n gb_bs = {\n bsr: list(l) for bsr, l in groupby(\n chain.from_iterable(\n [\n self.DIGEST['group_account']['GROUP_ASSETS'],\n self.DIGEST['group_account']['GROUP_LIABILITIES'],\n self.DIGEST['group_account']['GROUP_CAPITAL'],\n ]\n ),\n key=lambda acc: acc['role_bs'])\n }\n\n bs_context = {\n bs_role: {\n 'total_balance': sum(a['balance'] for a in gb),\n 'is_block': True,\n 'roles': {\n r: {\n 'accounts': list(a)\n } for r, a in groupby(list(gb), key=lambda acc: acc['role'])\n }\n } for bs_role, gb in gb_bs.items()\n }\n\n for bs_role, bs_role_data in bs_context.items():\n for acc_role, role_data in bs_role_data['roles'].items():\n role_data['total_balance'] = sum(a['balance'] for a in role_data['accounts'])\n role_data['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc_role]\n\n bs_context['equity_balance'] = self.DIGEST['group_balance']['GROUP_EQUITY']\n bs_context['retained_earnings_balance'] = self.DIGEST['group_balance']['GROUP_EARNINGS']\n bs_context['liabilities_equity_balance'] = self.DIGEST['group_balance']['GROUP_LIABILITIES_EQUITY']\n\n self.DIGEST['balance_sheet'] = bs_context\n\n return self.DIGEST" }, { "identifier": "IncomeStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class IncomeStatementContextManager:\n\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n self.DIGEST['income_statement'] = {\n 'operating': {\n 'revenues': [\n acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_REVENUES\n ],\n 'cogs': [\n acc for acc in self.DIGEST['group_account']['GROUP_COGS'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_COGS\n ],\n 'expenses': [\n acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_EXPENSES\n ]\n },\n 'other': {\n 'revenues': [acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_REVENUES],\n 'expenses': [acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_EXPENSES],\n }\n }\n\n for activity, ic_section in self.DIGEST['income_statement'].items():\n for section, acc_list in ic_section.items():\n for acc in acc_list:\n acc['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc['role']]\n\n # OPERATING INCOME...\n self.DIGEST['income_statement']['operating']['gross_profit'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs']\n ]\n ))\n self.DIGEST['income_statement']['operating']['net_operating_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs'],\n self.DIGEST['income_statement']['operating']['expenses'],\n ]\n 
))\n self.DIGEST['income_statement']['operating']['net_operating_revenue'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['revenues']\n )\n self.DIGEST['income_statement']['operating']['net_cogs'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['cogs']\n )\n self.DIGEST['income_statement']['operating']['net_operating_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['expenses']\n )\n\n # OTHER INCOME....\n self.DIGEST['income_statement']['other']['net_other_revenues'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['revenues']\n )\n self.DIGEST['income_statement']['other']['net_other_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['expenses']\n )\n self.DIGEST['income_statement']['other']['net_other_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['other']['revenues'],\n self.DIGEST['income_statement']['other']['expenses']\n ]\n ))\n\n # NET INCOME...\n self.DIGEST['income_statement']['net_income'] = self.DIGEST['income_statement']['operating'][\n 'net_operating_income']\n self.DIGEST['income_statement']['net_income'] += self.DIGEST['income_statement']['other'][\n 'net_other_income']\n return self.DIGEST" }, { "identifier": "CashFlowStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class CashFlowStatementContextManager:\n CFS_DIGEST_KEY = 'cash_flow_statement'\n\n # todo: implement by period and by unit...\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n self.IO_DIGEST = io_data\n self.CASH_ACCOUNTS = [a for a in self.IO_DIGEST['accounts'] if a['role'] == roles_module.ASSET_CA_CASH]\n self.JE_MODEL = lazy_loader.get_journal_entry_model()\n\n def check_io_digest(self):\n if GroupContextManager.GROUP_BALANCE_KEY not in self.IO_DIGEST:\n raise ValidationError(\n 'IO Digest must have groups for Cash Flow Statement'\n )\n\n def operating(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n operating_activities = dict()\n operating_activities['GROUP_CFS_NET_INCOME'] = {\n 'description': 'Net Income',\n 'balance': group_balances['GROUP_CFS_NET_INCOME']\n }\n operating_activities['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION'] = {\n 'description': 'Depreciation & Amortization of Assets',\n 'balance': -group_balances['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION']\n }\n operating_activities['GROUP_CFS_OP_INVESTMENT_GAINS'] = {\n 'description': 'Gain/Loss Sale of Assets',\n 'balance': group_balances['GROUP_CFS_OP_INVESTMENT_GAINS']\n }\n operating_activities['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE'] = {\n 'description': 'Accounts Receivable',\n 'balance': -group_balances['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE']\n }\n operating_activities['GROUP_CFS_OP_INVENTORY'] = {\n 'description': 'Inventories',\n 'balance': -group_balances['GROUP_CFS_OP_INVENTORY']\n }\n\n operating_activities['GROUP_CFS_OP_ACCOUNTS_PAYABLE'] = {\n 'description': 'Accounts Payable',\n 'balance': group_balances['GROUP_CFS_OP_ACCOUNTS_PAYABLE']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT'] = {\n 'description': 'Other Current Assets',\n 'balance': -group_balances['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT'] = {\n 'description': 'Other Current Liabilities',\n 'balance': 
group_balances['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT']\n }\n\n net_cash_by_op_activities = sum(i['balance'] for g, i in operating_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['operating'] = operating_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'] = dict(\n OPERATING=net_cash_by_op_activities\n )\n\n def financing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n financing_activities = dict()\n financing_activities['GROUP_CFS_FIN_ISSUING_EQUITY'] = {\n 'description': 'Common Stock, Preferred Stock and Capital Raised',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_EQUITY)\n }\n financing_activities['GROUP_CFS_FIN_DIVIDENDS'] = {\n 'description': 'Dividends Payed Out to Shareholders',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_DIVIDENDS)\n }\n financing_activities['GROUP_CFS_FIN_ST_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Short-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_STD)\n }\n financing_activities['GROUP_CFS_FIN_LT_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Long-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_LTD)\n }\n\n net_cash = sum(i['balance'] for g, i in financing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['financing'] = financing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['FINANCING'] = net_cash\n\n def investing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n investing_activities = dict()\n investing_activities['GROUP_CFS_INVESTING_SECURITIES'] = {\n 'description': 'Purchase, Maturity and Sales of Investments & Securities',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_SECURITIES)\n }\n investing_activities['GROUP_CFS_INVESTING_PPE'] = {\n 'description': 'Addition and Disposition of Property, Plant & Equipment',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_PPE)\n }\n\n net_cash = sum(i['balance'] for g, i in investing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['investing'] = investing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['INVESTING'] = net_cash\n\n def net_cash(self):\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash'] = sum([\n bal for act, bal in self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'].items()\n ])\n\n def digest(self):\n self.check_io_digest()\n self.operating()\n self.financing()\n self.investing()\n self.net_cash()\n return self.IO_DIGEST" }, { "identifier": "IODigestContextManager", "path": "django_ledger/io/io_digest.py", "snippet": "class IODigestContextManager:\n\n def __init__(self, io_data: defaultdict):\n self.IO_DATA: defaultdict = io_data\n self.IO_MODEL = self.IO_DATA['io_model']\n self.TXS_QS = self.IO_DATA['txs_qs']\n self.STRFTIME_FORMAT = '%B %d, %Y'\n\n def get_io_data(self) -> defaultdict:\n return self.IO_DATA\n\n def get_strftime_format(self):\n return self.STRFTIME_FORMAT\n\n def get_from_date(self, as_str: bool = False, fmt=None) -> Optional[date]:\n from_date = self.IO_DATA['from_date']\n if from_date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return from_date.strftime(fmt)\n return 
from_date\n\n def get_to_date(self, as_str: bool = False, fmt=None) -> date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return self.IO_DATA['to_date'].strftime(fmt)\n return self.IO_DATA['to_date']\n\n def is_entity_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_entity_model()\n )\n\n def is_ledger_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_ledger_model()\n )\n\n def is_unit_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_unit_model()\n )\n\n def is_by_unit(self) -> bool:\n return self.IO_DATA['by_unit']\n\n def is_by_period(self) -> bool:\n return self.IO_DATA['by_period']\n\n def is_by_activity(self) -> bool:\n return self.IO_DATA['by_activity']\n\n # Balance Sheet Data...\n def has_balance_sheet(self) -> bool:\n return 'balance_sheet' in self.IO_DATA\n\n def get_balance_sheet_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['balance_sheet']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have balance sheet information available.'\n )\n\n # Income Statement Data...\n def has_income_statement(self) -> bool:\n return 'income_statement' in self.IO_DATA\n\n def get_income_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['income_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have income statement information available.'\n )\n\n # Cash Flow Statement Data...\n def has_cash_flow_statement(self):\n return 'cash_flow_statement' in self.IO_DATA\n\n def get_cash_flow_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['cash_flow_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have cash flow statement information available.'\n )\n\n # CLOSING ENTRIES...\n\n def get_closing_entry_data(self):\n io_data = self.get_io_data()\n return io_data['accounts']" }, { "identifier": "FinancialRatioManager", "path": "django_ledger/io/ratios.py", "snippet": "class FinancialRatioManager:\n\n def __init__(self, io_data):\n self.DIGEST = io_data\n self.ACCOUNTS = io_data['accounts']\n self.RATIO_NA = RATIO_NA\n\n self.quick_assets = io_data['group_balance']['GROUP_QUICK_ASSETS']\n self.assets = io_data['group_balance']['GROUP_ASSETS']\n self.current_liabilities = io_data['group_balance']['GROUP_CURRENT_LIABILITIES']\n self.current_assets = io_data['group_balance']['GROUP_CURRENT_ASSETS']\n self.equity = io_data['group_balance']['GROUP_CAPITAL']\n self.liabilities = io_data['group_balance']['GROUP_LIABILITIES']\n self.net_income = io_data['group_balance']['GROUP_EARNINGS']\n self.net_sales = io_data['group_balance']['GROUP_NET_SALES']\n self.net_profit = io_data['group_balance']['GROUP_NET_PROFIT']\n self.gross_profit = io_data['group_balance']['GROUP_GROSS_PROFIT']\n self.RATIOS = dict()\n\n def digest(self):\n self.quick_ratio()\n self.current_ratio()\n self.debt_to_equity()\n self.return_on_equity()\n self.return_on_assets()\n self.net_profit_margin()\n self.gross_profit_margin()\n self.DIGEST['ratios'] = self.RATIOS\n return self.DIGEST\n\n # ------> SOLVENCY RATIOS <------\n def quick_ratio(self, as_percent=False):\n if self.current_liabilities == 0:\n cr = self.RATIO_NA\n else:\n cr = self.quick_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['quick_ratio'] = cr\n\n def current_ratio(self, 
as_percent=False):\n if self.current_liabilities == 0:\n cr = RATIO_NA\n else:\n cr = self.current_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['current_ratio'] = cr\n\n # ------> LEVERAGE RATIOS <------\n def debt_to_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.liabilities / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['debt_to_equity'] = cr\n\n # ------> PROFITABILITY RATIOS <------\n def return_on_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_equity'] = cr\n\n def return_on_assets(self, as_percent=False):\n if self.assets == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.assets\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_assets'] = cr\n\n def net_profit_margin(self, as_percent=False):\n if self.net_sales == 0:\n npm = RATIO_NA\n else:\n npm = self.net_profit / self.net_sales\n if as_percent:\n npm = npm * 100\n self.RATIOS['net_profit_margin'] = npm\n\n def gross_profit_margin(self, as_percent=False):\n if self.gross_profit == 0:\n gpm = RATIO_NA\n else:\n gpm = self.gross_profit / self.net_sales\n if as_percent:\n gpm = gpm * 100\n self.RATIOS['gross_profit_margin'] = gpm" }, { "identifier": "lazy_loader", "path": "django_ledger/models/utils.py", "snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):" } ]
from collections import defaultdict, namedtuple
from datetime import datetime, date
from itertools import groupby
from pathlib import Path
from random import choice
from typing import List, Set, Union, Tuple, Optional, Dict
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Sum, QuerySet
from django.db.models.functions import TruncMonth
from django.http import Http404
from django.utils.dateparse import parse_date, parse_datetime
from django.utils.timezone import make_aware, is_naive, localtime
from django.utils.translation import gettext_lazy as _
from django_ledger import settings
from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError
from django_ledger.io import roles as roles_module
from django_ledger.io.io_context import (RoleContextManager,
                                         GroupContextManager,
                                         ActivityContextManager,
                                         BalanceSheetStatementContextManager,
                                         IncomeStatementContextManager,
                                         CashFlowStatementContextManager)
from django_ledger.io.io_digest import IODigestContextManager
from django_ledger.io.ratios import FinancialRatioManager
from django_ledger.models.utils import lazy_loader
13645
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS
if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE:
0
2023-10-20 01:07:20+00:00
16k
hitz-zentroa/This-is-not-a-Dataset
run.py
[ { "identifier": "load_model", "path": "load_model.py", "snippet": "def load_model(\n inference: bool,\n model_weights_name_or_path: str,\n quantization: Optional[int] = None,\n use_lora: bool = False,\n lora_weights_name_or_path: Optional[str] = None,\n lora_target_modules: Optional[List[str]] = [\"all\"],\n lora_r: Optional[int] = 8,\n lora_alpha: Optional[int] = 16,\n lora_dropout: Optional[float] = 0.05,\n torch_dtype: Optional[str] = None,\n force_auto_device_map: bool = False,\n use_gradient_checkpointing: bool = False,\n trust_remote_code: bool = False,\n use_flash_attention: bool = False,\n use_better_transformer: bool = False,\n fsdp_training: bool = False,\n max_memory_MB: Optional[int] = None,\n) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]:\n \"\"\"\n Load any Decoder model for training.\n\n Args:\n inference (`bool`):\n Whether to load the model for inference or training. If set to `True`, the model will be loaded\n in evaluation mode. In this case, if use_lora is set to `True`, you must provide the path to the\n LoRA weights. Defaults to `False`.\n model_weights_name_or_path (`str`):\n The path to your local model weights and tokenizer or huggingface model name.\n The list of labels to add to the tokenizer. Defaults to `None`.\n quantization (`int`, optional):\n '4' or '8' for 4 bits or 8 bits quantization or None for 16/32bits training. Defaults to `None`.\n\n Requires bitsandbytes library: https://github.com/TimDettmers/bitsandbytes\n use_lora (`bool`, optional):\n Whether to use LORA. Defaults to False.\n\n See https://arxiv.org/pdf/2106.09685.pdf for more details.\n\n Requires huggingface PEFT library: https://github.com/huggingface/peft\n lora_weights_name_or_path (`Optional[str]`, optional):\n The name or path to the pre-trained LORA model weights. You can also provide\n a huggingface hub model name to load the weights from there. If not provided,\n the weights will be initialized randomly, this requires training the model.\n Defaults to `None`.\n lora_target_modules (`Optional[List[str]]`, optional):\n The list of modules to apply LORA to. If not provided, we will use PEFT\n default modules. Defaults to `None`.\n lora_r (`Optional[int]`, optional):\n Lora attention dimension. Defaults to `8`.\n lora_alpha (`Optional[int]`, optional):\n The alpha parameter for Lora scaling. Defaults to `16`.\n lora_dropout (`Optional[float]`, optional):\n The dropout probability for Lora layers. Defaults to 0.05.\n torch_dtype (`Optional[str]`, optional):\n Override the default `torch.dtype` and load the model under this dtype. If\n `auto` is passed, the dtype will be automatically derived from the model's\n weights. Defaults to `None`.\n force_auto_device_map (`bool`, optional):\n Whether to force the use of the auto device map. If set to True, the model will be split across\n GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded\n into each GPU. Defaults to False.\n use_gradient_checkpointing (`bool`, optiona):\n Whether to use gradient checkpointing for training\n trust_remote_code (`bool`, optional):\n Trust the remote code from HuggingFace model hub. Defaults to False.\n use_flash_attention (`bool`, optional):\n Whether to use Flash Attention. Defaults to True. 
Flash attention must be installed, see:\n 'https://github.com/Dao-AILab/flash-attention' for more details.\n use_better_transformer (`bool`, optional):\n Whether to transform the model using Better Transformer library:\n https://huggingface.co/docs/optimum/bettertransformer/overview. Requires optimum\n 'https://huggingface.co/docs/optimum/installation'. Only supported for inference!\n Defaults to False.\n fsdp_training: (`bool`, optional):\n Whether Fully Sharded Data Parallelism is enabled for training. Defaults to False.\n Used to prevent casting layers to fp32 if the model is already in fp16, which causes\n an error: ValueError: Must flatten tensors with uniform dtype but got torch.float16 and torch.float32\n max_memory_MB (`int`):\n Free memory per gpu in MB. Used to compute the device map when force_auto_device_map is set to True.\n Raises:\n `ValueError`:\n is raised when `int8_quantization=True` but `use_lora=False`.\n\n Returns:\n `Tuple[PreTrainedModel, PreTrainedTokenizerBase]`:\n The loaded model and tokenizer.\n \"\"\"\n\n # Sanity checks\n\n if isinstance(quantization, str):\n quantization = int(quantization)\n assert (quantization is None) or (\n quantization in [4, 8]\n ), f\"Quantization must be 4 or 8, or None for FP32/FP16 training. You passed: {quantization}\"\n\n if not inference and quantization is not None and not use_lora:\n raise ValueError(\n \"'Quantization' == 4/8 is only supported with LoRA. If you want \"\n \"to train a 4/8bits quantified model, you must set `use_lora=True`. If you want to \"\n \"use a 4/8 bits optimizer, set `quantization=None` and choose a 4/8 bit optimizer using 'optim' \"\n \"argument (e.g 'adamw_bnb_8bit', 'lion_8bit', 'paged_adamw_8bit', ...).\"\n )\n\n if inference and use_lora and lora_weights_name_or_path is None:\n raise ValueError(\n \"You must provide the path to the LoRA weights when loading the model for inference.\"\n )\n\n if use_better_transformer and not inference:\n logging.warning(\n \"Better Transformer is only supported for inference. Better Transformers does not support \"\n \"attention mask for training, therefore it is not compatible with CoLLIE training. See \"\n \"https://huggingface.co/docs/optimum/bettertransformer/overview for more details. We will \"\n \"set use_better_transformer=False.\"\n )\n use_better_transformer = False\n\n if use_better_transformer and use_flash_attention:\n raise ValueError(\n \"You cannot use both Flash Attention and Better Transformer flags. Flash Attention is already part of\"\n \" Better Transformers, so you can just set use_better_transformer=True to use Flash Attention. The Flash\"\n \" Attention flag is intended for patching HuggingFace models.\"\n )\n\n if lora_weights_name_or_path is not None and not use_lora:\n logging.warning(\n \"You provided a path to LoRA weights but use_lora is set to False. 
We will set use_lora=True.\"\n )\n use_lora = True\n\n logging.info(f\"Loading model model from {model_weights_name_or_path}\")\n\n MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.update(\n {\n \"stablelm_epoch\": \"LlamaForCausalLM\",\n }\n )\n\n # Get the device map config\n\n device_map, max_memory = get_device_map(\n force_auto_device_map=force_auto_device_map,\n max_memory_MB=max_memory_MB,\n use_better_transformer=use_better_transformer,\n )\n\n # Load the model config\n\n if use_lora:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n pretraining_tp=1, # Fix mat1 and mat2 shapes cannot be multiplied error with LLaMA-2\n # See https://github.com/huggingface/transformers/pull/24906\n )\n else:\n config = AutoConfig.from_pretrained(\n model_weights_name_or_path,\n trust_remote_code=trust_remote_code,\n )\n\n # Load the model tokenizer\n\n tokenizer: PreTrainedTokenizerBase = AutoTokenizer.from_pretrained(\n model_weights_name_or_path,\n add_eos_token=True,\n trust_remote_code=trust_remote_code,\n legacy=True, # This library was developed with the legacy tokenizer.\n # It might or might not work with the latest updates to the T5 tokenizers. So we set legacy=True to be safe.\n )\n\n if tokenizer.pad_token_id is None:\n if \"<|padding|>\" in tokenizer.get_vocab():\n # StabilityLM specific fix\n tokenizer.add_special_tokens({\"pad_token\": \"<|padding|>\"})\n elif tokenizer.unk_token is not None:\n logging.warning(\n \"Tokenizer does not have a pad token, we will use the unk token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.unk_token_id\n else:\n logging.warning(\n \"Tokenizer does not have a pad token. We will use the eos token as pad token.\"\n )\n tokenizer.pad_token_id = tokenizer.eos_token_id\n\n # Load the model weights\n\n # Get the quantization config\n quant_args = {}\n torch_dtype = (\n torch_dtype if torch_dtype in [\"auto\", None] else getattr(torch, torch_dtype)\n )\n\n if quantization is not None:\n quant_args = (\n {\"load_in_4bit\": True} if quantization == 4 else {\"load_in_8bit\": True}\n )\n if quantization == 4:\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n if torch_dtype in [\"auto\", None]\n else torch_dtype,\n )\n\n else:\n bnb_config = BitsAndBytesConfig(\n load_in_8bit=True,\n )\n logging.info(\n f\"Bits and Bytes config: {json.dumps(bnb_config.to_dict(),indent=4,ensure_ascii=False)}\"\n )\n else:\n logging.info(f\"Loading model with dtype: {torch_dtype}\")\n bnb_config = None\n\n # Get the correct load function for each model_type\n if config.model_type in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is a encoder-decoder model. We will load it as a Seq2SeqLM model.\"\n )\n\n load_fn = AutoModelForSeq2SeqLM\n model_type = \"seq2seq\"\n\n elif config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:\n logging.warning(\n f\"Model {model_weights_name_or_path} is an decoder-only model. 
We will load it as a CausalLM model.\"\n )\n\n load_fn = AutoModelForCausalLM\n tokenizer.padding_side = \"left\"\n model_type = \"causal\"\n\n else:\n raise ValueError(\n f\"Model {model_weights_name_or_path} of type {config.model_type} is not supported by CoLLIE.\"\n \"Supported models are:\\n\"\n f\"Seq2SeqLM: {MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES}\\n\"\n f\"CausalLM: {MODEL_FOR_CAUSAL_LM_MAPPING_NAMES}\\n\"\n )\n\n # Load the model weights\n # Flash attention 2 was added to HuggingFace transformers very recently. Let's add it as kwargs to the load function\n # so if it is set to False, we can load the model in older versions of transformers.\n if use_flash_attention:\n kwargs = {\"use_flash_attention_2\": True}\n else:\n kwargs = {}\n\n model: PreTrainedModel = load_fn.from_pretrained(\n pretrained_model_name_or_path=model_weights_name_or_path,\n device_map=device_map,\n max_memory=max_memory,\n quantization_config=bnb_config,\n torch_dtype=torch_dtype,\n config=config,\n trust_remote_code=trust_remote_code,\n **quant_args,\n **kwargs,\n )\n\n logging.info(f\"Model dtype: {model.dtype}\")\n logging.info(\n \"Total model memory footprint: \"\n + str(model.get_memory_footprint() / 1e6)\n + \" MB\"\n )\n\n # Prepare the model for k-bit training and enable gradient checkpointing\n if quantization is not None and not inference:\n from peft import prepare_model_for_kbit_training\n\n model = prepare_model_for_kbit_training(\n model, use_gradient_checkpointing=use_gradient_checkpointing\n )\n else:\n if use_gradient_checkpointing and not inference:\n model.gradient_checkpointing_enable()\n\n # Load LoRA weights\n if use_lora:\n from peft import LoraConfig, PeftModel, TaskType, get_peft_model\n\n if not inference:\n model.enable_input_require_grads() # Enables the gradients for the input embeddings\n\n if lora_weights_name_or_path is None:\n logging.info(\n \"No pretrained LORA weights provided, we will initialize the weights randomly.\"\n )\n\n if lora_target_modules is None or (\n lora_target_modules is not None and len(lora_target_modules) == 0\n ):\n logging.warning(\n \"No target modules provided, will use the default modules for the\"\n \" model in huggingface PEFT library. \"\n )\n lora_target_modules = None\n\n if lora_target_modules == [\"all\"]:\n logging.warning(\n \"You provided 'all' as target modules, we will use all the model to which LoRA can be applied.\"\n )\n lora_target_modules = find_all_linear_names(\n model, quantization=quantization\n )\n\n lora_config = LoraConfig(\n r=lora_r,\n lora_alpha=lora_alpha,\n lora_dropout=lora_dropout,\n bias=\"none\",\n task_type=TaskType.CAUSAL_LM\n if model_type == \"causal\"\n else TaskType.SEQ_2_SEQ_LM,\n target_modules=lora_target_modules,\n )\n\n model = get_peft_model(model, lora_config)\n\n else:\n logging.info(\n f\"Loading pretrained LORA weights from {lora_weights_name_or_path}\"\n )\n\n model = PeftModel.from_pretrained(model, lora_weights_name_or_path)\n\n logging.info(f\"\\nLoRA config:\\n{model.peft_config}\\n\")\n\n if inference:\n if use_lora:\n if quantization is None:\n # If we are not using quantization, we merge the LoRA layers into the model for faster inference.\n # This is not possible if we are using 4/8 bit quantization.\n logging.info(\"Merging LoRA layers into the model for faster inference.\")\n model = model.merge_and_unload()\n else:\n logging.info(\n \"Quantization is enabled, we will not merge LoRA layers into the model. 
Inference will be slower.\"\n )\n else:\n trainable_params, total_params, trainable_percentage = get_trainable_parameters(\n model\n )\n logging.info(\n f\"---> Trainable params: {trainable_params} || all params: {total_params} ||\"\n f\" trainable%: {round(trainable_percentage,6)}\\n\"\n )\n\n return model, tokenizer" }, { "identifier": "get_dataloader", "path": "dataset.py", "snippet": "def get_dataloader(\n tokenizer: PreTrainedTokenizerBase,\n split: str,\n is_encoder_decoder: bool = False,\n max_length: int = 512,\n conv_template: str = None,\n batch_size: int = 1,\n prompt_loss_weight: float = 0.05,\n add_bos_token: bool = False,\n num_workers: int = min(8, os.cpu_count()),\n pattern: str = None,\n only_affirmative: bool = False,\n only_negative: bool = False,\n only_non_distractor: bool = False,\n only_distractor: bool = False,\n) -> DataLoader:\n \"\"\"\n Get a dataloader for a dataset.\n\n Args:\n tokenizer (`PreTrainedTokenizerBase`):\n The tokenizer to use.\n split ('list'):\n The split to load (train, dev, test, all).\n is_encoder_decoder (`bool`, optional):\n Whether the model is an encoder-decoder model. Defaults to `False`.\n max_length (`int`, optional):\n The maximum length of the input. Defaults to `2048`.\n conv_template (`str`, optional):\n The conversation template to use. Defaults to `None`. If `None` we will return the prompt.\n batch_size (`int`, optional):\n The batch size. Defaults to `1`.\n prompt_loss_weight (`float`, optional):\n The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total weight\n of 5% in the loss while the result tokens will have a total weight of 95%. Defaults to `0.05`.\n add_bos_token (`bool`, optional):\n Whether to add the beginning of sentence token to the input. Defaults to `False`.\n num_workers (`int`, optional):\n The number of workers to use for the dataloader. Defaults to `0`.\n pattern (`str`, optional):\n The pattern to use for training. Defaults to `None`.\n only_affirmative (`bool`, optional):\n Whether to only load affirmative examples for training. Defaults to `False`.\n only_negative (`bool`, optional):\n Whether to only load negative examples for training. Defaults to `False`.\n only_non_distractor (`bool`, optional):\n Whether to only load non-distractor examples for training. Defaults to `False`.\n only_distractor (`bool`, optional):\n Whether to only load distractor examples for training. Defaults to `False`.\n\n\n Returns:\n `DataLoader`: The dataloader.\n \"\"\"\n\n data_collator = DataCollatorForSeq2Seq(\n tokenizer,\n padding=True,\n label_pad_token_id=-100, # tokenizer.pad_token_id,\n # pad_to_multiple_of=8, # May be faster on some hardware\n )\n\n dataset = ThisIsNotADataset(\n tokenizer=tokenizer,\n split=split,\n is_encoder_decoder=is_encoder_decoder,\n max_length=max_length,\n conv_template=conv_template,\n prompt_loss_weight=prompt_loss_weight,\n add_bos_token=add_bos_token,\n pattern=pattern,\n only_affirmative=only_affirmative,\n only_negative=only_negative,\n only_non_distractor=only_non_distractor,\n only_distractor=only_distractor,\n )\n\n return DataLoader(\n dataset,\n batch_size=batch_size,\n num_workers=num_workers,\n shuffle=split == \"train\",\n collate_fn=data_collator,\n pin_memory=True,\n )" }, { "identifier": "evaluate", "path": "evaluate.py", "snippet": "def evaluate(predictions_path: str, output_path: Optional[str] = None) -> dict:\n \"\"\"\n Evaluate the predictions of a model\n Args:\n predictions_path: Path to the predictions file. 
It should be a jsonl with the fields: 'pattern_id',\n 'pattern', 'test_id', 'negation_type', 'semantic_type', 'syntactic_scope', 'isDistractor',\n 'label', 'sentence', 'prediction'\n output_path: Path to the output file. If None, the output will be printed to stdout\n Returns:\n A dictionary with the scores\n The scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n \"\"\"\n\n print(\n \"\"\"\n*************************************** Running evaluation ***************************************\nThe scorer will output the following metrics:\n - **all_affirmations**: Accuracy of the model in affirmative sentences\n - **all_negations**: Accuracy of the model in negative sentences\n - **all**: (Overall) Accuracy of the model in all sentences\n - **input_affirmation**: Accuracy of the model in affirmative sentences without distractors\n - **input_negation**: Accuracy of the model in negative sentences without distractors\n - **distractor_affirmation**: Accuracy of the model in affirmative sentences with distractors\n - **distractor_negation**: Accuracy of the model in negative sentences with distractors\n - **Negation_analysis**: Fine-grained analysis of the model in negative sentences (verbal, analytic,\n clausal, non_verbal, synthetic, subclausal negation types)\n - **Synonymy1, Hypernymy, Part...**: Fine-grained analysis of the model in each pattern\n**************************************************************************************************\n \"\"\"\n )\n dataset_pattern = {\n \"Synonymy1\": [],\n \"Antonymy1\": [],\n \"Synonymy2\": [],\n \"Antonymy2\": [],\n \"Hypernymy\": [],\n \"Part\": [],\n \"Substance\": [],\n \"Member\": [],\n \"Agent\": [],\n \"Instrument\": [],\n \"Result\": [],\n }\n\n scorer = Scorer()\n coherence_scorer = Coherence_Scorer()\n\n coherence_scorer.from_file(predictions_path)\n with open(predictions_path, \"r\", encoding=\"utf8\") as file:\n for line in file:\n example = json.loads(line.strip())\n pattern = example[\"pattern\"]\n dataset_pattern[pattern].append(example)\n scorer.add_example(\n negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n\n scores = scorer.compute_scores()\n coherence_scorer = Coherence_Scorer.from_file(predictions_path)\n scores[\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n for pattern in dataset_pattern:\n scorer = Scorer()\n coherence_scorer = Coherence_Scorer()\n coherence_scorer.add_pattern(dataset_pattern[pattern])\n for example in dataset_pattern[pattern]:\n scorer.add_example(\n 
negation_type=example[\"negation_type\"],\n semantic_type=example[\"semantic_type\"],\n syntactic_scope=example[\"syntactic_scope\"],\n isDistractor=example[\"isDistractor\"],\n gold_label=example[\"label\"],\n predicted_label=example[\"prediction\"],\n )\n scores[pattern] = scorer.compute_scores()\n scores[pattern][\"coherence_scores\"] = coherence_scorer.compute_scores()\n\n if output_path is not None:\n print(f\"Saving scores to {output_path}\")\n with open(output_path, \"w\", encoding=\"utf8\") as file:\n print(json.dumps(scores, ensure_ascii=False, indent=4), file=file)\n else:\n print(json.dumps(scores, ensure_ascii=False, indent=4))\n\n print(\"*** Evaluation finished ***\")\n return scores" }, { "identifier": "DataTrainingArguments", "path": "config.py", "snippet": "class DataTrainingArguments:\n \"\"\"\n Arguments pertaining to what data we are going to input our model for training and eval.\n \"\"\"\n\n do_predict_full_dataset: bool = field(\n default=False,\n metadata={\n \"help\": \"Whether to run predictions on the full dataset. If True, the model will be evaluated on the \"\n \"full dataset. If False, the model will be evaluated on the test set. Defaults to False.\"\n },\n )\n max_seq_length: int = field(\n default=512,\n metadata={\n \"help\": (\n \"The maximum total input sequence length after tokenization. Sequences\"\n \" longer than this will be truncated, sequences shorter will be padded.\"\n )\n },\n )\n\n prompt_loss_weight: float = field(\n default=0.05,\n metadata={\n \"help\": (\n \"The weight of the prompt tokens in the loss. If set to '0.05' the prompt tokens will have a total\"\n \" weight of 5% in the loss while the result tokens will have a total weight of 95%. Only used for\"\n \" computing the loss in the training data. Defaults to `0.05`.\"\n )\n },\n )\n\n force_auto_device_map: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to force the use of the auto device map. If set to True, the model will be split across \"\n \"GPUs and CPU to fit the model in memory. If set to False, a full copy of the model will be loaded \"\n \"into each GPU. Defaults to False.\"\n )\n },\n )\n\n pattern: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"The pattern to use for training. If not specified, all patterns will be used.\"\n ),\n \"choices\": [\n \"Synonymy1\",\n \"Antonymy1\",\n \"Synonymy2\",\n \"Antonymy2\",\n \"Hypernymy\",\n \"Part\",\n \"Substance\",\n \"Member\",\n \"Agent\",\n \"Instrument\",\n \"Result\",\n ],\n },\n )\n\n only_affirmative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load affirmative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_negative: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load negative examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_non_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load non-distractor examples for training. Defaults to `False`.\"\n )\n },\n )\n\n only_distractor: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to only load distractor examples for training. 
Defaults to `False`.\"\n )\n },\n )" }, { "identifier": "ModelArguments", "path": "config.py", "snippet": "class ModelArguments:\n \"\"\"\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.\n \"\"\"\n\n model_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The local path or huggingface hub name of the model and tokenizer to use.\"\n },\n )\n\n torch_dtype: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"Override the default `torch.dtype` and load the model under this\"\n \" dtype. If `auto` is passed, the dtype will be automatically derived\"\n \" from the model's weights.\"\n ),\n \"choices\": [\"auto\", \"bfloat16\", \"float16\", \"float32\"],\n },\n )\n\n use_lora: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use LoRA. If True, the model will be trained with LoRA: https://arxiv.org/abs/2106.09685\"\n )\n },\n )\n\n quantization: Optional[int] = field(\n default=None,\n metadata={\n \"help\": (\n \"Whether to use '4' or '8' bit quantization. Requires bitsandbytes library:\"\n \" https://github.com/TimDettmers/bitsandbytes\"\n )\n },\n )\n lora_weights_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": (\n \"If the model has been trained with LoRA, \"\n \"path or huggingface hub name or local path to the pretrained weights.\"\n )\n },\n )\n\n lora_r: Optional[int] = field(\n default=8,\n metadata={\"help\": \"Lora attention dimension.\"},\n )\n\n lora_alpha: Optional[float] = field(\n default=16,\n metadata={\"help\": \"The alpha parameter for Lora scaling.\"},\n )\n lora_dropout: Optional[float] = field(\n default=0.05,\n metadata={\"help\": \"The dropout probability for Lora layers.\"},\n )\n\n lora_target_modules: Optional[List[str]] = field(\n default_factory=list,\n metadata={\n \"help\": (\n \"The target modules to which LoRA will be applied. If not specified, We\"\n \" will use the default modules for the model in huggingface PEFT library.\"\n )\n },\n )\n\n conversation_template: str = field(\n default=None,\n metadata={\n \"help\": (\n \"The config template to use to generate conversations. See \"\n \"https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py for more details\"\n )\n },\n )\n\n add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to add the BOS token to the beginning of the prompt (Encoder-only models). Defaults to False.\"\n )\n },\n )\n\n use_flash_attention: bool = field(\n default=False,\n metadata={\n \"help\": (\n \"Whether to use the FlashAttention. If True, we will use FlashAttention. Be careful, not all models \"\n \"support FlashAttention. See https://github.com/huggingface/transformers/issues/26350. 
\"\n \"Defaults to False.\"\n )\n },\n )" }, { "identifier": "get_optimizer", "path": "optimizer.py", "snippet": "def get_optimizer(training_args: Seq2SeqTrainingArguments, model: PreTrainedModel):\n decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)\n decay_parameters = [name for name in decay_parameters if \"bias\" not in name]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": training_args.weight_decay,\n },\n {\n \"params\": [\n p\n for n, p in model.named_parameters()\n if (n not in decay_parameters and p.requires_grad)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n\n optimizer_kwargs = {\"lr\": training_args.learning_rate}\n\n adam_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2),\n \"eps\": training_args.adam_epsilon,\n }\n if training_args.optim == OptimizerNames.ADAFACTOR:\n from transformers.optimization import Adafactor\n\n optimizer_cls = Adafactor\n optimizer_kwargs.update({\"scale_parameter\": False, \"relative_step\": False})\n elif training_args.optim == OptimizerNames.ADAMW_HF:\n from transformers.optimization import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n elif training_args.optim in [\n OptimizerNames.ADAMW_TORCH,\n OptimizerNames.ADAMW_TORCH_FUSED,\n ]:\n from torch.optim import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n if training_args.optim == OptimizerNames.ADAMW_TORCH_FUSED:\n optimizer_kwargs.update({\"fused\": True})\n elif training_args.optim == OptimizerNames.ADAMW_TORCH_XLA:\n try:\n from torch_xla.amp.syncfree import AdamW\n\n optimizer_cls = AdamW\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\"Trainer failed to import syncfree AdamW from torch_xla.\")\n elif training_args.optim == OptimizerNames.ADAMW_APEX_FUSED:\n try:\n from apex.optimizers import FusedAdam\n\n optimizer_cls = FusedAdam\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate apex FusedAdam but apex is not installed!\"\n )\n elif training_args.optim in [\n OptimizerNames.ADAMW_BNB,\n OptimizerNames.ADAMW_8BIT,\n OptimizerNames.PAGED_ADAMW,\n OptimizerNames.PAGED_ADAMW_8BIT,\n OptimizerNames.LION,\n OptimizerNames.LION_8BIT,\n OptimizerNames.PAGED_LION,\n OptimizerNames.PAGED_LION_8BIT,\n ]:\n try:\n from bitsandbytes.optim import AdamW, Lion\n\n is_paged = False\n optim_bits = 32\n optimizer_cls = None\n additional_optim_kwargs = adam_kwargs\n if \"paged\" in training_args.optim:\n is_paged = True\n if \"8bit\" in training_args.optim:\n optim_bits = 8\n if \"adam\" in training_args.optim:\n optimizer_cls = AdamW\n elif \"lion\" in training_args.optim:\n optimizer_cls = Lion\n additional_optim_kwargs = {\n \"betas\": (training_args.adam_beta1, training_args.adam_beta2)\n }\n\n bnb_kwargs = {\"is_paged\": is_paged, \"optim_bits\": optim_bits}\n optimizer_kwargs.update(additional_optim_kwargs)\n optimizer_kwargs.update(bnb_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb optimizer but bnb is not installed!\"\n )\n elif training_args.optim == OptimizerNames.ADAMW_BNB:\n try:\n from bitsandbytes.optim import Adam8bit\n\n optimizer_cls = Adam8bit\n optimizer_kwargs.update(adam_kwargs)\n except ImportError:\n raise ValueError(\n \"Trainer tried to instantiate bnb Adam8bit but bnb is not installed!\"\n )\n elif training_args.optim == 
OptimizerNames.ADAMW_ANYPRECISION:\n raise NotImplementedError(\"AdamWAnyprecision is not supported\")\n elif training_args.optim == OptimizerNames.SGD:\n optimizer_cls = torch.optim.SGD\n elif training_args.optim == OptimizerNames.ADAGRAD:\n optimizer_cls = torch.optim.Adagrad\n else:\n raise ValueError(\n f\"Trainer cannot instantiate unsupported optimizer: {training_args.optim}\"\n )\n\n optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n if optimizer_cls.__name__ == \"Adam8bit\":\n import bitsandbytes\n\n manager = bitsandbytes.optim.GlobalOptimManager.get_instance()\n\n skipped = 0\n for module in model.modules():\n if isinstance(module, nn.Embedding):\n skipped += sum(\n {p.data_ptr(): p.numel() for p in module.parameters()}.values()\n )\n print(f\"skipped {module}: {skipped / 2 ** 20}M params\")\n manager.register_module_override(module, \"weight\", {\"optim_bits\": 32})\n print(f\"bitsandbytes: will optimize {module} in fp32\")\n print(f\"skipped: {skipped / 2 ** 20}M params\")\n\n return optimizer" } ]
from load_model import load_model from dataset import get_dataloader from evaluate import evaluate from config import DataTrainingArguments, ModelArguments from transformers import ( HfArgumentParser, Seq2SeqTrainingArguments, set_seed, get_scheduler, ) from tqdm import tqdm from accelerate import Accelerator, find_executable_batch_size from typing import List from optimizer import get_optimizer from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from transformers.modeling_utils import unwrap_model import torch import os import wandb import gc import json import math import sys import logging
10,966
# Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # print(preds) for pred in preds: pred = pred.lower() if "true" in pred: all_preds.append(True) else: all_preds.append(False) if accelerator.is_local_main_process: with open(output_path, "w", encoding="utf8") as f: for pred in all_preds if not return_scores else all_scores: print(pred, file=f) if not return_scores: json_dataset = dataloader.dataset.get_jsonl() assert len(json_dataset) == len(all_preds) with open( os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8" ) as f: for json_line, pred in zip(json_dataset, all_preds): json_line["prediction"] = bool(pred) print(json.dumps(json_line, ensure_ascii=False), file=f) model.train() def main( model_args: ModelArguments, data_args: DataTrainingArguments, training_args: Seq2SeqTrainingArguments, ): assert ( training_args.do_train or training_args.do_predict ), "You must specify do_train or do_predict" assert not (training_args.do_train and data_args.do_predict_full_dataset), ( "You cannot do both training and predict_full_dataset, " "as the model will be evaluated on the full dataset, which" " includes the training set." ) logging.basicConfig(level=logging.INFO) accelerator = Accelerator() print(f"Accelerator State: {accelerator.state}") set_seed(training_args.seed) if training_args.do_train: model, tokenizer = load_model( inference=False, model_weights_name_or_path=model_args.model_name_or_path, lora_weights_name_or_path=model_args.lora_weights_name_or_path, quantization=model_args.quantization, use_lora=model_args.use_lora, lora_target_modules=model_args.lora_target_modules, torch_dtype=model_args.torch_dtype, force_auto_device_map=data_args.force_auto_device_map, use_flash_attention=model_args.use_flash_attention, use_gradient_checkpointing=model_args.use_lora, ) true_tokens_ids = tokenizer.encode("True", add_special_tokens=False) false_tokens_ids = tokenizer.encode("False", add_special_tokens=False) train_dataloader = get_dataloader( tokenizer=tokenizer, split="train", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) dev_dataloader = None if training_args.do_eval: dev_dataloader = get_dataloader( tokenizer=tokenizer, split="validation", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) if accelerator.is_main_process: wandb.init( project="ThisIsNotADataset", name=f"{os.path.basename(training_args.output_dir)}", config=vars(training_args), ) num_update_steps_per_epoch = math.ceil( len(train_dataloader) / 
training_args.gradient_accumulation_steps ) max_train_steps = int( training_args.num_train_epochs * num_update_steps_per_epoch )
def clean_cache(): """Clean cache to avoid memory leak. This fixes this issue: https://github.com/huggingface/transformers/issues/22801""" print(f"Cleaning GPU memory. Current memory usage: {torch.cuda.memory_allocated()}") torch.cuda.empty_cache() gc.collect() torch.cuda.empty_cache() print(f"GPU memory usage after cleaning: {torch.cuda.memory_allocated()}") def compute_loss(model, inputs, return_outputs=False): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for custom behavior. """ if "labels" in inputs: labels = inputs.pop("labels") else: raise ValueError("You should supply a labels key to compute the loss") if "loss_weight_mask" in inputs: loss_weight_mask = inputs.pop("loss_weight_mask") else: raise ValueError("You should supply a loss_weight_mask key to compute the loss") if unwrap_model(model).config.is_encoder_decoder: outputs = model(labels=labels, **inputs) else: outputs = model(**inputs) logits = outputs["logits"] if isinstance(outputs, dict) else outputs[0] model_name = unwrap_model(model)._get_name() if ( model_name in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values() or model_name == "PeftModelForCausalLM" ): logits = logits[..., :-1, :].contiguous() labels = labels[..., 1:].contiguous() loss_weight_mask = loss_weight_mask[..., 1:].contiguous() logits = logits.view(-1, logits.size(-1)) labels = labels.view(-1) loss_weight_mask = loss_weight_mask.view(-1) loss_fct = torch.nn.CrossEntropyLoss(reduction="none", ignore_index=-100) loss = loss_fct(logits, labels) loss = torch.sum(loss * loss_weight_mask) / torch.sum(loss_weight_mask) return (loss, outputs) if return_outputs else loss def gen_predictions( model, tokenizer, true_tokens_ids: List[int], false_tokens_ids: List[int], dataloader, output_path, accelerator, print_first=False, predict_with_generate=False, return_scores=False, ): if predict_with_generate and return_scores: raise ValueError( "return_scores is not supported when predict_with_generate is True" ) model.eval() with torch.no_grad(): samples_seen: int = 0 yes_id = true_tokens_ids[0] no_id = false_tokens_ids[0] all_preds = [] all_scores = [] first = True for step, batch in enumerate( tqdm(dataloader, f"Inference on {os.path.basename(output_path)}") ): if print_first and accelerator.is_local_main_process: ### DEBUG ### if print_first and first and accelerator.is_main_process: decodeable_inputs = batch.input_ids.clone() decodeable_inputs[ decodeable_inputs == -100 ] = tokenizer.pad_token_id model_inputs = "\n".join( tokenizer.batch_decode( decodeable_inputs, skip_special_tokens=False, clean_up_tokenization_spaces=False, ) ) print(f"*** Sample of batch 0 ***") print(f"-- Model inputs --\n{model_inputs}") print(f"*** End of sample ***\n") first = False if not predict_with_generate: if not model.config.is_encoder_decoder: logits = model( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ).logits else: encoder_output = model.get_encoder()( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], ) decoder_args = { "attention_mask": batch["attention_mask"], "use_cache": False, "encoder_outputs": encoder_output, } gen_inputs = model.prepare_inputs_for_generation( input_ids=torch.tensor( [[tokenizer.pad_token_id]] * len(batch["input_ids"]) ).to(batch["input_ids"].device), **decoder_args, ) logits = model( **gen_inputs, ).logits logits = logits[:, -1, :] logits = torch.nn.functional.softmax(logits, dim=-1) logits = logits[:, [yes_id, no_id]] logits = logits[:, 0] / 
(logits[:, 0] + logits[:, 1]) preds = logits > 0.5 preds = accelerator.gather(preds).cpu().tolist() logits = accelerator.gather(logits).cpu().tolist() if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] logits = logits[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) all_preds.extend(preds) all_scores.extend(logits) else: preds = model.generate( input_ids=batch["input_ids"], attention_mask=batch["attention_mask"], max_new_tokens=6, ) preds = accelerator.gather( accelerator.pad_across_processes( preds, dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() inputs_ids = accelerator.gather( accelerator.pad_across_processes( batch["input_ids"], dim=1, pad_index=tokenizer.pad_token_id, ) ).cpu() preds = preds[:, len(inputs_ids[0]) :] if accelerator.is_local_main_process: if accelerator.num_processes > 1: # Remove duplicated in last batch if we are in a distributed setting if step == len(dataloader) - 1: preds = preds[: (len(dataloader.dataset) - samples_seen)] else: samples_seen += len(batch) preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # print(preds) for pred in preds: pred = pred.lower() if "true" in pred: all_preds.append(True) else: all_preds.append(False) if accelerator.is_local_main_process: with open(output_path, "w", encoding="utf8") as f: for pred in all_preds if not return_scores else all_scores: print(pred, file=f) if not return_scores: json_dataset = dataloader.dataset.get_jsonl() assert len(json_dataset) == len(all_preds) with open( os.path.splitext(output_path)[0] + ".jsonl", "w", encoding="utf8" ) as f: for json_line, pred in zip(json_dataset, all_preds): json_line["prediction"] = bool(pred) print(json.dumps(json_line, ensure_ascii=False), file=f) model.train() def main( model_args: ModelArguments, data_args: DataTrainingArguments, training_args: Seq2SeqTrainingArguments, ): assert ( training_args.do_train or training_args.do_predict ), "You must specify do_train or do_predict" assert not (training_args.do_train and data_args.do_predict_full_dataset), ( "You cannot do both training and predict_full_dataset, " "as the model will be evaluated on the full dataset, which" " includes the training set." 
) logging.basicConfig(level=logging.INFO) accelerator = Accelerator() print(f"Accelerator State: {accelerator.state}") set_seed(training_args.seed) if training_args.do_train: model, tokenizer = load_model( inference=False, model_weights_name_or_path=model_args.model_name_or_path, lora_weights_name_or_path=model_args.lora_weights_name_or_path, quantization=model_args.quantization, use_lora=model_args.use_lora, lora_target_modules=model_args.lora_target_modules, torch_dtype=model_args.torch_dtype, force_auto_device_map=data_args.force_auto_device_map, use_flash_attention=model_args.use_flash_attention, use_gradient_checkpointing=model_args.use_lora, ) true_tokens_ids = tokenizer.encode("True", add_special_tokens=False) false_tokens_ids = tokenizer.encode("False", add_special_tokens=False) train_dataloader = get_dataloader( tokenizer=tokenizer, split="train", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) dev_dataloader = None if training_args.do_eval: dev_dataloader = get_dataloader( tokenizer=tokenizer, split="validation", is_encoder_decoder=model.config.is_encoder_decoder, max_length=data_args.max_seq_length, conv_template=model_args.conversation_template, batch_size=training_args.per_device_train_batch_size, prompt_loss_weight=data_args.prompt_loss_weight, add_bos_token=model_args.add_bos_token, pattern=data_args.pattern, only_negative=data_args.only_negative, only_affirmative=data_args.only_affirmative, only_distractor=data_args.only_non_distractor, only_non_distractor=data_args.only_non_distractor, ) if accelerator.is_main_process: wandb.init( project="ThisIsNotADataset", name=f"{os.path.basename(training_args.output_dir)}", config=vars(training_args), ) num_update_steps_per_epoch = math.ceil( len(train_dataloader) / training_args.gradient_accumulation_steps ) max_train_steps = int( training_args.num_train_epochs * num_update_steps_per_epoch )
optimizer = get_optimizer(training_args=training_args, model=model)
5
2023-10-18 10:24:48+00:00
16k
Glasgow-AI4BioMed/GenKIE
tasks/pretrain_tasks/unify_task.py
[ { "identifier": "OFATask", "path": "tasks/ofa_task.py", "snippet": "class OFATask(FairseqTask):\n def __init__(self, cfg: OFAConfig, src_dict, tgt_dict):\n super().__init__(cfg)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, cfg: DictConfig, **kwargs):\n \"\"\"Setup the task.\"\"\"\n\n # load dictionaries\n src_dict = cls.load_dictionary(\n os.path.join(cfg.bpe_dir, \"dict.txt\")\n )\n tgt_dict = cls.load_dictionary(\n os.path.join(cfg.bpe_dir, \"dict.txt\")\n )\n src_dict.add_symbol(\"<mask>\")\n tgt_dict.add_symbol(\"<mask>\")\n for i in range(cfg.code_dict_size):\n src_dict.add_symbol(\"<code_{}>\".format(i))\n tgt_dict.add_symbol(\"<code_{}>\".format(i))\n # quantization\n for i in range(cfg.num_bins):\n src_dict.add_symbol(\"<bin_{}>\".format(i))\n tgt_dict.add_symbol(\"<bin_{}>\".format(i))\n\n src_dict.add_symbol(\"<dsep>\")\n tgt_dict.add_symbol(\"<dsep>\")\n\n # self.sep_index = self.add_symbol('50257')\n src_dict.add_symbol(\"50257\")\n tgt_dict.add_symbol(\"50257\")\n\n logger.info(\"source dictionary: {} types\".format(len(src_dict)))\n logger.info(\"target dictionary: {} types\".format(len(tgt_dict)))\n return cls(cfg, src_dict, tgt_dict)\n\n def get_batch_iterator(\n self,\n dataset,\n max_tokens=None,\n max_sentences=None,\n max_positions=None,\n ignore_invalid_inputs=False,\n required_batch_size_multiple=1,\n seed=1,\n num_shards=1,\n shard_id=0,\n num_workers=0,\n epoch=1,\n data_buffer_size=0,\n disable_iterator_cache=False,\n ):\n assert isinstance(dataset, FairseqDataset)\n\n # initialize the dataset with the correct starting epoch\n dataset.set_epoch(epoch)\n\n # create mini-batches with given size constraints\n batch_sampler = [\n [j for j in range(i, min(i + max_sentences, len(dataset)))]\n for i in range(0, len(dataset), max_sentences)\n ]\n total_row_count = dataset.dataset.get_total_row_count()\n num_batches = math.ceil(math.ceil(total_row_count / num_shards) / max_sentences)\n if len(batch_sampler) < num_batches:\n batch_sampler.append([])\n\n # return a reusable, sharded iterator\n epoch_iter = iterators.EpochBatchIterator(\n dataset=dataset,\n collate_fn=dataset.collater,\n batch_sampler=batch_sampler,\n seed=seed,\n num_shards=1,\n shard_id=0,\n num_workers=num_workers,\n epoch=epoch,\n buffer_size=data_buffer_size\n )\n\n return epoch_iter\n\n def build_model(self, cfg: FairseqDataclass):\n model = super().build_model(cfg)\n if self.cfg.bpe == 'bert': # self.cfg.bpe=None\n bpe_dict = {\n \"_name\": \"bert\",\n \"bpe_vocab_file\": os.path.join(self.cfg.bpe_dir, \"vocab.txt\"),\n \"bpe_cased\": False\n }\n bpe_dict = DictConfig(bpe_dict)\n self.bpe = self.build_bpe(bpe_dict)\n else:\n bpe_dict = {\n \"_name\": \"gpt2\",\n \"gpt2_encoder_json\": os.path.join(self.cfg.bpe_dir, \"encoder.json\"),\n \"gpt2_vocab_bpe\": os.path.join(self.cfg.bpe_dir, \"vocab.bpe\")\n }\n bpe_dict = DictConfig(bpe_dict)\n self.bpe = self.build_bpe(bpe_dict)\n return model\n\n def build_generator(\n self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None,\n ):\n \"\"\"\n Build a :class:`~fairseq.SequenceGenerator` instance for this\n task.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n args (fairseq.dataclass.configs.GenerationConfig):\n configuration object (dataclass) for generation\n extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass\n through to SequenceGenerator\n prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]):\n If provided, this 
function constrains the beam search to\n allowed tokens only at each step. The provided function\n should take 2 arguments: the batch ID (`batch_id: int`)\n and a unidimensional tensor of token ids (`inputs_ids:\n torch.Tensor`). It has to return a `List[int]` with the\n allowed tokens for the next generation step conditioned\n on the previously generated tokens (`inputs_ids`) and\n the batch ID (`batch_id`). This argument is useful for\n constrained generation conditioned on the prefix, as\n described in \"Autoregressive Entity Retrieval\"\n (https://arxiv.org/abs/2010.00904) and\n https://github.com/facebookresearch/GENRE.\n \"\"\"\n if getattr(args, \"score_reference\", False):\n from fairseq.sequence_scorer import SequenceScorer\n\n return SequenceScorer(\n self.target_dictionary,\n compute_alignment=getattr(args, \"print_alignment\", False),\n )\n\n from fairseq.sequence_generator import (\n # SequenceGenerator,\n SequenceGeneratorWithAlignment,\n )\n from models.sequence_generator import SequenceGenerator\n\n # Choose search strategy. Defaults to Beam Search.\n sampling = getattr(args, \"sampling\", False)\n sampling_topk = getattr(args, \"sampling_topk\", -1)\n sampling_topp = getattr(args, \"sampling_topp\", -1.0)\n diverse_beam_groups = getattr(args, \"diverse_beam_groups\", -1)\n diverse_beam_strength = getattr(args, \"diverse_beam_strength\", 0.5)\n match_source_len = getattr(args, \"match_source_len\", False)\n diversity_rate = getattr(args, \"diversity_rate\", -1)\n constrained = getattr(args, \"constraints\", False)\n if prefix_allowed_tokens_fn is None:\n prefix_allowed_tokens_fn = getattr(args, \"prefix_allowed_tokens_fn\", None)\n if (\n sum(\n int(cond)\n for cond in [\n sampling,\n diverse_beam_groups > 0,\n match_source_len,\n diversity_rate > 0,\n ]\n )\n > 1\n ):\n raise ValueError(\"Provided Search parameters are mutually exclusive.\")\n assert sampling_topk < 0 or sampling, \"--sampling-topk requires --sampling\"\n assert sampling_topp < 0 or sampling, \"--sampling-topp requires --sampling\"\n\n if sampling:\n search_strategy = search.Sampling(\n self.target_dictionary, sampling_topk, sampling_topp\n )\n elif diverse_beam_groups > 0:\n search_strategy = search.DiverseBeamSearch(\n self.target_dictionary, diverse_beam_groups, diverse_beam_strength\n )\n elif match_source_len:\n # this is useful for tagging applications where the output\n # length should match the input length, so we hardcode the\n # length constraints for simplicity\n search_strategy = search.LengthConstrainedBeamSearch(\n self.target_dictionary,\n min_len_a=1,\n min_len_b=0,\n max_len_a=1,\n max_len_b=0,\n )\n elif diversity_rate > -1:\n search_strategy = search.DiverseSiblingsSearch(\n self.target_dictionary, diversity_rate\n )\n elif constrained:\n search_strategy = search.LexicallyConstrainedBeamSearch(\n self.target_dictionary, args.constraints\n )\n elif prefix_allowed_tokens_fn:\n search_strategy = search.PrefixConstrainedBeamSearch(\n self.target_dictionary, prefix_allowed_tokens_fn\n )\n else:\n search_strategy = search.BeamSearch(self.target_dictionary)\n\n extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}\n if seq_gen_cls is None:\n if getattr(args, \"print_alignment\", False):\n seq_gen_cls = SequenceGeneratorWithAlignment\n extra_gen_cls_kwargs[\"print_alignment\"] = args.print_alignment\n else:\n seq_gen_cls = SequenceGenerator\n\n return seq_gen_cls(\n models,\n self.target_dictionary,\n beam_size=getattr(args, \"beam\", 5),\n max_len_a=getattr(args, \"max_len_a\", 0),\n 
max_len_b=getattr(args, \"max_len_b\", 200),\n min_len=getattr(args, \"min_len\", 1),\n normalize_scores=(not getattr(args, \"unnormalized\", False)),\n len_penalty=getattr(args, \"lenpen\", 1),\n unk_penalty=getattr(args, \"unkpen\", 0),\n temperature=getattr(args, \"temperature\", 1.0),\n match_source_len=getattr(args, \"match_source_len\", False),\n no_repeat_ngram_size=getattr(args, \"no_repeat_ngram_size\", 0),\n search_strategy=search_strategy,\n constraint_range=self.cfg.constraint_range,\n **extra_gen_cls_kwargs,\n )\n\n def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False, **extra_kwargs\n ):\n \"\"\"\n Do forward and backward, and return the loss as computed by *criterion*\n for the given *model* and *sample*.\n\n Args:\n sample (dict): the mini-batch. The format is defined by the\n :class:`~fairseq.data.FairseqDataset`.\n model (~fairseq.models.BaseFairseqModel): the model\n criterion (~fairseq.criterions.FairseqCriterion): the criterion\n optimizer (~fairseq.optim.FairseqOptimizer): the optimizer\n update_num (int): the current update\n ignore_grad (bool): multiply loss by 0 if this is set to True\n\n Returns:\n tuple:\n - the loss\n - the sample size, which is used as the denominator for the\n gradient\n - logging outputs to display while training\n \"\"\"\n model.train()\n model.set_num_updates(update_num)\n with torch.autograd.profiler.record_function(\"forward\"):\n with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):\n loss, sample_size, logging_output = criterion(model, sample, update_num=update_num)\n if ignore_grad:\n loss *= 0\n with torch.autograd.profiler.record_function(\"backward\"):\n optimizer.backward(loss)\n return loss, sample_size, logging_output\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.cfg.max_source_positions, self.cfg.max_target_positions)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict" }, { "identifier": "OFAConfig", "path": "tasks/ofa_task.py", "snippet": "class OFAConfig(FairseqDataclass):\n data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"comma separated path to data list, will be iterated upon during epochs \"\n \"in round-robin manner; valid data are always in the last\"\n },\n )\n selected_cols: Optional[str] = field(\n default=None,\n metadata={\"help\": \"selected cols\"},\n )\n bpe: Optional[str] = field(\n default='gpt2',\n metadata={\"help\": \"which bpe to use\"},\n )\n bpe_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"bpe dir\"},\n )\n max_source_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the source sequence\"}\n )\n max_target_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the target sequence\"}\n )\n max_src_length: int = field(\n default=128, metadata={\"help\": \"the maximum src sequence length\"}\n )\n max_tgt_length: int = field(\n default=30, metadata={\"help\": \"the maximum target sequence length\"}\n )\n\n code_dict_size: int = field(\n default=8192, metadata={\"help\": \"code dict size\"}\n )\n patch_image_size: int = field(\n default=480, metadata={\"help\": \"patch image size\"}\n )\n orig_patch_image_size: int = field(\n default=256, metadata={\"help\": 
\"patch image size\"}\n )\n num_bins: int = field(\n default=1000, metadata={\"help\": \"number of quantization bins\"}\n )\n\n imagenet_default_mean_and_std: bool = field(\n default=False,\n metadata={\"help\": \"imagenet normalize\"},\n )\n constraint_range: Optional[str] = field(\n default=None,\n metadata={\"help\": \"constraint range\"}\n )" }, { "identifier": "UnifyDataset", "path": "data/pretrain_data/unify_dataset.py", "snippet": "class UnifyDataset(OFADataset):\n def __init__(\n self,\n split,\n dataset,\n bpe,\n src_dict,\n tgt_dict=None,\n max_src_length=128,\n max_tgt_length=30,\n seed=7,\n code_dict_size=8192,\n num_bins=1000,\n patch_image_size=384,\n code_image_size=128,\n pure_text_dataset=None,\n pure_image_dataset=None,\n detection_dataset=None,\n all_object_list=None,\n all_caption_list=None,\n type2ans_dict=None,\n ans2type_dict=None,\n max_image_size=512,\n mask_ratio=0.3,\n random_ratio=0.0,\n keep_ratio=0.0,\n mask_length=\"span-poisson\",\n poisson_lambda=3.0,\n replace_length=1\n ):\n super().__init__(split, dataset, bpe, src_dict, tgt_dict)\n self.max_src_length = max_src_length\n self.max_tgt_length = max_tgt_length\n self.seed = seed\n self.code_dict_size = code_dict_size\n self.num_bins = num_bins\n self.patch_image_size = patch_image_size\n self.code_image_size = code_image_size\n\n self.pure_text_dataset = pure_text_dataset\n self.pure_image_dataset = pure_image_dataset\n self.detection_dataset = detection_dataset\n self.epoch = 0\n\n self.all_object_list = all_object_list\n self.all_caption_list = all_caption_list\n self.type2ans_dict = type2ans_dict\n self.ans2type_dict = ans2type_dict\n\n self.mask_ratio = mask_ratio\n self.random_ratio = random_ratio\n self.keep_ratio = keep_ratio\n self.mask_length = mask_length\n self.poisson_lambda = poisson_lambda\n self.replace_length = replace_length\n if self.replace_length not in [-1, 0, 1]:\n raise ValueError(f\"invalid arg: replace_length={self.replace_length}\")\n if self.mask_length not in [\"subword\", \"word\", \"span-poisson\"]:\n raise ValueError(f\"invalid arg: mask-length={self.mask_length}\")\n if self.mask_length == \"subword\" and self.replace_length not in [0, 1]:\n raise ValueError(f\"if using subwords, use replace-length=1 or 0\")\n\n self.mask_idx = src_dict.index(\"<mask>\")\n self.mask_whole_word = (\n get_whole_word_mask(self.bpe, self.src_dict)\n if self.mask_length != \"subword\"\n else None\n )\n self.mask_span_distribution = None\n if self.mask_length == \"span-poisson\":\n _lambda = self.poisson_lambda\n lambda_to_the_k = 1\n e_to_the_minus_lambda = math.exp(-_lambda)\n k_factorial = 1\n ps = []\n for k in range(0, 128):\n ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)\n lambda_to_the_k *= _lambda\n k_factorial *= k + 1\n if ps[-1] < 0.0000001:\n break\n ps = torch.FloatTensor(ps)\n self.mask_span_distribution = torch.distributions.Categorical(ps)\n\n self.pos_tgt_item = self.encode_text(\" yes\")\n self.neg_tgt_item = self.encode_text(\" no\")\n\n self.mask_left = self.mask_top = int(0.5 * self.code_image_size)\n self.mask_right = self.mask_bottom = int(1.5 * self.code_image_size)\n self.mask_ids = [\n i*self.code_image_size*2+j\n for i in range(self.code_image_size*2) for j in range(self.code_image_size*2)\n if not (self.mask_left <= i < self.mask_right and self.mask_top <= j < self.mask_bottom)\n ]\n\n scales = np.arange(patch_image_size, 481).tolist()\n\n # for image-text pair\n self.patch_resize_transform = transforms.Compose([\n T.RandomResize(scales, 
max_size=672),\n transforms.CenterCrop(patch_image_size),\n RandomAugment(2, 7, isPIL=True, augs=['Identity', 'AutoContrast', 'Equalize', 'Brightness', 'Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n # for pure image\n self.patch_crop_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n # for detection\n self.detection_transform = T.Compose([\n T.RandomHorizontalFlip(),\n T.LargeScaleJitter(output_size=self.code_image_size*2, aug_scale_min=1.0, aug_scale_max=1.5),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_image_size=max_image_size)\n ])\n # for visual grounding\n self.visual_grounding_transform = T.Compose([\n T.RandomResize(scales, max_size=672),\n T.ObjectCenterCrop((patch_image_size, patch_image_size)),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_image_size=max_image_size)\n ])\n\n def set_epoch(self, epoch, **unused):\n self.epoch = epoch\n\n def get_negative_caption(self, caption, gt_objects):\n prob = random.random()\n if gt_objects is not None and gt_objects != '' and prob > 0.6:\n gt_object = random.choice(gt_objects.strip().split('&&'))\n negative_object = random.choice(self.all_object_list[:-1])\n negative_object = self.all_object_list[-1] if negative_object == gt_object else negative_object\n negative_caption = caption.replace(gt_object, negative_object)\n else:\n negative_caption = random.choice(self.all_caption_list)\n return negative_caption\n\n def get_negative_answer(self, answer, conf):\n prob = random.random()\n if conf > (prob + 0.1) and answer in self.ans2type_dict:\n negative_answer_type = self.ans2type_dict[answer]\n if negative_answer_type == 'how many' and answer.isdigit() and prob > 0.5:\n negative_answer = int(answer) + random.choice([-1, 1]) if answer != 0 else 1\n else:\n negative_answer_list = self.type2ans_dict[negative_answer_type]\n negative_answer = random.choice(negative_answer_list[:-1])\n negative_answer = negative_answer_list[-1] if negative_answer == answer else negative_answer\n return negative_answer\n\n negative_answer_list = self.type2ans_dict['other']\n negative_answer = random.choice(negative_answer_list[:-1])\n negative_answer = negative_answer_list[-1] if negative_answer == answer else negative_answer\n return negative_answer\n\n def process_image_text_pair(self, index):\n uniq_id, image, caption, question, refs, gt_objects, dataset_name, type = self.dataset[index]\n\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n patch_image = self.patch_resize_transform(image) if type != 'visual_grounding' else None\n patch_mask = torch.tensor([True])\n conf = torch.tensor([1.0])\n if type == 'caption':\n tgt_caption = self.pre_caption(caption, self.max_tgt_length)\n pos_src_caption = self.pre_caption(caption, self.max_src_length)\n neg_src_caption = self.pre_caption(self.get_negative_caption(caption, gt_objects), self.max_src_length)\n src_item = self.encode_text(\" what does the image describe?\")\n tgt_item = self.encode_text(\" {}\".format(tgt_caption))\n pos_src_item = self.encode_text(' does the image describe \" {} \"?'.format(pos_src_caption))\n neg_src_item = self.encode_text(' does the image describe \" {} \"?'.format(neg_src_caption))\n elif type == 'qa':\n question = self.pre_question(question, self.max_src_length)\n ref_dict = 
{item.split('|!+')[1]: float(item.split('|!+')[0]) for item in refs.split('&&')}\n answer = max(ref_dict, key=ref_dict.get)\n conf = ref_dict[answer]\n src_item = self.encode_text(\" {}\".format(question))\n tgt_item = self.encode_text(\" {}\".format(answer))\n conf = torch.tensor([conf])\n pos_src_item = self.encode_text(' what is the answer to question \" {} \". is \" {} \"?'.format(question, answer))\n neg_src_item = self.encode_text(\n ' what is the answer to question \" {} \". is \" {} \"?'.format(question, self.get_negative_answer(answer, conf))\n )\n elif type == 'visual_grounding':\n conf = torch.tensor([1.0])\n w, h = image.size\n boxes_target = {\"boxes\": [], \"labels\": [], \"area\": [], \"size\": torch.tensor([h, w])}\n x0, y0, x1, y1 = refs.strip().split(',')\n boxes_target[\"boxes\"] = torch.tensor([[float(x0), float(y0), float(x1), float(y1)]])\n boxes_target[\"labels\"] = np.array([0])\n boxes_target[\"area\"] = torch.tensor([(float(x1) - float(x0)) * (float(y1) - float(y0))])\n patch_image, boxes_target = self.visual_grounding_transform(image, boxes_target)\n quant_x0 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][0] * (self.num_bins - 1)).round()))\n quant_y0 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][1] * (self.num_bins - 1)).round()))\n quant_x1 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][2] * (self.num_bins - 1)).round()))\n quant_y1 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][3] * (self.num_bins - 1)).round()))\n region_coord = \"{} {} {} {}\".format(quant_x0, quant_y0, quant_x1, quant_y1)\n src_caption = self.pre_caption(caption, self.max_src_length)\n src_item = self.encode_text(' which region does the text \" {} \" describe?'.format(src_caption))\n tgt_item = self.encode_text(region_coord, use_bpe=False)\n else:\n logger.info('type {} is not implemented'.format(type))\n raise NotImplementedError\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n pos_src_item = torch.cat([self.bos_item, pos_src_item, self.eos_item]) if type != 'visual_grounding' else None\n neg_src_item = torch.cat([self.bos_item, neg_src_item, self.eos_item]) if type != 'visual_grounding' else None\n\n if type == 'caption' and dataset_name == 'cc12m':\n target_item[:2] = self.src_dict.pad()\n target_item[-1] = self.eos_item\n\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n\n examples = [example]\n prob = random.random()\n if type == 'visual_grounding':\n region_example = example.copy()\n region_prefix_item = self.encode_text(' what does the region describe? 
region:')\n region_coord_item = self.encode_text('{}'.format(region_coord), use_bpe=False)\n region_src_item = torch.cat([region_prefix_item, region_coord_item])\n region_tgt_item = self.encode_text(' {}'.format(self.pre_caption(caption, self.max_tgt_length)))\n region_example[\"source\"] = torch.cat([self.bos_item, region_src_item, self.eos_item])\n region_example[\"target\"] = torch.cat([region_tgt_item, self.eos_item])\n region_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, region_tgt_item])\n region_example[\"conf\"] = torch.tensor([1.0])\n examples.append(region_example)\n elif prob >= 0.5 and self.split == 'train':\n pos_example = example.copy()\n pos_example[\"source\"] = pos_src_item\n pos_example[\"target\"] = torch.cat([self.pos_tgt_item, self.eos_item])\n pos_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, self.pos_tgt_item])\n examples.append(pos_example)\n elif self.split == 'train':\n neg_example = example.copy()\n neg_example[\"source\"] = neg_src_item\n neg_example[\"target\"] = torch.cat([self.neg_tgt_item, self.eos_item])\n neg_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, self.neg_tgt_item])\n examples.append(neg_example)\n return examples\n\n def process_pure_text(self, index):\n patch_image = torch.zeros((3, self.code_image_size*2, self.code_image_size*2))\n patch_mask = torch.tensor([False])\n code_mask = torch.tensor([False])\n conf = torch.tensor([2.0])\n\n examples = []\n for _ in range(2):\n uniq_id, text = self.pure_text_dataset[index]\n text = text.strip().lower()\n text_item = self.encode_text(\" {}\".format(text), length=512)\n text_item = text_item[-256:]\n text_item = torch.cat([self.bos_item, text_item, self.eos_item])\n mask_text_item = self.add_whole_word_mask(text_item.clone(), self.mask_ratio)\n prefix_item = self.encode_text(' what is the complete text of \" \"?')\n src_item = torch.cat([prefix_item[:-2], mask_text_item[1:-1], prefix_item[-2:]])\n tgt_item = text_item[1:-1]\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n examples.append(example)\n\n return examples\n\n def process_pure_image(self, index):\n image_id, image, code = self.pure_image_dataset[index]\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n patch_image = self.patch_crop_transform(image)\n patch_image[:, self.mask_top:self.mask_bottom, self.mask_left:self.mask_right] = 0\n patch_mask = torch.tensor([True])\n src_item = self.encode_text(\" what is the image in the middle part?\")\n image_code = torch.LongTensor([int(num) for num in code.strip().split()])\n tgt_item = image_code + len(self.src_dict) - self.code_dict_size - self.num_bins\n code_mask = torch.tensor([True])\n conf = torch.tensor([2.0])\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n\n example = {\n \"id\": image_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n return [example]\n\n def 
process_detection(self, index):\n image_id, image, label = self.detection_dataset[index]\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n\n w, h = image.size\n boxes_target = {\"boxes\": [], \"labels\": [], \"area\": [], \"size\": torch.tensor([h, w])}\n label_list = label.strip().split('&&')\n for label in label_list:\n x0, y0, x1, y1, cat_id, cat = label.strip().split(',', 5)\n boxes_target[\"boxes\"].append([float(x0), float(y0), float(x1), float(y1)])\n boxes_target[\"labels\"].append(cat)\n boxes_target[\"area\"].append((float(x1) - float(x0)) * (float(y1) - float(y0)))\n boxes_target[\"boxes\"] = torch.tensor(boxes_target[\"boxes\"])\n boxes_target[\"labels\"] = np.array(boxes_target[\"labels\"])\n boxes_target[\"area\"] = torch.tensor(boxes_target[\"area\"])\n\n patch_image, boxes_target = self.detection_transform(image, boxes_target)\n patch_mask = torch.tensor([True])\n code_mask = torch.tensor([False])\n conf = torch.tensor([2.0])\n\n quant_boxes = []\n for i, box in enumerate(boxes_target[\"boxes\"]):\n quant_boxes.extend([\"<bin_{}>\".format(int((pos * (self.num_bins - 1)).round())) for pos in box[:4]])\n quant_boxes.append(self.bpe.encode(' {}'.format(boxes_target[\"labels\"][i])))\n src_item = self.encode_text(' what are the objects in the image?')\n tgt_item = self.encode_text(' '.join(quant_boxes), use_bpe=False)\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n\n example = {\n \"id\": image_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n return [example]\n\n def __getitem__(self, index):\n with data_utils.numpy_seed(self.seed, self.epoch):\n pair_samples = self.process_image_text_pair(index)\n extra_samples = []\n if self.split == 'train' and self.dataset.data_cnt % 8 == 0:\n extra_samples += self.process_pure_text(0) if self.pure_text_dataset else []\n extra_samples += self.process_pure_image(0) if self.pure_image_dataset else []\n extra_samples += self.process_detection(0) if self.detection_dataset else []\n return pair_samples, extra_samples\n\n def word_starts(self, source):\n if self.mask_whole_word is not None:\n is_word_start = self.mask_whole_word.gather(0, source)\n else:\n is_word_start = torch.ones(source.size())\n is_word_start[0] = 0\n is_word_start[-1] = 0\n return is_word_start\n\n def add_whole_word_mask(self, source, p):\n is_word_start = self.word_starts(source)\n num_to_mask = int(math.ceil(is_word_start.float().sum() * p))\n num_inserts = 0\n if num_to_mask == 0:\n return source\n\n if self.mask_span_distribution is not None:\n lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))\n\n # Make sure we have enough to mask\n cum_length = torch.cumsum(lengths, 0)\n while cum_length[-1] < num_to_mask:\n lengths = torch.cat(\n [\n lengths,\n self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),\n ],\n dim=0,\n )\n cum_length = torch.cumsum(lengths, 0)\n\n # Trim to masking budget\n i = 0\n while cum_length[i] < num_to_mask:\n i += 1\n lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])\n num_to_mask = i + 1\n lengths = lengths[:num_to_mask]\n\n # Handle 0-length mask (inserts) separately\n lengths = lengths[lengths > 0]\n num_inserts = num_to_mask - lengths.size(0)\n num_to_mask -= num_inserts\n 
if num_to_mask == 0:\n return self.add_insertion_noise(source, num_inserts / source.size(0))\n\n assert (lengths > 0).all()\n else:\n lengths = torch.ones((num_to_mask,)).long()\n assert is_word_start[-1] == 0\n word_starts = is_word_start.nonzero(as_tuple=False)\n indices = word_starts[\n torch.randperm(word_starts.size(0))[:num_to_mask]\n ].squeeze(1)\n mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio\n\n source_length = source.size(0)\n assert source_length - 1 not in indices\n to_keep = torch.ones(source_length, dtype=torch.bool)\n is_word_start[\n -1\n ] = 255 # acts as a long length, so spans don't go over the end of doc\n if self.replace_length == 0:\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n\n if self.mask_span_distribution is not None:\n assert len(lengths.size()) == 1\n assert lengths.size() == indices.size()\n lengths -= 1\n while indices.size(0) > 0:\n assert lengths.size() == indices.size()\n lengths -= is_word_start[indices + 1].long()\n uncompleted = lengths >= 0\n indices = indices[uncompleted] + 1\n mask_random = mask_random[uncompleted]\n lengths = lengths[uncompleted]\n if self.replace_length != -1:\n # delete token\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n else:\n # A bit faster when all lengths are 1\n while indices.size(0) > 0:\n uncompleted = is_word_start[indices + 1] == 0\n indices = indices[uncompleted] + 1\n mask_random = mask_random[uncompleted]\n if self.replace_length != -1:\n # delete token\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n\n assert source_length - 1 not in indices\n\n source = source[to_keep]\n\n if num_inserts > 0:\n source = self.add_insertion_noise(source, num_inserts / source.size(0))\n\n return source\n\n def add_insertion_noise(self, tokens, p):\n if p == 0.0:\n return tokens\n\n num_tokens = len(tokens)\n n = int(math.ceil(num_tokens * p))\n\n noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1\n noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)\n noise_mask[noise_indices] = 1\n result = torch.LongTensor(n + len(tokens)).fill_(-1)\n\n num_random = int(math.ceil(n * self.random_ratio))\n result[noise_indices[num_random:]] = self.mask_idx\n result[noise_indices[:num_random]] = torch.randint(\n low=4, high=len(self.tgt_dict)-self.code_dict_size-self.num_bins, size=(num_random,)\n )\n\n result[~noise_mask] = tokens\n\n assert (result >= 0).all()\n return result\n\n def collater(self, samples, pad_to_length=None):\n \"\"\"Merge samples of different tasks to form two mini-batches.\n Args:\n samples (List[Tuple]): samples to collate\n Returns:\n Tuple[dict]: two mini-batch containing the data of different tasks\n \"\"\"\n\n samples_v1 = [] # containing image-text pairs\n samples_v2 = [] # containing detection data, text data and image data\n for sample_tuple in samples:\n samples_v1 += sample_tuple[0]\n samples_v2 += sample_tuple[1]\n if samples_v2 != []:\n res_v1 = collate(samples_v1, 
pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n res_v2 = collate(samples_v2, pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n return res_v1, res_v2\n else:\n res_v1 = collate(samples_v1, pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n return res_v1" }, { "identifier": "FileDataset", "path": "data/file_dataset.py", "snippet": "class FileDataset:\n def __init__(self, file_path, selected_col_ids=None, dtypes=None, separator=\"\\t\", cached_index=False):\n self.file_path = file_path\n assert os.path.exists(self.file_path), \"Error: The local datafile {} not exists!\".format(self.file_path)\n\n self.separator = separator\n if selected_col_ids is None:\n # default to all fields\n self.selected_col_ids = list(\n range(len(open(self.file_path).readline().rstrip(\"\\n\").split(self.separator))))\n else:\n self.selected_col_ids = [int(col_id) for col_id in selected_col_ids.split(\",\")]\n if dtypes is None:\n # default to str\n self.dtypes = [str for col_id in self.selected_col_ids]\n else:\n self.dtypes = [eval(col_dtype) for col_dtype in dtypes.split(\",\")]\n assert len(self.dtypes) == len(self.selected_col_ids)\n\n self.data_cnt = 0\n try:\n self.slice_id = torch.distributed.get_rank()\n self.slice_count = torch.distributed.get_world_size()\n except Exception:\n self.slice_id = 0\n self.slice_count = 1\n self.cached_index = cached_index\n self._init_seek_index()\n self._reader = self._get_reader()\n print(\"file {} slice_id {} row count {} total row count {}\".format(\n self.file_path, self.slice_id, self.row_count, self.total_row_count)\n )\n\n def _init_seek_index(self):\n if self.cached_index:\n cache_path = \"{}.index\".format(self.file_path)\n assert os.path.exists(cache_path), \"cache file {} not exists!\".format(cache_path)\n self.total_row_count, self.lineid_to_offset = pickle.load(open(cache_path, \"rb\"))\n print(\"local datafile {} slice_id {} use cached row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n else:\n # make an iteration over the file to get row_count and line_idx-to-offset mapping\n fp = open(self.file_path, \"r\")\n print(\"local datafile {} slice_id {} begin to initialize row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n self.total_row_count = 0\n offset = 0\n self.lineid_to_offset = []\n for line in fp:\n self.lineid_to_offset.append(offset)\n self.total_row_count += 1\n offset += len(line.encode('utf-8'))\n self._compute_start_pos_and_row_count()\n print(\"local datafile {} slice_id {} finished initializing row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n\n def _compute_start_pos_and_row_count(self):\n self.row_count = self.total_row_count // self.slice_count\n if self.slice_id < self.total_row_count - self.row_count * self.slice_count:\n self.row_count += 1\n self.start_pos = self.row_count * self.slice_id\n else:\n self.start_pos = self.row_count * self.slice_id + (self.total_row_count - self.row_count * self.slice_count)\n\n def _get_reader(self):\n fp = open(self.file_path, \"r\")\n fp.seek(self.lineid_to_offset[self.start_pos])\n return fp\n\n def _seek(self, offset=0):\n try:\n print(\"slice_id {} seek offset {}\".format(self.slice_id, self.start_pos + offset))\n self._reader.seek(self.lineid_to_offset[self.start_pos + offset])\n self.data_cnt = offset\n except Exception:\n print(\"slice_id {} seek offset {}\".format(self.slice_id, offset))\n self._reader.seek(self.lineid_to_offset[offset])\n self.data_cnt = offset\n\n def __del__(self):\n 
self._reader.close()\n\n def __len__(self):\n return self.row_count\n\n def get_total_row_count(self):\n return self.total_row_count\n\n def __getitem__(self, index):\n if self.data_cnt == self.row_count:\n print(\"reach the end of datafile, start a new reader\")\n self.data_cnt = 0\n self._reader = self._get_reader()\n column_l = self._reader.readline().rstrip(\"\\n\").split(self.separator)\n self.data_cnt += 1\n try:\n column_l = [dtype(column_l[col_id]) for col_id, dtype in zip(self.selected_col_ids, self.dtypes)]\n except IndexError:\n print('Stop')\n return column_l" } ]
from dataclasses import dataclass, field from typing import Optional from fairseq.tasks import register_task from fairseq.data import FairseqDataset, iterators from tasks.ofa_task import OFATask, OFAConfig from data.pretrain_data.unify_dataset import UnifyDataset from data.file_dataset import FileDataset import json import logging import os import math
11,519
# Copyright 2022 The OFA-Sys Team. # All rights reserved. # This source code is licensed under the Apache 2.0 license # found in the LICENSE file in the root directory. logger = logging.getLogger(__name__) @dataclass class UnifyConfig(OFAConfig): max_image_size: int = field( default=512, metadata={"help": ""} ) text_data: Optional[str] = field( default=None, metadata={"help": "pure text data"}, ) image_data: Optional[str] = field( default=None, metadata={"help": "pure image data"}, ) detection_data: Optional[str] = field( default=None, metadata={"help": "detection data"}, ) text_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure text data selected cols"}, ) image_selected_cols: Optional[str] = field( default=None, metadata={"help": "pure image data selected cols"}, ) detection_selected_cols: Optional[str] = field( default=None, metadata={"help": "detection data selected cols"}, ) neg_sample_dir: Optional[str] = field( default=None, metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), " "answers (taken from VQA), " "objects (taken form OpenImages) "}, ) code_image_size: int = field( default=128, metadata={"help": "the resolution of the generated image in the image infilling task"} ) pretrain_seed: int = field( default=7, metadata={"help": "pretrain seed"}, ) mask_ratio: float = field( default=0.3, metadata={"help": "fraction of words/subwords that will be masked"}, ) random_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], use random token this often"}, ) keep_ratio: float = field( default=0.0, metadata={"help": "instead of using [MASK], keep original token this often"}, ) mask_length: str = field( default="span-poisson", metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"}, ) poisson_lambda: float = field( default=3.0, metadata={"help": "randomly shuffle sentences for this proportion of inputs"}, ) replace_length: int = field( default=1, metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"}, ) @register_task("unify_task", dataclass=UnifyConfig) class UnifyTask(OFATask): def __init__(self, cfg: UnifyConfig, src_dict, tgt_dict): super().__init__(cfg, src_dict, tgt_dict) self.type2ans_dict = json.load(open(os.path.join(self.cfg.neg_sample_dir, 'type2ans.json'))) self.ans2type_dict = {} for type, answer_list in self.type2ans_dict.items(): if type == 'other': continue for answer in answer_list: self.ans2type_dict[answer] = type self.all_object_list = [ row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'object.txt')) if row.strip() != '' ] self.all_caption_list = [ row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'all_captions.txt')) if row.strip() != '' ] self.pure_text_dataset = None self.pure_image_dataset = None self.detection_dataset = None if self.cfg.text_data is not None: self.pure_text_dataset = FileDataset(self.cfg.text_data, self.cfg.text_selected_cols) if self.cfg.image_data is not None: self.pure_image_dataset = FileDataset(self.cfg.image_data, self.cfg.image_selected_cols) if self.cfg.detection_data is not None: self.detection_dataset = FileDataset(self.cfg.detection_data, self.cfg.detection_selected_cols) def load_dataset(self, split, epoch=1, combine=False, **kwargs): paths = self.cfg.data.split(',') assert len(paths) > 0 file_path = paths[(epoch - 1) % (len(paths))] dataset = FileDataset(file_path, self.cfg.selected_cols)
self.datasets[split] = UnifyDataset(
2
2023-10-20 20:01:42+00:00
16k