| column             | dtype         | range    |
|--------------------|---------------|----------|
| repo_name          | stringlengths | 7–71     |
| file_path          | stringlengths | 5–118    |
| context            | list          |          |
| import_statement   | stringlengths | 45–12.5k |
| token_num          | int64         | 641–99.4k |
| cropped_code       | stringlengths | 44–17k   |
| all_code           | stringlengths | 43–754k  |
| next_line          | stringlengths | 2–330    |
| gold_snippet_index | int64         | 0–68     |
| created_at         | stringlengths | 25–25    |
| level              | stringclasses | 9 values |
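Each record pairs a truncated source file (`cropped_code`) with the ground-truth continuation (`next_line`) and a list of cross-file `context` snippets; judging from the rows below, `gold_snippet_index` points at the snippet the continuation depends on. A minimal sketch of loading and inspecting a row with the Hugging Face `datasets` library; the Hub path `user/repo-code-completion` is a hypothetical placeholder, not the dataset's published name.

```python
from datasets import load_dataset

# Hypothetical Hub path; substitute the dataset's real name.
ds = load_dataset("user/repo-code-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"])  # e.g. pytorch-labs/torchfix ... 8k
print("context snippets:", len(row["context"]))
print("gold snippet:", row["context"][row["gold_snippet_index"]]["identifier"])
print("target:", row["next_line"])
```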
repo_name: pytorch-labs/torchfix
file_path: torchfix/visitors/vision/pretrained.py
[ { "identifier": "LintViolation", "path": "torchfix/common.py", "snippet": "class LintViolation:\n error_code: str\n message: str\n line: int\n column: int\n node: cst.CSTNode\n replacement: Optional[cst.CSTNode]\n\n def flake8_result(self):\n full_message = f\"{self.error_code} {self.message}\"\n return (self.line, 1 + self.column, full_message, \"TorchFix\")\n\n def codemod_result(self) -> str:\n fixable = f\" [{CYAN}*{ENDC}]\" if self.replacement is not None else \"\"\n colon = f\"{CYAN}:{ENDC}\"\n position = f\"{colon}{self.line}{colon}{1 + self.column}{colon}\"\n error_code = f\"{RED}{BOLD}{self.error_code}{ENDC}\"\n return f\"{position} {error_code}{fixable} {self.message}\"" }, { "identifier": "TorchVisitor", "path": "torchfix/common.py", "snippet": "class TorchVisitor(cst.BatchableCSTVisitor, ABC):\n METADATA_DEPENDENCIES = (\n QualifiedNameProvider,\n WhitespaceInclusivePositionProvider,\n )\n\n ERROR_CODE: Union[str, List[str]]\n\n def __init__(self) -> None:\n self.violations: List[LintViolation] = []\n self.needed_imports: Set[ImportItem] = set()\n\n @staticmethod\n def get_specific_arg(\n node: cst.Call, arg_name: str, arg_pos: int\n ) -> Optional[cst.Arg]:\n # `arg_pos` is zero-based.\n curr_pos = 0\n for arg in node.args:\n if arg.keyword is None:\n if curr_pos == arg_pos:\n return arg\n curr_pos += 1\n elif arg.keyword.value == arg_name:\n return arg\n return None\n\n def get_qualified_name_for_call(self, node: cst.Call) -> Optional[str]:\n # Guard against situations like `vmap(a)(b)`:\n #\n # Call(\n # func=Call(\n # func=Name(\n # value='vmap',\n #\n # The QualifiedName metadata for the outer call will be the same\n # as for the inner call.\n if isinstance(node.func, cst.Call):\n return None\n\n name_metadata = list(self.get_metadata(QualifiedNameProvider, node))\n if not name_metadata:\n return None\n qualified_name = name_metadata[0].name\n return qualified_name" } ]
import_statement:
```python
from typing import Optional

from libcst.codemod.visitors import ImportItem

from ...common import LintViolation, TorchVisitor
import libcst as cst
```
token_num: 5,544
("segmentation.deeplabv3_resnet50", "pretrained"): "DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.deeplabv3_resnet50", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("segmentation.deeplabv3_resnet101", "pretrained"): "DeepLabV3_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.deeplabv3_resnet101", "pretrained_backbone"): "ResNet101_Weights.IMAGENET1K_V1", ("segmentation.deeplabv3_mobilenet_v3_large", "pretrained"): "DeepLabV3_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.deeplabv3_mobilenet_v3_large", "pretrained_backbone"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", ("segmentation.fcn_resnet50", "pretrained"): "FCN_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.fcn_resnet50", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("segmentation.fcn_resnet101", "pretrained"): "FCN_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.fcn_resnet101", "pretrained_backbone"): "ResNet101_Weights.IMAGENET1K_V1", ("detection.ssd300_vgg16", "pretrained"): "SSD300_VGG16_Weights.COCO_V1", ("detection.ssd300_vgg16", "pretrained_backbone"): "VGG16_Weights.IMAGENET1K_FEATURES", ("detection.ssdlite320_mobilenet_v3_large", "pretrained"): "SSDLite320_MobileNet_V3_Large_Weights.COCO_V1", ("detection.ssdlite320_mobilenet_v3_large", "pretrained_backbone"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", } # fmt: on # The same model can be imported from torchvision.models directly, # or from a submodule like torchvision.models.resnet. MODEL_SUBMODULES = ( "alexnet", "convnext", "densenet", "efficientnet", "googlenet", "inception", "mnasnet", "mobilenet", "regnet", "resnet", "shufflenetv2", "squeezenet", "vgg", "vision_transformer", "swin_transformer", "maxvit", ) def visit_Call(self, node): def _new_arg_and_import( old_arg: cst.Arg, is_backbone: bool ) -> Optional[cst.Arg]: old_arg_name = "pretrained_backbone" if is_backbone else "pretrained" if old_arg is None or (model_name, old_arg_name) not in self.MODEL_WEIGHTS: return None new_arg_name = "weights_backbone" if is_backbone else "weights" weights_arg = None if cst.ensure_type(old_arg.value, cst.Name).value == "True": weights_str = self.MODEL_WEIGHTS[(model_name, old_arg_name)] if is_backbone is False and len(model_name.split(".")) > 1: # Prepend things like 'detection.' to the weights string weights_str = model_name.split(".")[0] + "." + weights_str weights_str = "models." + weights_str weights_arg = cst.ensure_type( cst.parse_expression(f"f({new_arg_name}={weights_str})"), cst.Call ).args[0] self.needed_imports.add( ImportItem( module_name="torchvision", obj_name="models", ) ) elif cst.ensure_type(old_arg.value, cst.Name).value == "False": weights_arg = cst.ensure_type( cst.parse_expression(f"f({new_arg_name}=None)"), cst.Call ).args[0] return weights_arg qualified_name = self.get_qualified_name_for_call(node) if qualified_name is None: return if qualified_name.startswith("torchvision.models"): model_name = qualified_name[len("torchvision.models") + 1 :] for submodule in self.MODEL_SUBMODULES: if model_name.startswith(submodule + "."): model_name = model_name[len(submodule) + 1 :] if (model_name, "pretrained") not in self.MODEL_WEIGHTS: return message = None pretrained_arg = self.get_specific_arg(node, "pretrained", 0) if pretrained_arg is not None: message = "Parameter `pretrained` is deprecated, please use `weights` instead." 
pretrained_backbone_arg = self.get_specific_arg( node, "pretrained_backbone", 1 ) if pretrained_backbone_arg is not None: message = "Parameter `pretrained_backbone` is deprecated, please use `weights_backbone` instead." replacement_args = list(node.args) new_pretrained_arg = _new_arg_and_import(pretrained_arg, is_backbone=False) has_replacement = False if new_pretrained_arg is not None: for pos, arg in enumerate(node.args): if arg is pretrained_arg: break replacement_args[pos] = new_pretrained_arg has_replacement = True new_pretrained_backbone_arg = _new_arg_and_import( pretrained_backbone_arg, is_backbone=True ) if new_pretrained_backbone_arg is not None: for pos, arg in enumerate(node.args): if arg is pretrained_backbone_arg: break replacement_args[pos] = new_pretrained_backbone_arg has_replacement = True replacement = ( node.with_changes(args=replacement_args) if has_replacement else None ) if message is not None: position_metadata = self.get_metadata( cst.metadata.WhitespaceInclusivePositionProvider, node ) self.violations.append(
class TorchVisionDeprecatedPretrainedVisitor(TorchVisitor): """ Find and fix deprecated `pretrained` parameters in TorchVision models. Both `pretrained` and `pretrained_backbone` parameters are supported. The parameters are updated to the new `weights` and `weights_backbone` parameters only if the old parameter has explicit literal `True` or `False` value, otherwise only lint violation is emitted. """ ERROR_CODE = "TOR201" # flake8: noqa: E105 # fmt: off MODEL_WEIGHTS = { ("mobilenet_v2", "pretrained"): "MobileNet_V2_Weights.IMAGENET1K_V1", ("mobilenet_v3_large", "pretrained"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", ("mobilenet_v3_small", "pretrained"): "MobileNet_V3_Small_Weights.IMAGENET1K_V1", ("densenet121", "pretrained"): "DenseNet121_Weights.IMAGENET1K_V1", ("densenet161", "pretrained"): "DenseNet161_Weights.IMAGENET1K_V1", ("densenet169", "pretrained"): "DenseNet169_Weights.IMAGENET1K_V1", ("densenet201", "pretrained"): "DenseNet201_Weights.IMAGENET1K_V1", ("detection.maskrcnn_resnet50_fpn", "pretrained"): "MaskRCNN_ResNet50_FPN_Weights.COCO_V1", ("detection.maskrcnn_resnet50_fpn", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("detection.maskrcnn_resnet50_fpn_v2", "pretrained"): "MaskRCNN_ResNet50_FPN_V2_Weights.COCO_V1", ("detection.maskrcnn_resnet50_fpn_v2", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("detection.retinanet_resnet50_fpn", "pretrained"): "RetinaNet_ResNet50_FPN_Weights.COCO_V1", ("detection.retinanet_resnet50_fpn", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("detection.retinanet_resnet50_fpn_v2", "pretrained"): "RetinaNet_ResNet50_FPN_V2_Weights.COCO_V1", ("detection.retinanet_resnet50_fpn_v2", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("optical_flow.raft_large", "pretrained"): "Raft_Large_Weights.C_T_SKHT_V2", ("optical_flow.raft_small", "pretrained"): "Raft_Small_Weights.C_T_V2", ("alexnet", "pretrained"): "AlexNet_Weights.IMAGENET1K_V1", ("convnext_tiny", "pretrained"): "ConvNeXt_Tiny_Weights.IMAGENET1K_V1", ("convnext_small", "pretrained"): "ConvNeXt_Small_Weights.IMAGENET1K_V1", ("convnext_base", "pretrained"): "ConvNeXt_Base_Weights.IMAGENET1K_V1", ("convnext_large", "pretrained"): "ConvNeXt_Large_Weights.IMAGENET1K_V1", ("inception_v3", "pretrained"): "Inception_V3_Weights.IMAGENET1K_V1", ("maxvit_t", "pretrained"): "MaxVit_T_Weights.IMAGENET1K_V1", ("mnasnet0_5", "pretrained"): "MNASNet0_5_Weights.IMAGENET1K_V1", ("mnasnet0_75", "pretrained"): "MNASNet0_75_Weights.IMAGENET1K_V1", ("mnasnet1_0", "pretrained"): "MNASNet1_0_Weights.IMAGENET1K_V1", ("mnasnet1_3", "pretrained"): "MNASNet1_3_Weights.IMAGENET1K_V1", ("detection.fasterrcnn_resnet50_fpn", "pretrained"): "FasterRCNN_ResNet50_FPN_Weights.COCO_V1", ("detection.fasterrcnn_resnet50_fpn", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("detection.fasterrcnn_resnet50_fpn_v2", "pretrained"): "FasterRCNN_ResNet50_FPN_V2_Weights.COCO_V1", ("detection.fasterrcnn_resnet50_fpn_v2", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("detection.fasterrcnn_mobilenet_v3_large_320_fpn", "pretrained"): "FasterRCNN_MobileNet_V3_Large_320_FPN_Weights.COCO_V1", ("detection.fasterrcnn_mobilenet_v3_large_320_fpn", "pretrained_backbone"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", ("detection.fasterrcnn_mobilenet_v3_large_fpn", "pretrained"): "FasterRCNN_MobileNet_V3_Large_FPN_Weights.COCO_V1", ("detection.fasterrcnn_mobilenet_v3_large_fpn", "pretrained_backbone"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", ("detection.fcos_resnet50_fpn", 
"pretrained"): "FCOS_ResNet50_FPN_Weights.COCO_V1", ("detection.fcos_resnet50_fpn", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("segmentation.lraspp_mobilenet_v3_large", "pretrained"): "LRASPP_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.lraspp_mobilenet_v3_large", "pretrained_backbone"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", ("shufflenet_v2_x0_5", "pretrained"): "ShuffleNet_V2_X0_5_Weights.IMAGENET1K_V1", ("shufflenet_v2_x1_0", "pretrained"): "ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1", ("shufflenet_v2_x1_5", "pretrained"): "ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1", ("shufflenet_v2_x2_0", "pretrained"): "ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1", ("squeezenet1_0", "pretrained"): "SqueezeNet1_0_Weights.IMAGENET1K_V1", ("squeezenet1_1", "pretrained"): "SqueezeNet1_1_Weights.IMAGENET1K_V1", ("swin_t", "pretrained"): "Swin_T_Weights.IMAGENET1K_V1", ("swin_s", "pretrained"): "Swin_S_Weights.IMAGENET1K_V1", ("swin_b", "pretrained"): "Swin_B_Weights.IMAGENET1K_V1", ("swin_v2_t", "pretrained"): "Swin_V2_T_Weights.IMAGENET1K_V1", ("swin_v2_s", "pretrained"): "Swin_V2_S_Weights.IMAGENET1K_V1", ("swin_v2_b", "pretrained"): "Swin_V2_B_Weights.IMAGENET1K_V1", ("video.s3d", "pretrained"): "S3D_Weights.KINETICS400_V1", ("video.swin3d_t", "pretrained"): "Swin3D_T_Weights.KINETICS400_V1", ("video.swin3d_s", "pretrained"): "Swin3D_S_Weights.KINETICS400_V1", ("video.swin3d_b", "pretrained"): "Swin3D_B_Weights.KINETICS400_V1", ("vit_b_16", "pretrained"): "ViT_B_16_Weights.IMAGENET1K_V1", ("vit_b_32", "pretrained"): "ViT_B_32_Weights.IMAGENET1K_V1", ("vit_l_16", "pretrained"): "ViT_L_16_Weights.IMAGENET1K_V1", ("vit_l_32", "pretrained"): "ViT_L_32_Weights.IMAGENET1K_V1", ("vit_h_14", "pretrained"): "None", ("vgg11", "pretrained"): "VGG11_Weights.IMAGENET1K_V1", ("vgg11_bn", "pretrained"): "VGG11_BN_Weights.IMAGENET1K_V1", ("vgg13", "pretrained"): "VGG13_Weights.IMAGENET1K_V1", ("vgg13_bn", "pretrained"): "VGG13_BN_Weights.IMAGENET1K_V1", ("vgg16", "pretrained"): "VGG16_Weights.IMAGENET1K_V1", ("vgg16_bn", "pretrained"): "VGG16_BN_Weights.IMAGENET1K_V1", ("vgg19", "pretrained"): "VGG19_Weights.IMAGENET1K_V1", ("vgg19_bn", "pretrained"): "VGG19_BN_Weights.IMAGENET1K_V1", ("video.mvit_v1_b", "pretrained"): "MViT_V1_B_Weights.KINETICS400_V1", ("video.mvit_v2_s", "pretrained"): "MViT_V2_S_Weights.KINETICS400_V1", ("video.r3d_18", "pretrained"): "R3D_18_Weights.KINETICS400_V1", ("video.mc3_18", "pretrained"): "MC3_18_Weights.KINETICS400_V1", ("video.r2plus1d_18", "pretrained"): "R2Plus1D_18_Weights.KINETICS400_V1", ("regnet_y_400mf", "pretrained"): "RegNet_Y_400MF_Weights.IMAGENET1K_V1", ("regnet_y_800mf", "pretrained"): "RegNet_Y_800MF_Weights.IMAGENET1K_V1", ("regnet_y_1_6gf", "pretrained"): "RegNet_Y_1_6GF_Weights.IMAGENET1K_V1", ("regnet_y_3_2gf", "pretrained"): "RegNet_Y_3_2GF_Weights.IMAGENET1K_V1", ("regnet_y_8gf", "pretrained"): "RegNet_Y_8GF_Weights.IMAGENET1K_V1", ("regnet_y_16gf", "pretrained"): "RegNet_Y_16GF_Weights.IMAGENET1K_V1", ("regnet_y_32gf", "pretrained"): "RegNet_Y_32GF_Weights.IMAGENET1K_V1", ("regnet_y_128gf", "pretrained"): "None", ("regnet_x_400mf", "pretrained"): "RegNet_X_400MF_Weights.IMAGENET1K_V1", ("regnet_x_800mf", "pretrained"): "RegNet_X_800MF_Weights.IMAGENET1K_V1", ("regnet_x_1_6gf", "pretrained"): "RegNet_X_1_6GF_Weights.IMAGENET1K_V1", ("regnet_x_3_2gf", "pretrained"): "RegNet_X_3_2GF_Weights.IMAGENET1K_V1", ("regnet_x_8gf", "pretrained"): "RegNet_X_8GF_Weights.IMAGENET1K_V1", ("regnet_x_16gf", "pretrained"): 
"RegNet_X_16GF_Weights.IMAGENET1K_V1", ("regnet_x_32gf", "pretrained"): "RegNet_X_32GF_Weights.IMAGENET1K_V1", ("resnet18", "pretrained"): "ResNet18_Weights.IMAGENET1K_V1", ("resnet34", "pretrained"): "ResNet34_Weights.IMAGENET1K_V1", ("resnet50", "pretrained"): "ResNet50_Weights.IMAGENET1K_V1", ("resnet101", "pretrained"): "ResNet101_Weights.IMAGENET1K_V1", ("resnet152", "pretrained"): "ResNet152_Weights.IMAGENET1K_V1", ("resnext50_32x4d", "pretrained"): "ResNeXt50_32X4D_Weights.IMAGENET1K_V1", ("resnext101_32x8d", "pretrained"): "ResNeXt101_32X8D_Weights.IMAGENET1K_V1", ("resnext101_64x4d", "pretrained"): "ResNeXt101_64X4D_Weights.IMAGENET1K_V1", ("wide_resnet50_2", "pretrained"): "Wide_ResNet50_2_Weights.IMAGENET1K_V1", ("wide_resnet101_2", "pretrained"): "Wide_ResNet101_2_Weights.IMAGENET1K_V1", ("efficientnet_b0", "pretrained"): "EfficientNet_B0_Weights.IMAGENET1K_V1", ("efficientnet_b1", "pretrained"): "EfficientNet_B1_Weights.IMAGENET1K_V1", ("efficientnet_b2", "pretrained"): "EfficientNet_B2_Weights.IMAGENET1K_V1", ("efficientnet_b3", "pretrained"): "EfficientNet_B3_Weights.IMAGENET1K_V1", ("efficientnet_b4", "pretrained"): "EfficientNet_B4_Weights.IMAGENET1K_V1", ("efficientnet_b5", "pretrained"): "EfficientNet_B5_Weights.IMAGENET1K_V1", ("efficientnet_b6", "pretrained"): "EfficientNet_B6_Weights.IMAGENET1K_V1", ("efficientnet_b7", "pretrained"): "EfficientNet_B7_Weights.IMAGENET1K_V1", ("efficientnet_v2_s", "pretrained"): "EfficientNet_V2_S_Weights.IMAGENET1K_V1", ("efficientnet_v2_m", "pretrained"): "EfficientNet_V2_M_Weights.IMAGENET1K_V1", ("efficientnet_v2_l", "pretrained"): "EfficientNet_V2_L_Weights.IMAGENET1K_V1", ("googlenet", "pretrained"): "GoogLeNet_Weights.IMAGENET1K_V1", ("segmentation.deeplabv3_resnet50", "pretrained"): "DeepLabV3_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.deeplabv3_resnet50", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("segmentation.deeplabv3_resnet101", "pretrained"): "DeepLabV3_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.deeplabv3_resnet101", "pretrained_backbone"): "ResNet101_Weights.IMAGENET1K_V1", ("segmentation.deeplabv3_mobilenet_v3_large", "pretrained"): "DeepLabV3_MobileNet_V3_Large_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.deeplabv3_mobilenet_v3_large", "pretrained_backbone"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", ("segmentation.fcn_resnet50", "pretrained"): "FCN_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.fcn_resnet50", "pretrained_backbone"): "ResNet50_Weights.IMAGENET1K_V1", ("segmentation.fcn_resnet101", "pretrained"): "FCN_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1", ("segmentation.fcn_resnet101", "pretrained_backbone"): "ResNet101_Weights.IMAGENET1K_V1", ("detection.ssd300_vgg16", "pretrained"): "SSD300_VGG16_Weights.COCO_V1", ("detection.ssd300_vgg16", "pretrained_backbone"): "VGG16_Weights.IMAGENET1K_FEATURES", ("detection.ssdlite320_mobilenet_v3_large", "pretrained"): "SSDLite320_MobileNet_V3_Large_Weights.COCO_V1", ("detection.ssdlite320_mobilenet_v3_large", "pretrained_backbone"): "MobileNet_V3_Large_Weights.IMAGENET1K_V1", } # fmt: on # The same model can be imported from torchvision.models directly, # or from a submodule like torchvision.models.resnet. 
MODEL_SUBMODULES = ( "alexnet", "convnext", "densenet", "efficientnet", "googlenet", "inception", "mnasnet", "mobilenet", "regnet", "resnet", "shufflenetv2", "squeezenet", "vgg", "vision_transformer", "swin_transformer", "maxvit", ) def visit_Call(self, node): def _new_arg_and_import( old_arg: cst.Arg, is_backbone: bool ) -> Optional[cst.Arg]: old_arg_name = "pretrained_backbone" if is_backbone else "pretrained" if old_arg is None or (model_name, old_arg_name) not in self.MODEL_WEIGHTS: return None new_arg_name = "weights_backbone" if is_backbone else "weights" weights_arg = None if cst.ensure_type(old_arg.value, cst.Name).value == "True": weights_str = self.MODEL_WEIGHTS[(model_name, old_arg_name)] if is_backbone is False and len(model_name.split(".")) > 1: # Prepend things like 'detection.' to the weights string weights_str = model_name.split(".")[0] + "." + weights_str weights_str = "models." + weights_str weights_arg = cst.ensure_type( cst.parse_expression(f"f({new_arg_name}={weights_str})"), cst.Call ).args[0] self.needed_imports.add( ImportItem( module_name="torchvision", obj_name="models", ) ) elif cst.ensure_type(old_arg.value, cst.Name).value == "False": weights_arg = cst.ensure_type( cst.parse_expression(f"f({new_arg_name}=None)"), cst.Call ).args[0] return weights_arg qualified_name = self.get_qualified_name_for_call(node) if qualified_name is None: return if qualified_name.startswith("torchvision.models"): model_name = qualified_name[len("torchvision.models") + 1 :] for submodule in self.MODEL_SUBMODULES: if model_name.startswith(submodule + "."): model_name = model_name[len(submodule) + 1 :] if (model_name, "pretrained") not in self.MODEL_WEIGHTS: return message = None pretrained_arg = self.get_specific_arg(node, "pretrained", 0) if pretrained_arg is not None: message = "Parameter `pretrained` is deprecated, please use `weights` instead." pretrained_backbone_arg = self.get_specific_arg( node, "pretrained_backbone", 1 ) if pretrained_backbone_arg is not None: message = "Parameter `pretrained_backbone` is deprecated, please use `weights_backbone` instead." replacement_args = list(node.args) new_pretrained_arg = _new_arg_and_import(pretrained_arg, is_backbone=False) has_replacement = False if new_pretrained_arg is not None: for pos, arg in enumerate(node.args): if arg is pretrained_arg: break replacement_args[pos] = new_pretrained_arg has_replacement = True new_pretrained_backbone_arg = _new_arg_and_import( pretrained_backbone_arg, is_backbone=True ) if new_pretrained_backbone_arg is not None: for pos, arg in enumerate(node.args): if arg is pretrained_backbone_arg: break replacement_args[pos] = new_pretrained_backbone_arg has_replacement = True replacement = ( node.with_changes(args=replacement_args) if has_replacement else None ) if message is not None: position_metadata = self.get_metadata( cst.metadata.WhitespaceInclusivePositionProvider, node ) self.violations.append(
next_line: LintViolation(
gold_snippet_index: 0
created_at: 2023-11-15 01:21:07+00:00
level: 8k
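This first row captures TorchFix's TOR201 codemod mid-file: when the deprecated boolean argument is a literal `True` or `False`, the visitor swaps it for the matching `weights` enum from `MODEL_WEIGHTS` (or an explicit `None`), prefixing `models.` and any submodule such as `detection.`; otherwise it only emits the lint message. A before/after sketch of the rewrite, using the `resnet50` entry from the table in the row:

```python
import torchvision

# Before: flagged by TOR201 ("Parameter `pretrained` is deprecated, ...").
model = torchvision.models.resnet50(pretrained=True)

# After: the codemod substitutes the mapped enum and queues
# `from torchvision import models` as a needed import.
from torchvision import models
model = torchvision.models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)

# A literal `pretrained=False` becomes an explicit `weights=None`.
model = torchvision.models.resnet50(weights=None)
```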
repo_name: FISHers6/CodeLearn-Agent
file_path: codelearn/agents/question_solve.py
[ { "identifier": "CustomOutputParser", "path": "codelearn/agents/code_agent.py", "snippet": "class CustomPromptTemplate(StringPromptTemplate):\nclass CustomOutputParser(AgentOutputParser):\n def format(self, **kwargs) -> str:\n def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:" }, { "identifier": "Project", "path": "codelearn/project/project.py", "snippet": "class Project:\n\n def __init__(self, id: str, local_dir: str, source_content: FileTree, repo_url: str = None, last_updated_time = None):\n \"\"\"\n :param name: 项目名称\n :param contents: 一个字典,其中键是文件路径,值是文件内容\n \"\"\"\n self.id = id\n self.local_dir = local_dir\n self.repo_url = repo_url\n self.contents = source_content\n self.last_updated_time = last_updated_time" }, { "identifier": "CodeRetriever", "path": "codelearn/retrieval/code_retriever.py", "snippet": "class CodeRetriever(Retriever):\n\n vector_store: VectorStoreBase\n embending: Embeddings\n index_name: str\n \n\n def retrieve(self, query: str, project: Project, top_k: int = 1, search_kwargs: Optional[dict] = None) -> List[Tuple[Document, float]]:\n print(self.embending)\n # 实现代码检索的逻辑\n db = self.vector_store.load_local(folder_path=project.id, embeddings=self.embending, index_name=self.index_name)\n if not db:\n raise ValueError(f\"Could not found project, id: {project.id}, repo_url: {project.repo_url}, local_dir: {project.local_dir}, please index project firstly then try again\")\n docs_with_score = db.similarity_search_with_score(query, k=top_k)\n # sort\n # Lost in the Middle: How Language Models Use Long Contexts\n return docs_with_score" }, { "identifier": "MultiQueryMultiRetriever", "path": "codelearn/retrieval/multi_retriever.py", "snippet": "class MultiQueryMultiRetriever(BaseRetriever):\n retrievers: List[Retriever] = Field(default_factory=list)\n project: Optional[Project] = None\n llm_chain: Optional[LLMChain] = None\n parser_key: str = \"lines\"\n languages: List[str] = Field(default=[\"en-US\", \"zh-CN\"])\n\n def _get_relevant_documents(self, query: str, top_k: int = 5, search_kwargs: Optional[dict] = None, **kwargs: Any) -> List[Document]:\n multi_query = self.generage_queries(query, languages=self.languages)\n print(f\"multi_query is {multi_query}\")\n retrievel_documents = self.retrieve(multi_query, top_k=top_k, search_kwargs=search_kwargs)\n sorted_docs_by_score = sorted(retrievel_documents, key=lambda x: x[1], reverse=True)\n documents = [doc for doc, _ in sorted_docs_by_score]\n documents = self.unique_documents(documents)\n print(f\"len docs: {len(documents)}\\n{documents}\")\n return documents\n\n def generage_queries(self, origin_query: str, languages: List[str]) -> List[str]:\n print(f\"origin_query is: {origin_query}, languages is: {languages}\")\n response = self.llm_chain({\"question\": origin_query, \"languages\": languages})\n lines = getattr(response[\"text\"], self.parser_key, [])\n print(f\"lines is {lines}\")\n return lines\n \n def retrieve(self, multi_query: List[str], top_k: int = 5, search_kwargs: Optional[dict] = None) -> List[Tuple[Document, float]]:\n docs_with_scores = []\n for retriever in self.retrievers:\n for query in multi_query:\n docs_with_scores.extend(retriever.retrieve(query, self.project, top_k, search_kwargs))\n return docs_with_scores\n \n def unique_documents(self, documents: Sequence[Document]) -> List[Document]:\n return [doc for i, doc in enumerate(documents) if doc not in documents[:i]]\n\n @classmethod\n def from_llm(\n cls,\n retrievers: List[Retriever],\n llm: BaseLLM,\n project: Project,\n prompt: 
PromptTemplate = DEFAULT_QUERY_PROMPT,\n parser_key: str = \"lines\",\n languages: List[str] = [\"en-US\", \"zh-CN\"]\n ) -> \"MultiQueryMultiRetriever\":\n output_parser = LineListOutputParser()\n llm_chain = LLMChain(llm=llm, prompt=prompt, output_parser=output_parser)\n return cls(\n retrievers=retrievers,\n llm_chain=llm_chain,\n parser_key=parser_key,\n project=project,\n languages=languages\n )" }, { "identifier": "VectorStoreBase", "path": "codelearn/storage/vector.py", "snippet": "class VectorStoreBase(ABC):\n \"\"\"Base VectorStorage class for VectorStore.\"\"\"\n\n @abstractmethod\n def save_local(self, vector_store: VectorStore, folder_path: str, index_name: str = \"code\"):\n pass\n\n @abstractmethod\n def load_local(\n self,\n folder_path: str,\n embeddings: Embeddings,\n index_name: str = \"code\",\n **kwargs: Any\n ) -> Optional[VectorStore]:\n pass\n\n @abstractmethod\n def embending(self, project: Project, documents: List[Document], embedding: Embeddings, vector_store: Optional[VectorStore] = None, index_name=\"code\"):\n pass" }, { "identifier": "CodeSearchTool", "path": "codelearn/tools/code_search.py", "snippet": "class CodeSearchTool(BaseTool):\n \"\"\"Tool that searches for similar source code and documentation based on a given query.\"\"\"\n\n name: str = \"search_source_code\"\n # todo 修改tool描述 只需给定一个origin_query\n description: str = (\n \"This tool supports code similarity recall and documentation similarity recall. \"\n \"It requires one input parameter: the original question of user.\"\n \"Firstly the tool will translation question in English.and give a hypothetical similar code.\"\n \"Secondly the tool then performs recall source code context in user project, based on the multi question and hypothetical similar code.\"\n \"Lastly the tool will return source code context, then you will answer user question based on context.\"\n \"For the same query, you can only use this tool once, but for different queries you want to recall source code context, you can use it again using other query. Please do not use tools for unrelated topics, but use other tools or output final answer.\"\n )\n # , then you will answer user question based on context. if you not have much context,can't answer, please use tools continue.\n \n # I have found a similar code snippet that may help me understand the role of the handle function method in Middleware. 
\n # Now I can recall source code context in the user project to provide a more accurate answer.\n project: Project\n multi_retriever: MultiQueryMultiRetriever\n\n def _search_code(\n self,\n origin_query: str\n ) -> Dict[str, Union[List[str], str]]:\n docs = self.multi_retriever.invoke(input=origin_query)\n # todo metadata中获取文件路径和行号以及符号\n context = \"\\n\".join([f\"{doc.metadata}\\n{doc.page_content}\" for doc in docs])\n return json.dumps({\n \"source_code\": context,\n \"ToolHint\": \"The results contain documents and code snippets that are similar to the provided query and hypothetical code.If you have enough context, output final answer directly\"\n })\n # Analyze the results to find the most relevant answer.\n\n def _run(\n self,\n origin_query: str,\n ) -> Dict[str, Union[List[str], str]]:\n \"\"\"Use the tool.\"\"\"\n return self._search_code(origin_query)" }, { "identifier": "DirectoryStructViewTool", "path": "codelearn/tools/directory_struct_view.py", "snippet": "class DirectoryStructViewTool(BaseTool):\n \"\"\"Tool to automate exploration of project directory structures.\"\"\"\n\n name: str = \"get_directory_struct\"\n description: str = (\n \"The 'get_directory_struct' tool provides a structured view of specified directories within the project. \"\n \"Input a comma-separated list of directory paths to explore. \"\n \"Avoid providing filenames with extensions; **only focus on full path directory paths.For example swim-main/src/example.txt is file not allowed, src/example is directory folder**\"\n \"Output includes a dictionary with 'files' key containing the list of files and subdirectories for the provided paths, \"\n \"and an 'ToolHint' key for guidance on interpreting and further querying the structure. \"\n \"Useful for delving into specific project directories and understanding their contents.\"\n )\n project: Project\n\n def _get_struct(self, paths: List[str]) -> dict:\n file_tree = self.project.contents\n filenames = []\n for path in paths:\n filenames.extend(file_tree.get_all_files_and_directories_in_directory(path))\n return json.dumps({\n \"files\": filenames,\n \"ToolHint\": (\n \"This response provides a list of directories and files in the root directory of the repository. \"\n \"Before querying the GetRepositoryContent endpoint, analyze the directory list and refine it as needed. \"\n \"Query this endpoint again with the refined list of directories you're interested in. \"\n \"You can query for multiple directories, and it's advisable to query more rather than less. \"\n \"For retrieving files in specific directories, use the same endpoint and provide the list via the RelativePaths request property.\"\n )\n })\n\n def _run(\n self,\n directory_paths: str,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n ) -> dict:\n \"\"\"Use the tool.\"\"\"\n paths = process_file_paths(directory_paths)\n return self._get_struct(paths)" }, { "identifier": "FileContentViewTool", "path": "codelearn/tools/file_content_view.py", "snippet": "class FileContentViewTool(BaseTool):\n \"\"\"Tool to fetch and display detailed content of project files.\"\"\"\n\n name: str = \"get_file_content\"\n description: str = (\n \"The 'get_file_content' tool fetches and displays detailed content of specified files within the project, including both source code and documentation. It's an important tool for users who need detailed from code source.\"\n \"Input a comma-separated list of file names (without folder or path names) to view. Incomplete paths are not accepted. 
For example swim-main/src/example.txt is a full path file, but 'src/example' is incomplete directory folder not allowed\"\n \"Output is a dictionary with 'files' key containing a list of dictionaries for each file, \"\n \"**Ensure you've requested the repository structure before asking for file contents.The requested file must exist in the project**\"\n \"Useful for users diving deep into a project's codebase or documentation to understand its intricacies.\"\n )\n project: Project\n\n def _get_file_content(self, paths: List[str]) -> dict:\n file_tree = self.project.contents\n files = []\n for path in paths:\n file_content = file_tree.get_file_content(path)\n files.append({\n \"path\": path,\n \"content\": file_content,\n \"isValid\": True if file_content else False\n })\n return json.dumps({\n \"files\": files,\n \"ToolHint\": (\n \"You now have the content of the requested files. **then you need answer user question baied on content file**\\n\"\n \"Before answering, ensure you have enough context by considering the relevance of your response to the user's question. \"\n \"Calculate the relevance score, and if it falls below 0.7, request additional files. Repeat until the score is satisfactory.\\n\"\n \"**If you lack sufficient context to answer, continue exploring using this or other tools.**\"\n )\n })\n\n def _run(\n self,\n file_paths: str\n ) -> dict:\n \"\"\"Use the tool.\"\"\"\n paths = process_file_paths(file_paths)\n print(f\"use FileContentViewTool: {paths}\\n\")\n return self._get_file_content(paths)" }, { "identifier": "ProjectStructViewTool", "path": "codelearn/tools/project_struct_view.py", "snippet": "class ProjectStructViewTool(BaseTool):\n \"\"\"Tool that get project struct.\"\"\"\n\n name: str = \"get_project_struct\"\n description: str = (\n \"The 'get_project_struct' is designed to provide a structured view of the project's directory hierarchy. \"\n \"It returns a list of all files in the root directory and all subdirectory paths within the project. \"\n \"There's no input required for this tool. \"\n \"The output is a dictionary containing the 'structure' key, which holds the list of files and directories, \"\n \"and an 'ToolHint' key that provides guidance on how to interpret and further query the structure. \"\n \"The 'structure' key will contain a list of file paths and directories paths from the root directory and all subdirectory paths. \"\n \"The tool is particularly useful for understanding the layout of a project and identifying key directories or files of interest.if you did not know project structure before, using this tool first.\"\n )\n project: Project\n\n def _get_project_struct(self) -> Any:\n file_tree = self.project.contents\n structure = file_tree.get_project_structure()\n\n return json.dumps({\n \"structure\": structure,\n \"ToolHint\": (\n \"Analyze the repository structure, focusing on files or directories with examples as they are highly relevant. \"\n \"**Query files content directly If you can find some files associated with the problem** query subfolders for large structures. For example src/example.txt is file, src/example is directory folder\"\n \"In separate queries, fetch markdown files for documentation. \"\n \"Limit each query to 20 files. In case of an error, inform the user without excessive retries (maximum 3).\"\n )\n })\n\n \n def _run(\n self,\n *args, \n **kwargs\n ) -> Any:\n \"\"\"Use the tool.\"\"\"\n if not self.project:\n raise ValueError(\"not found project\")\n return self._get_project_struct()" } ]
import_statement:
```python
from typing import Any, List
from langchain.chat_models import ChatOpenAI
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.schema.embeddings import Embeddings
from langchain.schema.document import Document
from codelearn.agents.code_agent import CustomOutputParser, CustomPromptTemplate, template_with_history
from codelearn.project.project import Project
from langchain.chains import LLMChain
from codelearn.retrieval.code_retriever import CodeRetriever
from codelearn.retrieval.multi_retriever import MultiQueryMultiRetriever
from codelearn.storage.vector import VectorStoreBase
from codelearn.tools.code_search import CodeSearchTool
from codelearn.tools.directory_struct_view import DirectoryStructViewTool
from codelearn.tools.file_content_view import FileContentViewTool
from codelearn.tools.project_struct_view import ProjectStructViewTool
from langchain.chat_models import ChatOpenAI
from langchain.prompts import MessagesPlaceholder
from langchain.tools import BaseTool
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.memory import ConversationBufferWindowMemory
```
token_num: 3,646
cropped_code:
```python
def question_solve_agent(
    llm: ChatOpenAI,
    query: str,
    project: Project,
    vector_store: VectorStoreBase,
    embending: Embeddings,
    languages: List[str],
    chat_history: Any = {},
    max_iterations = 20
):
    code_retrival = CodeRetriever(vector_store=vector_store, embending=embending, index_name="code")
    multi_retrievel = MultiQueryMultiRetriever.from_llm(retrievers=[code_retrival], llm=llm, project=project, languages=languages)
    code_search = CodeSearchTool(
        project = project,
        multi_retriever = multi_retrievel
    )
```
all_code:
```python
def question_solve_agent(
    llm: ChatOpenAI,
    query: str,
    project: Project,
    vector_store: VectorStoreBase,
    embending: Embeddings,
    languages: List[str],
    chat_history: Any = {},
    max_iterations = 20
):
    code_retrival = CodeRetriever(vector_store=vector_store, embending=embending, index_name="code")
    multi_retrievel = MultiQueryMultiRetriever.from_llm(retrievers=[code_retrival], llm=llm, project=project, languages=languages)
    code_search = CodeSearchTool(
        project = project,
        multi_retriever = multi_retrievel
    )
```
next_line: directory_struct_view = DirectoryStructViewTool(project = project)
gold_snippet_index: 6
created_at: 2023-11-12 13:13:30+00:00
level: 8k
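Note how `gold_snippet_index` resolves across rows: 0 selects `LintViolation` in the TorchFix row (whose `next_line` is `LintViolation(`), and 6 here selects `DirectoryStructViewTool`, exactly the class constructed in this row's `next_line`. A small consistency check, reusing the hypothetical `row` from the loading sketch above:

```python
# Check that the gold context snippet is the one the target line uses.
gold = row["context"][row["gold_snippet_index"]]
print(gold["identifier"], gold["path"])
# For the row shown here one would expect:
#   DirectoryStructViewTool codelearn/tools/directory_struct_view.py
assert gold["identifier"] in row["next_line"]
```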
repo_name: kirill-vish/Beyond-INet
file_path: main.py
[ { "identifier": "run_imagenetx", "path": "inference/imagenet_x.py", "snippet": "@torch.no_grad()\ndef run_imagenetx(data_loader, model, device, model_name):\n model.eval()\n\n total_samples = 0\n max_probs_list = []\n max_indices_list = []\n file_names_list = []\n\n date = datetime.now().strftime(\"%d-%m-%Y_%H:%M:%S\")\n\n folder = os.path.join(os.environ[\"HOME\"],\n f\"outputs/imagenetx/{date}_{model_name}\")\n if not os.path.exists(folder):\n os.makedirs(folder)\n output_csv_path = os.path.join(folder, \"preds.csv\")\n\n with open(output_csv_path, \"w\", newline=\"\") as csvfile:\n csv_writer = csv.writer(csvfile)\n csv_writer.writerow(\n [\"file_name\", \"predicted_class\", \"predicted_probability\"])\n\n for batch in tqdm(data_loader):\n images, target, file_names = batch\n\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n logits = model(images)\n\n probs = F.softmax(logits, dim=1)\n max_probs, max_indices = torch.max(probs, dim=1)\n\n max_probs_list.append(max_probs.detach().cpu().numpy())\n max_indices_list.append(max_indices.detach().cpu().numpy())\n file_names_list.append(file_names)\n\n for file_name, pred_class, pred_prob in zip(\n file_names, max_indices, max_probs):\n csv_writer.writerow(\n [file_name, pred_class.item(),\n pred_prob.item()])\n\n total_samples += images.shape[0]\n\n print(\"General inference finished successfully!\")\n factor_accs = get_factor_accuracies(\n os.path.join(os.environ[\"HOME\"],\n f\"outputs/imagenetx/{date}_{model_name}\"))\n return error_ratio(factor_accs)" }, { "identifier": "run_invariance", "path": "inference/invariance.py", "snippet": "@torch.no_grad()\ndef run_invariance(data_loader, model, device, args):\n if args.experiment == \"scale\" or \"shift\" in args.experiment:\n if args.experiment == \"scale\":\n crop_transform = CropScale(model_name=args.model,\n scale_factor=args.scale_factor)\n elif \"shift\" in args.experiment:\n crop_transform = CropShift(model_name=args.model,\n shift=args.shift_x)\n if transform_val is None:\n transform_val = transforms.Compose([\n crop_transform,\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n else:\n normalize = transform_val.transforms[-1]\n assert isinstance(normalize, transforms.Normalize)\n transform_val = transforms.Compose([\n crop_transform,\n transform_val.transforms[-3],\n transform_val.transforms[-2],\n transform_val.transforms[-1],\n ])\n elif args.experiment == \"resolution\":\n if transform_val is None:\n transform_val = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]),\n ])\n scaling = 256 / 224\n transform_val.transforms[0].size = int(args.image_size * scaling)\n transform_val.transforms[1].size = args.image_size\n\n model.eval()\n\n total_correct_class_probs = 0.0\n total_correct = 0\n total_samples = 0\n\n for batch in tqdm(data_loader):\n images, target = batch\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n logits = model(images)\n\n probs = F.softmax(logits, dim=1)\n correct_class_probs = probs[torch.arange(images.size(0)), target]\n\n _, pred = torch.max(logits, 1)\n total_correct += (pred == target).sum().item()\n\n total_correct_class_probs += correct_class_probs.sum().item()\n total_samples += images.size(0)\n\n avg_correct_class_prob = total_correct_class_probs / total_samples\n 
top1_accuracy = total_correct / total_samples\n\n return {\n \"correct_class_prob\": avg_correct_class_prob,\n \"top1_accuracy\": top1_accuracy,\n }" }, { "identifier": "run_pug_imagenet", "path": "inference/pug_imagenet.py", "snippet": "@torch.no_grad()\ndef run_pug_imagenet(model, root_folder, transform_val=None):\n with open(os.path.join(root_folder, \"class_to_imagenet_idx.json\")) as f:\n labels = json.load(f)\n labels = dict(sorted(labels.items()))\n inversed_dict = {}\n counter = 0\n for k, v in labels.items():\n for val in v:\n inversed_dict[int(val)] = counter\n counter = counter + 1\n\n if transform_val is None:\n tr_normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n transform_val = transforms.Compose([\n transforms.CenterCrop(256),\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n tr_normalize,\n ])\n\n dataset_names = [\n \"Worlds\",\n \"Camera_Pitch\",\n \"Camera_Yaw\",\n \"Camera_Roll\",\n \"Object_Pitch\",\n \"Object_Yaw\",\n \"Object_Roll\",\n \"Object_Scale\",\n \"Object_Texture\",\n \"Scene_Light\",\n ]\n\n results = {}\n\n for dataset_name in dataset_names:\n dataset_path = os.path.join(root_folder, dataset_name)\n dataset = datasets.ImageFolder(dataset_path, transform=transform_val)\n dataloader = DataLoader(dataset,\n batch_size=256,\n shuffle=False,\n num_workers=10,\n drop_last=False)\n\n print(f\"Running inference on {dataset_name}.\")\n\n nb_corrects = 0.0\n for images, labels in tqdm(dataloader):\n images = images.cuda()\n labels = labels.cuda()\n with torch.no_grad(), torch.cuda.amp.autocast():\n output = model(images).softmax(dim=-1)\n pred = torch.argmax(output, dim=1)\n for p in range(pred.size(0)):\n if pred[p].item() in inversed_dict.keys():\n pred[p] = inversed_dict[pred[p].item()]\n else:\n pred[p] = 999\n nb_corrects += sum((pred == labels).float())\n\n accuracy = (nb_corrects / len(dataset)) * 100.0\n results[dataset_name] = accuracy.item()\n\n return results" }, { "identifier": "run_robustness", "path": "inference/robustness.py", "snippet": "@torch.no_grad()\ndef run_robustness(model, root, test_transform, args):\n model.eval()\n\n acc_val = evaluate_imagenet_val(\n model=model,\n data_dir=os.path.join(root, \"benchmarks/data/imagenet-val\"),\n test_transform=test_transform,\n )\n\n acc_a = evaluate_imagenet_a(\n model=model,\n data_dir=os.path.join(root, \"benchmarks/data/imagenet-a\"),\n test_transform=test_transform,\n )\n\n acc_r = evaluate_imagenet_r(\n model=model,\n data_dir=os.path.join(root, \"benchmarks/data/imagenet-r\"),\n test_transform=test_transform,\n )\n\n acc_sketch = evaluate_imagenet_sketch(\n model=model,\n data_dir=os.path.join(root, \"benchmarks/data/imagenet-sketch\"),\n test_transform=test_transform,\n )\n\n acc_v2 = evaluate_imagenet_v2(\n model=model,\n data_dir=os.path.join(root, \"benchmarks/data/imagenetv2\"),\n test_transform=test_transform,\n )\n\n # acc_c, _ = evaluate_imagenet_c(\n # model=model,\n # data_dir=os.path.join(root, \"benchmarks/data/imagenet-c\"),\n # test_transform=test_transform,\n # )\n\n acc_hard = evaluate_imagenet_hard(model,\n test_transform,\n device=args.device,\n args=args)\n\n return {\n \"acc_a\": acc_a,\n \"acc_r\": acc_r,\n \"acc_sketch\": acc_sketch,\n \"acc_v2\": acc_v2,\n \"acc_val\": acc_val,\n \"acc_hard\": acc_hard,\n # \"acc_c\": acc_c,\n }" }, { "identifier": "ImageFolderWithPaths", "path": "utils/misc.py", "snippet": "class ImageFolderWithPaths(ImageFolder):\n\n def __getitem__(self, index):\n path, target = self.samples[index]\n 
sample = self.loader(path)\n if self.transform is not None:\n sample = self.transform(sample)\n if self.target_transform is not None:\n target = self.target_transform(target)\n filename = os.path.basename(path)\n return sample, target, filename" }, { "identifier": "get_world_size", "path": "utils/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "load_model_transform", "path": "utils/misc.py", "snippet": "def load_model_transform(model_name, pretrained_dir, img_size=224):\n print(f\"Loading {model_name}\")\n checkpoint_path = None\n transform_val = None\n if model_name == \"deit3_21k\":\n model = models_deit.deit_base_patch16_LS(img_size=img_size)\n checkpoint_path = os.path.join(pretrained_dir,\n \"deit_3_base_224_21k.pth\")\n elif model_name == \"convnext_base_21k\":\n model = models_convnextv1.convnext_base()\n checkpoint_path = os.path.join(pretrained_dir,\n \"convnext_base_22k_1k_224.pth\")\n elif model_name == \"vit_clip\":\n model, _, transform_val = open_clip.create_model_and_transforms(\n 'ViT-B-16', pretrained='laion400m_e31', force_image_size=img_size)\n model = models_clip.CLIPModel(model=model, model_name='ViT-B-16')\n checkpoint_path = None\n elif model_name == \"convnext_clip\":\n model, _, transform_val = open_clip.create_model_and_transforms(\n 'convnext_base',\n pretrained='laion400m_s13b_b51k',\n force_image_size=img_size)\n model = models_clip.CLIPModel(model=model, model_name='convnext_base')\n checkpoint_path = None\n\n if checkpoint_path is not None:\n checkpoint = torch.load(checkpoint_path)\n state_dict = checkpoint['model']\n if img_size != 224 and model_name == 'deit3_21k':\n state_dict = interpolate_pos_embed(model, state_dict)\n msg = model.load_state_dict(state_dict, strict=False)\n print(msg)\n assert set(checkpoint['model'].keys()) == set(\n model.state_dict().keys())\n assert len(msg.missing_keys) == 0 and len(\n msg.unexpected_keys\n ) == 0, \"Some keys in the state dict do not match\"\n\n return model, transform_val" }, { "identifier": "resolve_name", "path": "utils/misc.py", "snippet": "def resolve_name(args):\n date = datetime.now().strftime(\"%d-%m-%Y_%H:%M:%S\")\n if args.experiment == \"robustness\":\n return f\"{date}_robustness_{args.model}\"\n elif args.experiment == \"imagenetx\":\n return f\"{date}_imagenetx_{args.model}\"\n elif args.experiment == \"imagenethard\":\n return f\"{date}_imagenethard_{args.model}\"\n elif args.experiment == \"pug_imagenet\":\n return f\"{date}_pug_imagenet_{args.model}\"\n elif args.experiment == \"scale\":\n return f\"{date}_scale_{args.model}\"\n elif args.experiment == \"resolution\":\n return f\"{date}_resolution_{args.model}\"\n elif args.experiment == \"imagenet_real\":\n return f\"{date}_real_{args.model}\"\n elif \"shift\" in args.experiment:\n return f\"{date}_{args.experiment}_{args.model}\"" } ]
import_statement:
```python
import argparse
import json
import os
import random

import numpy as np
import pandas
import torch
import torch.backends.cudnn as cudnn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import wandb
from collections import defaultdict
from pathlib import Path
from imagenetv2_pytorch import ImageNetV2Dataset
from torchvision import transforms

from inference.imagenet_x import run_imagenetx
from inference.invariance import run_invariance
from inference.pug_imagenet import run_pug_imagenet
from inference.robustness import run_robustness
from utils.misc import (ImageFolderWithPaths, get_world_size,
                        load_model_transform, resolve_name)
```
token_num: 4,030
parser.add_argument("--shift_y", type=int, default=0, help="Shift Y") parser.add_argument("--data_path", type=str, default="", help="dataset path") parser.add_argument("--pretrained_dir", type=str, default="pretrained", help="pretrained directory") parser.add_argument( "--nb_classes", default=1000, type=int, help="number of the classification types", ) parser.add_argument("--image_size", default=224, type=int, help="image size") parser.add_argument( "--output_dir", default="./outputs", help="path where to save, empty for no saving", ) parser.add_argument("--device", default="cuda", help="device to use for training / testing") parser.add_argument("--seed", default=0, type=int) parser.add_argument("--num_workers", default=10, type=int) parser.add_argument( "--pin_mem", action="store_true", help= "Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.", ) parser.set_defaults(pin_mem=True) parser.add_argument( "--num_runs", default=1, type=int, help="number of how many repeated runs of experiment", ) parser.add_argument("--n_bins", default=15, type=int, help="number of bins in ECE calculation") parser.add_argument("--run_name", type=str, default="") parser.add_argument("--dataset", type=str, default="") parser.add_argument("--debug", action="store_true") return parser def main(args): print("job dir: {}".format(os.path.dirname(os.path.realpath(__file__)))) print("{}".format(args).replace(", ", ",\n")) device = torch.device(args.device) seed = args.seed torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True model_name = None transform_val = None data_loader_val = None model, transform_val = load_model_transform(args.model, args.pretrained_dir, args.image_size) if transform_val is None: transform_val = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) print(transform_val) if args.experiment == "imagenetx" or args.experiment == "pug_imagenet": dataset_val = ImageFolderWithPaths(root=args.data_path, transform=transform_val) else: if "imagenetv2" in args.data_path: dataset_val = ImageNetV2Dataset("matched-frequency", transform=transform_val, location=args.data_path) elif "imagenet-r" in args.data_path: dataset_val = datasets.ImageFolder(os.path.join(args.data_path), transform=transform_val) elif "imagenet" in args.data_path: dataset_val = datasets.ImageFolder(os.path.join(args.data_path), transform=transform_val) if args.experiment != "robustness": data_loader_val = torch.utils.data.DataLoader( dataset_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=True, shuffle=False, ) model.to(device) model.eval() model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) print("Model = %s" % str(model_without_ddp)) print(args.model) print("number of params (M): %.2f" % (n_parameters / 1.0e6))
def get_args_parser(): parser = argparse.ArgumentParser("Beyond ImageNet accuracy") parser.add_argument( "--batch_size", default=512, type=int, help="Batch size per GPU (effective batch size is batch_size * # gpus", ) parser.add_argument("--model", type=str, metavar="MODEL", help="name of model") parser.add_argument("--experiment", default="scale", type=str, help="Name of model to train") parser.add_argument("--scale_factor", type=float, help="scale factor") parser.add_argument("--shift_x", type=int, default=0, help="Shift X") parser.add_argument("--shift_y", type=int, default=0, help="Shift Y") parser.add_argument("--data_path", type=str, default="", help="dataset path") parser.add_argument("--pretrained_dir", type=str, default="pretrained", help="pretrained directory") parser.add_argument( "--nb_classes", default=1000, type=int, help="number of the classification types", ) parser.add_argument("--image_size", default=224, type=int, help="image size") parser.add_argument( "--output_dir", default="./outputs", help="path where to save, empty for no saving", ) parser.add_argument("--device", default="cuda", help="device to use for training / testing") parser.add_argument("--seed", default=0, type=int) parser.add_argument("--num_workers", default=10, type=int) parser.add_argument( "--pin_mem", action="store_true", help= "Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.", ) parser.set_defaults(pin_mem=True) parser.add_argument( "--num_runs", default=1, type=int, help="number of how many repeated runs of experiment", ) parser.add_argument("--n_bins", default=15, type=int, help="number of bins in ECE calculation") parser.add_argument("--run_name", type=str, default="") parser.add_argument("--dataset", type=str, default="") parser.add_argument("--debug", action="store_true") return parser def main(args): print("job dir: {}".format(os.path.dirname(os.path.realpath(__file__)))) print("{}".format(args).replace(", ", ",\n")) device = torch.device(args.device) seed = args.seed torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) cudnn.benchmark = True model_name = None transform_val = None data_loader_val = None model, transform_val = load_model_transform(args.model, args.pretrained_dir, args.image_size) if transform_val is None: transform_val = transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) print(transform_val) if args.experiment == "imagenetx" or args.experiment == "pug_imagenet": dataset_val = ImageFolderWithPaths(root=args.data_path, transform=transform_val) else: if "imagenetv2" in args.data_path: dataset_val = ImageNetV2Dataset("matched-frequency", transform=transform_val, location=args.data_path) elif "imagenet-r" in args.data_path: dataset_val = datasets.ImageFolder(os.path.join(args.data_path), transform=transform_val) elif "imagenet" in args.data_path: dataset_val = datasets.ImageFolder(os.path.join(args.data_path), transform=transform_val) if args.experiment != "robustness": data_loader_val = torch.utils.data.DataLoader( dataset_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=True, shuffle=False, ) model.to(device) model.eval() model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) print("Model = %s" % str(model_without_ddp)) print(args.model) print("number of params (M): %.2f" % (n_parameters / 1.0e6))
next_line: eff_batch_size = args.batch_size * get_world_size()
gold_snippet_index: 5
created_at: 2023-11-15 22:22:06+00:00
level: 8k
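This row's target line scales the per-GPU batch size by the number of distributed workers; the `get_world_size` helper shown in the row's context falls back to 1 when `torch.distributed` is not initialized. A self-contained sketch of that arithmetic:

```python
import torch.distributed as dist

def get_world_size() -> int:
    # Mirrors the helper in this row's context: one worker unless
    # torch.distributed has been initialized.
    if not (dist.is_available() and dist.is_initialized()):
        return 1
    return dist.get_world_size()

batch_size = 512                                # per-GPU (the parser default)
eff_batch_size = batch_size * get_world_size()  # 2048 on 4 initialized ranks
```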
repo_name: shengliu66/ICV
file_path: tasks/base.py
[ { "identifier": "hf_datasets_root", "path": "anchor.py", "snippet": "" }, { "identifier": "TokenizedForStyleRightPad", "path": "tasks/loader.py", "snippet": "class TokenizedForStyleRightPad(Dataset):\n def __init__(self, data, tok: PreTrainedTokenizer, prompt_fn, mode = 'eval', no_padding=False, prefix=''):\n # data: [query: str, choices: list(str)]\n self.tok = tok\n self.prompt_fn = prompt_fn\n self.references = None\n self.max_length = self._find_max_length(data, mode=mode)\n if mode == 'ft':\n self.data = self._build_ft_data(data)\n elif mode == 'eval':\n self.data, self.references = self._build_eval_data(data, no_padding=no_padding, prefix=prefix)\n else:\n raise NotImplementedError\n logger.info(f\"Tokenization finished: {len(self.data)}, max_length={self.max_length}\")\n\n def _find_max_length(self, data, mode=eval):\n max_len = 0\n\n def tok_len(t):\n return len(self.tok.encode(t))\n\n for ex in tqdm(data, desc=\"Data preprocessing(1/2)\"):\n query = ex[\"query\"]\n if mode == 'eval':\n len_query = len(self.prompt_fn(query)[0])\n elif mode == 'ft':\n len_query = len(self.prompt_fn(query)[1])\n else:\n raise NotImplementedError\n max_len = max(max_len, len_query)\n return max_len\n\n def _build_eval_data(self, data, no_padding=False, prefix=''):\n processed = []\n references = []\n for ex in tqdm(data, desc=\"Data preprocessing(2/2)\"):\n query = ex[\"query\"]\n processed_input = self.prompt_fn(query, return_reference = True, Instruction = prefix)\n t_query, t_full, t_reference = processed_input\n processed_input = self.tokenize(t_full, t_query, no_padding=no_padding)\n processed.append(processed_input)\n references.append(t_reference)\n\n logger.info(\"Style dataset: finish!\")\n return processed, references\n\n def _build_ft_data(self, data):\n processed = []\n for ex in tqdm(data, desc=\"Data preprocessing(2/2)\"):\n query = ex[\"query\"]\n processed_input = self.prompt_fn(query)\n t_query, t_full = processed_input\n processed_input = self.tokenize(t_query, t_full)\n processed.append(processed_input)\n\n logger.info(\"Finetuning dataset: finish!\")\n return processed\n\n def tokenize_demonstration(self, demonstration):\n e = self.tok(demonstration)\n return torch.LongTensor(e[\"input_ids\"]), torch.LongTensor(e[\"attention_mask\"]) # no padding\n\n def tokenize_each_demonstration(self, demonstration_list, dataset_name=None):\n tokenized_demonstration_list = []\n for exp_id in range(len(demonstration_list)):\n demonstration_list[exp_id] = (demonstration_list[exp_id][0].strip(\" .\").strip(\".\"), demonstration_list[exp_id][1].strip(\" .\").strip(\".\"))\n\n e_original = self.tok(demonstration_list[exp_id][0]) \n e_rewrite = self.tok(demonstration_list[exp_id][1])\n tokenized_demonstration_list.append((e_original, e_rewrite)) \n return tokenized_demonstration_list\n\n def tokenize(self, only_query, full_text, no_padding = False):\n tok_only_query = self.tok(only_query, add_special_tokens=False)\n tok_full_no_padding = self.tok(full_text, add_special_tokens=False)\n tok_full = self.tok(\n full_text,\n padding=\"max_length\",\n max_length=self.max_length,\n add_special_tokens=False,\n ) # <pad> is not a special token\n\n if no_padding: \n e = {\n \"input_ids\": tok_full_no_padding.input_ids,\n \"attention_mask\": tok_full_no_padding.attention_mask,\n }\n else:\n e = {\n \"input_ids\": tok_full.input_ids,\n \"attention_mask\": tok_full.attention_mask,\n }\n\n return e\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n\n es = self.data[idx]\n\n if 
self.references:\n return torch.LongTensor(es[\"input_ids\"]), torch.LongTensor(es[\"attention_mask\"]), self.references[idx]\n else:\n return es" }, { "identifier": "RandomContext", "path": "utils/rng_ctx.py", "snippet": "class RandomContext:\n \"\"\"Save and restore state of PyTorch, NumPy, Python RNGs.\"\"\"\n\n def __init__(self, seed=None):\n outside_state = RandomState()\n\n random.seed(seed)\n np.random.seed(seed)\n if seed is None:\n torch.manual_seed(random.randint(-sys.maxsize - 1, sys.maxsize))\n else:\n torch.manual_seed(seed)\n # torch.cuda.manual_seed_all is called by torch.manual_seed\n self.inside_state = RandomState()\n\n outside_state.restore()\n\n self._active = False\n\n def __enter__(self):\n if self._active:\n raise Exception(\"RandomContext can be active only once\")\n\n self.outside_state = RandomState()\n self.inside_state.restore()\n self._active = True\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.inside_state = RandomState()\n self.outside_state.restore()\n self.outside_state = None\n\n self._active = False" }, { "identifier": "EmptyContext", "path": "utils/rng_ctx.py", "snippet": "class EmptyContext:\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass" }, { "identifier": "PCA", "path": "utils/pca.py", "snippet": "class PCA(nn.Module):\n def __init__(self, n_components):\n super().__init__()\n self.n_components = n_components\n\n @torch.no_grad()\n def fit(self, X):\n n, d = X.size()\n if self.n_components is not None:\n d = min(self.n_components, d)\n self.register_buffer(\"mean_\", X.mean(0, keepdim=True))\n Z = X - self.mean_ # center\n U, S, Vh = torch.linalg.svd(Z, full_matrices=False)\n Vt = Vh\n U, Vt = svd_flip(U, Vt)\n self.register_buffer(\"components_\", Vt[:d])\n return self\n\n def forward(self, X):\n return self.transform(X)\n\n def transform(self, X):\n assert hasattr(self, \"components_\"), \"PCA must be fit before use.\"\n return torch.matmul(X - self.mean_, self.components_.t())\n\n def fit_transform(self, X):\n self.fit(X)\n return self.transform(X)\n\n def inverse_transform(self, Y):\n assert hasattr(self, \"components_\"), \"PCA must be fit before use.\"\n return torch.matmul(Y, self.components_) + self.mean_" }, { "identifier": "modified_forward_context_manager", "path": "utils/context_manager.py", "snippet": "def modified_forward_context_manager(model, forward_modifiers=()):\n context_manager = CombinedContextManager([*forward_modifiers])\n return context_manager" }, { "identifier": "traced_forward_context_manager", "path": "utils/context_manager.py", "snippet": "def traced_forward_context_manager(model, with_submodules=False):\n forward_trace = ForwardTrace()\n context_manager = ForwardTracer(model, forward_trace, with_submodules=with_submodules)\n return context_manager, forward_trace" } ]
import json
import logging
import random
import re
import torch
import numpy as np
import datasets
from collections import defaultdict
from anchor import hf_datasets_root
from tasks.loader import TokenizedForStyleRightPad
from utils.rng_ctx import RandomContext, EmptyContext
from utils.pca import PCA
from utils.context_manager import modified_forward_context_manager, traced_forward_context_manager
4,288
dataset_name, subset, split = self.dataset_part(part) data = self.do_download(dataset_name, subset, split=split, cache_dir=str(hf_datasets_root)) if part == "sample": data = data.train_test_split(test_size=0.4)['train'] if part == "result": data = data.train_test_split(test_size=0.4)['test'] data.to_json(f_path) logger.info(f"... success, saved at: {f_path}") @staticmethod def do_download(dataset_name, subset, split, cache_dir): raw_data = datasets.load_dataset(dataset_name, subset, split=split, cache_dir=cache_dir) logger.info("Download success.") return raw_data def mk_result_dataset(self, tokenizer, no_padding=False, prefix=''): return TokenizedForStyleRightPad(self.raw_data_result, tokenizer, self.paralell_style_promptify, no_padding=no_padding, prefix=prefix) def mk_test_dataset(self, tokenzier): return self.mk_result_dataset(tokenzier) def mk_dev_dataset(self, tokenizer): sample_size = len(self.raw_data_result) ans_set = set(e["answer_idx"] for e in self.raw_data_sample) ans_map = defaultdict(list) for idx, e in enumerate(self.raw_data_sample): label = e["answer_idx"] ans_map[label].append(idx) per_label = sample_size // len(ans_set) residual = sample_size - per_label * len(ans_set) selected_ids = [] with self._rng_context: for label, all_ids in ans_map.items(): selected = random.sample(all_ids, per_label) selected_ids.extend(selected) remain_ids = set(range(len(self.raw_data_sample))) - set(selected_ids) residual_selected = random.sample(remain_ids, residual) selected_ids.extend(residual_selected) random.shuffle(selected_ids) self.raw_data_dev = [self.raw_data_sample[i] for i in selected_ids] return TokenizedForStyleRightPad(self.raw_data_dev, tokenizer, self.paralell_style_promptify) def mk_finetune_dataset(self, tokenizer, mode = 'ft'): selected_exemplar = self._cahced_selected_exemplar assert (selected_exemplar != None), "No demonstration is selected yet, run stratified_sampling first! 
\n" return TokenizedForStyleRightPad(selected_exemplar, tokenizer, self.paralell_style_promptify, mode=mode) def mk_result_dataset_with_demostration(self, tokenizer, exemplar_str, no_padding=False): def add_demostration(query, return_reference = False, Instruction = ''): if return_reference: with_query, with_query_and_paraphrase, references = self.paralell_style_promptify(query, return_reference=return_reference, Instruction=Instruction) with_query = with_query.replace(Instruction,"") with_query_and_paraphrase = with_query_and_paraphrase.replace(Instruction,"") return f"{exemplar_str}{with_query}", f"{exemplar_str}{with_query_and_paraphrase}", references else: with_query, with_query_and_paraphrase = self.paralell_style_promptify(query, return_reference=return_reference, Instruction=Instruction) with_query = with_query.replace(Instruction,"") with_query_and_paraphrase = with_query_and_paraphrase.replace(Instruction,"") return f"{exemplar_str}{with_query}", f"{exemplar_str}{with_query_and_paraphrase}" return TokenizedForStyleRightPad(self.raw_data_result, tokenizer, add_demostration, no_padding=no_padding) @staticmethod def modified_forward(model, inputs, forward_modifiers = ()): context_manager = modified_forward_context_manager(model, forward_modifiers=forward_modifiers) input_ids = torch.tensor(inputs['input_ids']) attention_mask = torch.tensor(inputs['attention_mask']) with context_manager: outputs = model( input_ids=input_ids.unsqueeze(0).cuda(), attention_mask=attention_mask.unsqueeze(0).cuda(), ) return outputs @staticmethod def traced_forward(model, inputs, forward_modifiers = (), with_submodules=False): context_manager, forward_trace = traced_forward_context_manager(model, with_submodules=with_submodules) with context_manager: outputs = BaseProbInference.modified_forward( model, inputs=inputs, forward_modifiers=forward_modifiers, ) return outputs, forward_trace @staticmethod def get_latentstates(model, inputs): h_all = [] for example_id in range(len(inputs)): latents_for_all_styles= [] for style_id in range(len(inputs[example_id])): _, forward_trace = BaseProbInference.traced_forward(model, inputs[example_id][style_id], with_submodules=False) task_latents = forward_trace.residual_stream.hidden[:, :, -4:, :].mean(2,keepdim=False) #[:, :, -1, :] # get last token task_latents = task_latents[:, 1:] # the first one is the embedding layer (num_data, num_layers, hidden_size) latents_for_all_styles.append(task_latents) h_all.append(tuple(latents_for_all_styles)) return h_all @staticmethod def get_icv(model, inputs, rank=1): hidden_states = BaseProbInference.get_latentstates(model, inputs) _, num_layers, hidden_dim = hidden_states[0][0].size() hidden_states_all = [] num_demonstration = len(hidden_states) for demonstration_id in range(num_demonstration): h = hidden_states[demonstration_id][0].flatten() - hidden_states[demonstration_id][1].flatten() hidden_states_all.append(h) fit_data = torch.stack(hidden_states_all)
logger = logging.getLogger("task") class BaseProbInference: def __init__(self, prompt_version): if prompt_version == "default": self.prompt_version = self.default_prompt_version() else: self.prompt_version = prompt_version self.raw_data_sample = None self.raw_data_dev = None self.can_be_stratified = False self.num_base_shot = 1 self._rng_context = EmptyContext() self._cached_prefix = None self._cached_ex_list = None self._cahced_selected_exemplar = None self.shuffled_mapping = None def default_prompt_version(self): raise NotImplementedError def set_seed(self, seed): self._rng_context = RandomContext(seed=seed) def dataset_signature(self): # { # "result": (dataset_name, subset, split), # which produce the final result # "sample": (dataset_name, subset, split), # which we sample ICL few-shot examples # } raise NotImplementedError def dataset_part(self, part): return self.dataset_signature()[part] def dataset_preprocess(self, raw_data): raise NotImplementedError def handcrafted_exemplars(self): raise NotImplementedError def exemplar_seperator(self): raise NotImplementedError def paralell_style_promptify(self, query): raise NotImplementedError def shuffle_exemplars(self): prefix = self._cached_prefix ex_list = self._cached_ex_list ex_list_with_idx = list(enumerate(ex_list)) with self._rng_context: random.shuffle(ex_list_with_idx) indices, ex_list = zip(*ex_list_with_idx) self.shuffled_mapping = indices return self.build_exemplar_from_examples(prefix, ex_list) def random_selected_exemplars(self, num_shots, prefix = ""): with self._rng_context: num_shots = min(len(self.raw_data_sample), num_shots) sampled = random.sample(self.raw_data_sample, num_shots) self._cahced_selected_exemplar = sampled ex_list = [e["query"] for e in sampled] self._cached_prefix = prefix self._cached_ex_list = ex_list return self.build_exemplar_from_examples(prefix, ex_list) def stratified_sampling(self, num_k_shots): num_shots = self.num_base_shot * num_k_shots if not self.can_be_stratified: logger.info("Cannot be stratified, fallback to random selection.") return self.random_selected_exemplars(num_shots) prefix = "" ans_set = set(e["answer_idx"] for e in self.raw_data_sample) ans_map = defaultdict(list) for idx, e in enumerate(self.raw_data_sample): label = e["answer_idx"] ans_map[label].append(idx) per_label = num_shots // len(ans_set) residual = num_shots - per_label * len(ans_set) selected_ids = [] with self._rng_context: for label, all_ids in ans_map.items(): selected = random.sample(all_ids, per_label) selected_ids.extend(selected) remain_ids = set(range(len(self.raw_data_sample))) - set(selected_ids) residual_selected = random.sample(remain_ids, residual) selected_ids.extend(residual_selected) random.shuffle(selected_ids) selected_exemplar = [self.raw_data_sample[i] for i in selected_ids] self._cahced_selected_exemplar = selected_exemplar ex_list = [e["query"] for e in selected_exemplar] self._cached_prefix = prefix self._cached_ex_list = ex_list return self.build_exemplar_from_examples(prefix, ex_list) def build_exemplar_from_examples(self, prefix, ex_list): s = prefix if len(s): s += self.exemplar_seperator() for query in ex_list: _, line = self.paralell_style_promptify(query) # query, <query_with_answer> s += line + self.exemplar_seperator() return s def dataset_file_path(self, part): dataset_name, subset, split = self.dataset_part(part) dumped_folder = hf_datasets_root.joinpath("dumped") if not dumped_folder.exists(): dumped_folder.mkdir(parents=True) if part == "sample": split = 'train' if part == "result": 
split = 'test' file_name = f"{dataset_name}-{subset}-{split}.jsonl" file_name = re.sub(r"[^\w_. -]", "_", file_name) return dumped_folder.joinpath(file_name) def do_load_part(self, part): f_path = self.dataset_file_path(part) print(f_path) if not f_path.exists(): self.not_exist_download(part) return self.do_load_part(part) # call once more else: with f_path.open("r") as f: raw_data = [json.loads(line) for line in f] data = self.dataset_preprocess(raw_data) logger.info(f"Data loaded: {part}.") return data def do_load(self): self.raw_data_sample = self.do_load_part("sample") self.raw_data_result = self.do_load_part("result") def not_exist_download(self, part): f_path = self.dataset_file_path(part) logger.info(f"{f_path} not exist, download from huggingface datasets hub...") dataset_name, subset, split = self.dataset_part(part) data = self.do_download(dataset_name, subset, split=split, cache_dir=str(hf_datasets_root)) if part == "sample": data = data.train_test_split(test_size=0.4)['train'] if part == "result": data = data.train_test_split(test_size=0.4)['test'] data.to_json(f_path) logger.info(f"... success, saved at: {f_path}") @staticmethod def do_download(dataset_name, subset, split, cache_dir): raw_data = datasets.load_dataset(dataset_name, subset, split=split, cache_dir=cache_dir) logger.info("Download success.") return raw_data def mk_result_dataset(self, tokenizer, no_padding=False, prefix=''): return TokenizedForStyleRightPad(self.raw_data_result, tokenizer, self.paralell_style_promptify, no_padding=no_padding, prefix=prefix) def mk_test_dataset(self, tokenzier): return self.mk_result_dataset(tokenzier) def mk_dev_dataset(self, tokenizer): sample_size = len(self.raw_data_result) ans_set = set(e["answer_idx"] for e in self.raw_data_sample) ans_map = defaultdict(list) for idx, e in enumerate(self.raw_data_sample): label = e["answer_idx"] ans_map[label].append(idx) per_label = sample_size // len(ans_set) residual = sample_size - per_label * len(ans_set) selected_ids = [] with self._rng_context: for label, all_ids in ans_map.items(): selected = random.sample(all_ids, per_label) selected_ids.extend(selected) remain_ids = set(range(len(self.raw_data_sample))) - set(selected_ids) residual_selected = random.sample(remain_ids, residual) selected_ids.extend(residual_selected) random.shuffle(selected_ids) self.raw_data_dev = [self.raw_data_sample[i] for i in selected_ids] return TokenizedForStyleRightPad(self.raw_data_dev, tokenizer, self.paralell_style_promptify) def mk_finetune_dataset(self, tokenizer, mode = 'ft'): selected_exemplar = self._cahced_selected_exemplar assert (selected_exemplar != None), "No demonstration is selected yet, run stratified_sampling first! 
\n" return TokenizedForStyleRightPad(selected_exemplar, tokenizer, self.paralell_style_promptify, mode=mode) def mk_result_dataset_with_demostration(self, tokenizer, exemplar_str, no_padding=False): def add_demostration(query, return_reference = False, Instruction = ''): if return_reference: with_query, with_query_and_paraphrase, references = self.paralell_style_promptify(query, return_reference=return_reference, Instruction=Instruction) with_query = with_query.replace(Instruction,"") with_query_and_paraphrase = with_query_and_paraphrase.replace(Instruction,"") return f"{exemplar_str}{with_query}", f"{exemplar_str}{with_query_and_paraphrase}", references else: with_query, with_query_and_paraphrase = self.paralell_style_promptify(query, return_reference=return_reference, Instruction=Instruction) with_query = with_query.replace(Instruction,"") with_query_and_paraphrase = with_query_and_paraphrase.replace(Instruction,"") return f"{exemplar_str}{with_query}", f"{exemplar_str}{with_query_and_paraphrase}" return TokenizedForStyleRightPad(self.raw_data_result, tokenizer, add_demostration, no_padding=no_padding) @staticmethod def modified_forward(model, inputs, forward_modifiers = ()): context_manager = modified_forward_context_manager(model, forward_modifiers=forward_modifiers) input_ids = torch.tensor(inputs['input_ids']) attention_mask = torch.tensor(inputs['attention_mask']) with context_manager: outputs = model( input_ids=input_ids.unsqueeze(0).cuda(), attention_mask=attention_mask.unsqueeze(0).cuda(), ) return outputs @staticmethod def traced_forward(model, inputs, forward_modifiers = (), with_submodules=False): context_manager, forward_trace = traced_forward_context_manager(model, with_submodules=with_submodules) with context_manager: outputs = BaseProbInference.modified_forward( model, inputs=inputs, forward_modifiers=forward_modifiers, ) return outputs, forward_trace @staticmethod def get_latentstates(model, inputs): h_all = [] for example_id in range(len(inputs)): latents_for_all_styles= [] for style_id in range(len(inputs[example_id])): _, forward_trace = BaseProbInference.traced_forward(model, inputs[example_id][style_id], with_submodules=False) task_latents = forward_trace.residual_stream.hidden[:, :, -4:, :].mean(2,keepdim=False) #[:, :, -1, :] # get last token task_latents = task_latents[:, 1:] # the first one is the embedding layer (num_data, num_layers, hidden_size) latents_for_all_styles.append(task_latents) h_all.append(tuple(latents_for_all_styles)) return h_all @staticmethod def get_icv(model, inputs, rank=1): hidden_states = BaseProbInference.get_latentstates(model, inputs) _, num_layers, hidden_dim = hidden_states[0][0].size() hidden_states_all = [] num_demonstration = len(hidden_states) for demonstration_id in range(num_demonstration): h = hidden_states[demonstration_id][0].flatten() - hidden_states[demonstration_id][1].flatten() hidden_states_all.append(h) fit_data = torch.stack(hidden_states_all)
pca = PCA(n_components=rank).to(fit_data.device).fit(fit_data.float())
4
2023-11-11 18:20:45+00:00
8k
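The record above ends where `get_icv` stacks one flattened hidden-state difference per demonstration pair and fits the repository's `PCA` module to that matrix (the gold next line, index 4 in the context list). A minimal sketch of the same reduction with plain `torch.linalg.svd` in place of the `PCA` class; the input shapes and the centering step are assumptions read off `get_icv` and `PCA.fit`:

    import torch

    def style_direction(h_original: torch.Tensor, h_rewrite: torch.Tensor, rank: int = 1) -> torch.Tensor:
        # One (num_layers * hidden_dim) vector per demonstration pair, as in get_icv.
        diffs = (h_original - h_rewrite).flatten(start_dim=1)
        # Center before the SVD, mirroring the mean subtraction in PCA.fit.
        centered = diffs - diffs.mean(0, keepdim=True)
        _, _, vh = torch.linalg.svd(centered, full_matrices=False)
        # Rows of vh are principal directions; keep the top `rank` of them.
        return vh[:rank]

The sign convention (original minus rewrite) follows the difference loop in `get_icv`.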
Fraunhofer-SCAI/corr_shap
corr_shap/sampling/sampling_factory.py
[ { "identifier": "SamplingStrategy", "path": "corr_shap/sampling/SamplingStrategy.py", "snippet": "class SamplingStrategy:\n def __init__(self, explainer, **kwargs):\n \"\"\" Construct all necessary attributes for the SamplingStrategy object.\"\"\"\n self.data = explainer.data.data\n self.data_weights = explainer.data.weights\n self.data_weight_sum = np.sum(self.data_weights)\n self.N = explainer.N # num samples in self.data\n\n def sample(self, m):\n \"\"\"\n Return prepared sample data.\n These data have fixed features for those contained in subset (m=1) and normalized weights.\n\n :param m: given mask of subset\n :return: samples with fixed masked features and normalized weights\n \"\"\"\n x = self.x\n samples = self.data.copy()\n samples = self.set_masked_features_to_instance(m, x, samples)\n weights = self.normalize(self.data_weights)\n return samples, weights\n\n def normalize(self, weights):\n \"\"\" Normalize weights by their sum\"\"\"\n if self.data_weight_sum != 0:\n weights = weights/self.data_weight_sum\n return weights\n\n def set_masked_features_to_instance(self, m, x, samples):\n \"\"\"\n Set masked features for subset to given instance.\n\n :param m: given mask of subset\n :param x: given instance to be explained\n :param samples: background data that are the basis for the sample\n :return: samples with fixed masked features\n \"\"\"\n if isinstance(self.varyingFeatureGroups, (list,)):\n for j in range(self.varyingFeatureGroups.shape[0]):\n for k in self.varyingFeatureGroups[j]:\n if m[j] == 1.0:\n samples[:, k] = x[0, k]\n else:\n # for non-jagged numpy array we can significantly boost performance\n mask = m == 1.0\n groups = self.varyingFeatureGroups[mask]\n if len(groups.shape) == 2:\n for group in groups:\n samples[:, group] = x[0, group]\n else:\n # further performance optimization in case each group has a single feature\n evaluation_data = x[0, groups]\n samples[:, groups] = evaluation_data\n return samples\n\n def set_instance(self, instance):\n \"\"\" Set instance to x. 
\"\"\"\n self.x = instance.x.copy()\n\n\n def set_varying_feature_groups(self, varying_groups):\n \"\"\" Set indicies of varying feature groups.\"\"\"\n self.varyingFeatureGroups = varying_groups" }, { "identifier": "GaussStrategy", "path": "corr_shap/sampling/GaussStrategy.py", "snippet": "class GaussStrategy(SamplingStrategy):\n \"\"\"Uses the idea that the data to be explained are multivariate normally distributed\n to modify the kernel SHAP method to explain the output of a function.\n\n If the data are multivariate normally distributed, we can calculate\n the conditional expected value and the conditional covariance while fixing the given subset.\n Sampling can be done using these new parameters.\n \"\"\"\n\n def __init__(self, explainer):\n \"\"\" Construct all necessary attributes for the GaussStrategy object, especially mean and covariance.\"\"\"\n super().__init__(explainer)\n self.mean_gen, self.cov_gen = self.calc_mean_cov(self.data, self.data_weights)\n\n def sample(self, m, x=None, mean=None, cov=None):\n \"\"\"\n Return prepared sample data.\n Samples from normal distribution with computed conditional mean (_varying) und covariance (_varying)\n\n :param m: given mask of subset\n :param x: given instance to be explained\n :param mean: mean of given data\n :param cov: covariance of given data\n :return: samples with fixed masked features and normalized weights\n \"\"\"\n\n if x is None:\n x = self.x\n if mean is None:\n mean = self.mean_varying\n if cov is None:\n cov = self.cov_varying\n\n mean_cond, cov_cond = self.calc_conditional_mean_cov(x, m, mean, cov)\n samples = self.data.copy()\n samples = self.set_masked_features_to_instance(m, x, samples)\n gauss_sample_len_mean = np.random.multivariate_normal(mean_cond, cov_cond, self.N)\n # if features are not in subset fill them with random gauss samples of same mean_varying and covariance\n mask = m == 1.0\n help_mask = np.zeros_like(self.group_mask)\n help_mask[self.group_mask == True] = ~mask\n samples[:, help_mask] = gauss_sample_len_mean\n\n weights = np.ones(self.N) / self.N\n return samples, weights\n\n def set_varying_feature_groups(self, varying_groups):\n \"\"\"\n Set mask only to features which vary in the dataset.\n\n :param varying_groups: contain indices of featues which vary in dataset\n :return: set mask to true if feature varies, determines mean and covariance only from varying features\n \"\"\"\n super().set_varying_feature_groups(varying_groups)\n group_mask = np.zeros(self.data.data.shape[1], dtype=bool)\n for ind in varying_groups:\n group_mask[ind] = True\n self.group_mask = group_mask\n self.mean_varying, self.cov_varying = self.mask_mean_cov(self.mean_gen, self.cov_gen)\n\n def calc_mean_cov(self, data, weights=None):\n \"\"\"\n Return mean and covariance of given data.\n\n :param data: data for which mean and covariance is to be calculated\n :param weights: weights assigned to data\n :return: mean and covariance\n \"\"\"\n mean_trainingset = np.average(data, axis=0, weights=weights)\n cov_trainingset = np.cov(np.transpose(data))\n\n return mean_trainingset, cov_trainingset\n\n def calc_conditional_mean_cov(self, x, m, mean, cov):\n \"\"\"\n Return mean und covariance of non-subset features conditional on (with instance x) fixed features of subset.\n\n :param x: given instance to be explained\n :param m: given mask of subset\n :param mean: mean of given data\n :param cov: covariance of given data\n :return: conditional mean and covariance of non-subset features\n \"\"\"\n #\n 
warnings.filterwarnings(\"ignore\")\n\n # split mean_varying in S and S_bar\n mean_sub = mean[m == 1]\n mean_subcom = mean[m == 0]\n\n # split covariance in SS, S_barS, SS_bar and S_barS_bar\n cov_sub_sub = cov[m == 1][:, m == 1]\n cov_sub_subcom = cov[m == 1][:, m == 0]\n cov_subcom_sub = np.transpose(cov_sub_subcom)\n cov_subcom_subcom = cov[m == 0][:, m == 0]\n\n x_sub = np.transpose(x)\n x_sub = x_sub[self.group_mask]\n x_sub = x_sub[m == 1]\n x_sub = x_sub.reshape(-1)\n # use pseudo-inverse for inverse covariance for simplicity\n cov_sub_sub_inv = np.linalg.pinv(cov_sub_sub)\n\n # compute conditional mean_varying and conditional covariance\n mean_cond = np.add(mean_subcom, np.dot(cov_subcom_sub, np.dot(cov_sub_sub_inv, x_sub - mean_sub)))\n cov_cond = cov_subcom_subcom - np.dot(cov_subcom_sub, np.dot(cov_sub_sub_inv, cov_sub_subcom))\n\n return mean_cond, cov_cond\n\n def mask_mean_cov(self, mean, cov):\n \"\"\"\n Exclude non-varying indices from mean and covariance because they are not important for further computations.\n\n :param mean: mean of given data\n :param cov: covariance of given data\n :return: mean and covariance adjusted for non-varying features\n \"\"\"\n mean_varying = mean[self.group_mask] # TODO groups can be in different order from features => make changes everywhere self.group_mask is used\n cov_varying = cov[self.group_mask][:, self.group_mask]\n return mean_varying, cov_varying" }, { "identifier": "CopulaStrategy", "path": "corr_shap/sampling/CopulaStrategy.py", "snippet": "class CopulaStrategy(GaussStrategy):\n \"\"\" Uses a Gaussian copula to modify the kernel SHAP method to explain the output of any function.\n\n Using the Gaussian copula, we represent the data by their marginal distribution\n and transform them so that they are multivariate normally distributed.\n Now we can apply the first modification.\n After that, the data must be transformed back accordingly.\n \"\"\"\n def __init__(self, explainer, **kwargs):\n \"\"\"\n Construct all necessary attributes for the CopulaStrategy object.\n A sorted version of data is determined.\n \"\"\"\n\n super().__init__(explainer)\n\n # sort each column of the data separately to use in transform_data_back\n self.data_sorted = np.zeros((self.data.shape[0], self.data.shape[1]))\n for col in range(self.data.shape[1]):\n sample = list(self.data[:, col])\n self.data_sorted[:, col] = sorted(sample)\n\n def sample(self, m, x=None, mean=None, cov=None):\n \"\"\"\n Return prepared sample data.\n Samples from data that are previously transformed to normal distribution.\n Afterwards back transformation of data and samples.\n\n :param m: given mask of subset\n :param x: given instance to be explained\n :param mean: mean of given data\n :param cov: covariance of given data\n :return: samples with fixed masked features and normalized weights\n \"\"\"\n\n samples, weights = super().sample(m, x, mean, cov)\n samples = self.transform_data_back(samples)\n return samples, weights\n\n def calc_mean_cov(self, data, weights):\n \"\"\"\n Return mean and covariance of given data.\n\n :param data: data for which mean and covariance is to be calculated\n :param weights: weights assigned to data\n :return: mean and covariance\n \"\"\"\n data_transformed = self.transform_data(data)\n return super().calc_mean_cov(data=data_transformed, weights=weights)\n\n def transform_data_back(self, data):\n \"\"\"\n Transform normal distributed data used in copula method back into original distribution\n based on earlier margin distribution of features.\n 
\"\"\"\n data = data.copy()\n unif_back = norm.cdf(data)\n unif_back[unif_back == 1] = 0.9999999999999999\n sort_index = (self.data_sorted.shape[0] * unif_back).astype(int)\n for col in range(self.data_sorted.shape[1]):\n data[:, col] = self.data_sorted[sort_index[:,col], col]\n return data\n\n def transform_data(self, data):\n \"\"\"\n Transform given data into normal distributed data for copula method based on margin distribution of features.\n \"\"\"\n # transform data and instance to uniform distributed data based on empirical distribution\n unif = np.zeros(data.shape)\n for col in range(data.shape[1]):\n self.ecdf = ECDF(data[:, col])\n unif[:, col] = self.ecdf(data[:, col])\n # to avoid errors: Set 1 to 0.999... and 0 to 0.0...01\n unif = np.where(unif != 1, unif, 0.9999999999999999)\n unif = np.where(unif != 0, unif, 0.0000000000000001)\n # transform uniform distributed data and instance to normal distributed data\n return norm.ppf(unif)\n\n def set_instance(self, instance):\n \"\"\" Set instance x to transformed version.\"\"\"\n super().set_instance(instance)\n self.x = self.transform_instance(self.x)\n\n def transform_instance(self, x):\n \"\"\" Determine transformed version of instance x based on marginal distribution. \"\"\"\n x_unif = np.zeros_like(x)\n for col in range(self.data.shape[1]):\n x_unif[:, col] = self.ecdf(x[:, col])\n x_unif = np.where(x_unif != 1, x_unif, 0.9999999999999999)\n x_unif = np.where(x_unif != 0, x_unif, 0.0000000000000001)\n return norm.ppf(x_unif)" }, { "identifier": "EmpiricalStrategy", "path": "corr_shap/sampling/EmpiricalStrategy.py", "snippet": "class EmpiricalStrategy(GaussStrategy):\n \"\"\" Uses the idea of kernel density estimation to modify the kernel SHAP method\n to explain the output of a function.\n\n A method based on the idea that data points important for explanation must be closer\n to the data point being explained. 
The distance is used to determine weights\n that indicate the importance for the explanation.\n \"\"\"\n\n def __init__(self, explainer, sigma=0.1, eta=0.9, **kwargs):\n \"\"\"\n Construct all necessary attributes for the EmpiricalConditionalStrategy object, especially a smoothing parameter\n used in distance computations (sigma) and a limit for the number of used rows for explanation (eta).\n \"\"\"\n super().__init__(explainer)\n self.sigma = sigma\n self.eta = eta\n\n def sample(self, m):\n \"\"\"\n Return prepared sample data.\n Determine most important samples for explanation of instance.\n Idea: The closer data and instance to be explained are the more important they are for explanation.\n Determine distance of data and instance to be explained, determine weights based on distance\n and choose most weighted data as sample.\n\n :param m: given mask of subset\n :return: samples with fixed masked features and normalized weights\n \"\"\"\n\n samples = self.data.copy()\n samples = self.set_masked_features_to_instance(m, self.x, samples)\n\n dist_weights = self.calculate_dist(self.x, m)\n if dist_weights is None:\n # all of the samples are too far away so that all dist_weights would be 0\n # => use equal distweights for all datapoints\n dist_weights = np.ones(self.data.shape[0])\n\n data_weights = self.data_weights.copy()\n data_weights[dist_weights == 0] = 0\n data_weights = self.normalize(data_weights)\n weights = dist_weights * data_weights\n\n return samples, weights\n\n def normalize(self, weights):\n \"\"\" Return normalized data weights \"\"\"\n sum = np.sum(weights)\n if sum != 0:\n weights = weights/sum\n return weights\n\n def calculate_dist(self, x, m):\n \"\"\"\n Computes distance and weights for empirical distribution method.\n Based on a modified version of mahalanobis distance, weights are calculated for each row of data.\n All data rows that are important enough until a limit (eta) is reached are used as sample data.\n Weights of rows over this limit are set to 0.\n\n :param x: given instance to be explained\n :param m: given mask of subset\n :return: weights assigned to sample data\n \"\"\"\n # preparing data for calculating distance\n subset_size = np.sum(m)\n cov_S = self.cov_varying[m == 1][:, m == 1]\n cov_S_inv = np.linalg.pinv(cov_S)\n x_S = x[0, self.group_mask == 1]\n x_S = x_S[m == 1]\n dataset_S = self.data[:, self.group_mask == 1][:, m == 1]\n\n x_diff_S = x_S - dataset_S\n d_S2_matrix = x_diff_S[:, :, None] * cov_S_inv[None, :, :] * x_diff_S[:, None, :] # d_S2_matrix[i,j,k] = x_diff_S[i, j] * cov_S_inv[j, k] * x_diff_S[i, k]\n d_S2 = np.sum(np.sum(d_S2_matrix, axis=-1), axis=-1) # d_S2[i] = sum_j sum_k d_S2_matrix[i,j,k]\n d_S2 = np.abs(d_S2/subset_size) # distance D_S ^2\n w_S = np.exp(-d_S2 / (2 * self.sigma * self.sigma)) # weights\n w_sum = np.sum(w_S)\n if w_sum == 0:\n return None\n w_S = w_S / w_sum # normalize weights\n sorted_ind = np.argsort(w_S)[::-1]\n w_cumsum = np.cumsum(w_S[sorted_ind])\n K = np.searchsorted(w_cumsum, self.eta, side=\"right\")\n w_S[sorted_ind[K+1:]] = 0\n w_S = w_S / np.sum(w_S) * np.sum(w_S != 0) # normalize again\n return w_S" }, { "identifier": "GaussEmpiricalStrategy", "path": "corr_shap/sampling/GaussEmpiricalStrategy.py", "snippet": "class GaussEmpiricalStrategy(EmpiricalStrategy, GaussStrategy):\n \"\"\" Uses a combination of the Gauss method and the Emp-Cond method\n to modify the kernel SHAP method to explain the output of a function.\n\n Experiments showed that for small subset sizes the Emp-Cond method performs 
better\n and for larger subsets the Gauss or Copula method performs better.\n \"\"\"\n\n def __init__(self, explainer, sigma=0.1, eta=0.9, dim=3, **kwargs):\n \"\"\"\n Construct all necessary attributes for the CombiGaussStrategy object,\n especially the number of dimension that is used to determine which sampling method to use.\n \"\"\"\n EmpiricalStrategy.__init__(self, explainer, sigma=sigma, eta=eta)\n # dimension that decides which method is used\n self.dim = dim\n\n def sample(self, m):\n \"\"\"\n Determine correct sample method.\n If subset size (given by m) is smaller than a fixed dim sample with empirical conditional sample\n otherwise with gauss.\n\n :param m: given mask of subset\n :return: right sample strategy\n \"\"\"\n if np.sum(m) <= self.dim:\n return EmpiricalStrategy.sample(self, m)\n else:\n return GaussStrategy.sample(self, m)" }, { "identifier": "CopulaEmpiricalStrategy", "path": "corr_shap/sampling/CopulaEmpiricalStrategy.py", "snippet": "class CopulaEmpiricalStrategy(EmpiricalStrategy, CopulaStrategy):\n \"\"\" Uses a combination of the Copula method and the Emp-Cond method\n to modify the kernel SHAP method to explain the output of a function.\n\n Experiments showed that for small subset sizes the Emp-Cond method performs better\n and for larger subsets the Gauss or Copula method performs better.\n \"\"\"\n\n def __init__(self, explainer, sigma=0.1, eta=0.9, dim=3, **kwargs):\n \"\"\"\n Construct all necessary attributes for the CombiGaussStrategy object,\n especially the number of dimension that is used to determine which sampling method to use\n and the transformed version of mean and covariance.\n \"\"\"\n EmpiricalStrategy.__init__(self, explainer, sigma=sigma, eta=eta)\n self.mean_transformed, self.cov_transformed = CopulaStrategy.calc_mean_cov(self, data=self.data, weights=self.data_weights)\n # dimension that decides which method is used\n self.dim = dim\n \n def calc_mean_cov(self, data, weights):\n \"\"\"\n Return mean and covariance of given data.\n\n :param data: data for which mean and covariance is to be calculated\n :param weights: weights assigned to data\n :return: mean and covariance\n \"\"\"\n return EmpiricalStrategy.calc_mean_cov(self, data, weights)\n\n def sample(self, m):\n \"\"\"\n Determine correct sample method.\n If subset size (given by m) is smaller than an fixed dim sample with empirical conditional sample\n otherwise with copula.\n\n :param m: given mask of subset\n :return: right sample strategy\n \"\"\"\n if np.sum(m) <= self.dim:\n return EmpiricalStrategy.sample(self, m)\n else:\n return CopulaStrategy.sample(self, m, x=self.x_transformed, mean=self.mean_transformed_varying, cov=self.cov_transformed_varying)\n\n def set_instance(self, instance):\n \"\"\" Set instance, transformed x and the transformed mean and covariance version.\"\"\"\n EmpiricalStrategy.set_instance(self, instance)\n self.x_transformed = CopulaStrategy.transform_instance(self, self.x)\n self.mean_transformed_varying, self.cov_transformed_varying = self.mask_mean_cov(self.mean_transformed, self.cov_transformed)" } ]
from .SamplingStrategy import SamplingStrategy
from .GaussStrategy import GaussStrategy
from .CopulaStrategy import CopulaStrategy
from .EmpiricalStrategy import EmpiricalStrategy
from .GaussEmpiricalStrategy import GaussEmpiricalStrategy
from .CopulaEmpiricalStrategy import CopulaEmpiricalStrategy
5,026
def get_sampling_strategy(type, explainer, kwargs):
    """Assign the sampling strategy method to the explainer based on the given type."""
    sampling_strategies = {"default": SamplingStrategy,
                           "gauss": GaussStrategy,
                           "copula": CopulaStrategy,
                           "empirical": EmpiricalStrategy,
                           "gauss+empirical": GaussEmpiricalStrategy,
def get_sampling_strategy(type, explainer, kwargs):
    """Assign the sampling strategy method to the explainer based on the given type."""
    sampling_strategies = {"default": SamplingStrategy,
                           "gauss": GaussStrategy,
                           "copula": CopulaStrategy,
                           "empirical": EmpiricalStrategy,
                           "gauss+empirical": GaussEmpiricalStrategy,
"copula+empirical": CopulaEmpiricalStrategy}
5
2023-11-14 08:56:18+00:00
8k
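`GaussStrategy.calc_conditional_mean_cov` above applies the standard Gaussian conditioning identities: fixing the subset features to x_sub gives mean_cond = mean_subcom + cov_subcom_sub @ pinv(cov_sub_sub) @ (x_sub - mean_sub) and cov_cond = cov_subcom_subcom - cov_subcom_sub @ pinv(cov_sub_sub) @ cov_sub_subcom. A standalone NumPy sketch of that conditioning step; the boolean-mask interface below is an illustrative simplification, not the repository's API:

    import numpy as np

    def conditional_gauss(x_sub: np.ndarray, m: np.ndarray, mean: np.ndarray, cov: np.ndarray):
        # m is a boolean mask over features: True = feature is in the fixed subset S.
        mean_sub, mean_subcom = mean[m], mean[~m]
        cov_sub_sub = cov[np.ix_(m, m)]
        cov_subcom_sub = cov[np.ix_(~m, m)]
        cov_subcom_subcom = cov[np.ix_(~m, ~m)]
        # Pseudo-inverse, as in the snippet, so singular blocks do not break conditioning.
        cov_sub_sub_inv = np.linalg.pinv(cov_sub_sub)
        mean_cond = mean_subcom + cov_subcom_sub @ cov_sub_sub_inv @ (x_sub - mean_sub)
        cov_cond = cov_subcom_subcom - cov_subcom_sub @ cov_sub_sub_inv @ cov_subcom_sub.T
        return mean_cond, cov_cond

Samples for the unmasked features then come from `np.random.multivariate_normal(mean_cond, cov_cond, n)`, which is how `GaussStrategy.sample` fills the non-subset columns.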
RapidAI/TableStructureRec
wired_table_rec/table_line_rec.py
[ { "identifier": "OrtInferSession", "path": "wired_table_rec/utils.py", "snippet": "class OrtInferSession:\n def __init__(self, model_path: Union[str, Path], num_threads: int = -1):\n self.verify_exist(model_path)\n\n self.num_threads = num_threads\n self._init_sess_opt()\n\n cpu_ep = \"CPUExecutionProvider\"\n cpu_provider_options = {\n \"arena_extend_strategy\": \"kSameAsRequested\",\n }\n EP_list = [(cpu_ep, cpu_provider_options)]\n try:\n self.session = InferenceSession(\n str(model_path), sess_options=self.sess_opt, providers=EP_list\n )\n except TypeError:\n # 这里兼容ort 1.5.2\n self.session = InferenceSession(str(model_path), sess_options=self.sess_opt)\n\n def _init_sess_opt(self):\n self.sess_opt = SessionOptions()\n self.sess_opt.log_severity_level = 4\n self.sess_opt.enable_cpu_mem_arena = False\n\n if self.num_threads != -1:\n self.sess_opt.intra_op_num_threads = self.num_threads\n\n self.sess_opt.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL\n\n def __call__(self, input_content: List[np.ndarray]) -> np.ndarray:\n input_dict = dict(zip(self.get_input_names(), input_content))\n try:\n return self.session.run(None, input_dict)\n except Exception as e:\n error_info = traceback.format_exc()\n raise ONNXRuntimeError(error_info) from e\n\n def get_input_names(\n self,\n ):\n return [v.name for v in self.session.get_inputs()]\n\n def get_output_name(self, output_idx=0):\n return self.session.get_outputs()[output_idx].name\n\n def get_metadata(self):\n meta_dict = self.session.get_modelmeta().custom_metadata_map\n return meta_dict\n\n @staticmethod\n def verify_exist(model_path: Union[Path, str]):\n if not isinstance(model_path, Path):\n model_path = Path(model_path)\n\n if not model_path.exists():\n raise FileNotFoundError(f\"{model_path} does not exist!\")\n\n if not model_path.is_file():\n raise FileExistsError(f\"{model_path} must be a file\")" }, { "identifier": "bbox_decode", "path": "wired_table_rec/utils_table_line_rec.py", "snippet": "def bbox_decode(heat, wh, reg=None, K=100):\n \"\"\"bbox组成:[V1, V2, V3, V4]\n V1~V4: bbox的4个坐标点\n \"\"\"\n batch = heat.shape[0]\n heat, keep = _nms(heat)\n scores, inds, clses, ys, xs = _topk(heat, K=K)\n if reg is not None:\n reg = _tranpose_and_gather_feat(reg, inds)\n reg = reg.reshape(batch, K, 2)\n xs = xs.reshape(batch, K, 1) + reg[:, :, 0:1]\n ys = ys.reshape(batch, K, 1) + reg[:, :, 1:2]\n else:\n xs = xs.reshape(batch, K, 1) + 0.5\n ys = ys.reshape(batch, K, 1) + 0.5\n\n wh = _tranpose_and_gather_feat(wh, inds)\n wh = wh.reshape(batch, K, 8)\n clses = clses.reshape(batch, K, 1).astype(np.float32)\n scores = scores.reshape(batch, K, 1)\n\n bboxes = np.concatenate(\n [\n xs - wh[..., 0:1],\n ys - wh[..., 1:2],\n xs - wh[..., 2:3],\n ys - wh[..., 3:4],\n xs - wh[..., 4:5],\n ys - wh[..., 5:6],\n xs - wh[..., 6:7],\n ys - wh[..., 7:8],\n ],\n axis=2,\n )\n detections = np.concatenate([bboxes, scores, clses], axis=2)\n return detections, inds" }, { "identifier": "bbox_post_process", "path": "wired_table_rec/utils_table_line_rec.py", "snippet": "def bbox_post_process(bbox, c, s, h, w):\n for i in range(bbox.shape[0]):\n bbox[i, :, 0:2] = transform_preds(bbox[i, :, 0:2], c[i], s[i], (w, h))\n bbox[i, :, 2:4] = transform_preds(bbox[i, :, 2:4], c[i], s[i], (w, h))\n bbox[i, :, 4:6] = transform_preds(bbox[i, :, 4:6], c[i], s[i], (w, h))\n bbox[i, :, 6:8] = transform_preds(bbox[i, :, 6:8], c[i], s[i], (w, h))\n return bbox" }, { "identifier": "gbox_decode", "path": "wired_table_rec/utils_table_line_rec.py", "snippet": "def 
gbox_decode(mk, st_reg, reg=None, K=400):\n \"\"\"gbox的组成:[V1, P1, P2, P3, P4]\n P1~P4: 四个框的中心点\n V1: 四个框的交点\n \"\"\"\n batch = mk.shape[0]\n mk, keep = _nms(mk)\n scores, inds, clses, ys, xs = _topk(mk, K=K)\n if reg is not None:\n reg = _tranpose_and_gather_feat(reg, inds)\n reg = reg.reshape(batch, K, 2)\n xs = xs.reshape(batch, K, 1) + reg[:, :, 0:1]\n ys = ys.reshape(batch, K, 1) + reg[:, :, 1:2]\n else:\n xs = xs.reshape(batch, K, 1) + 0.5\n ys = ys.reshape(batch, K, 1) + 0.5\n\n scores = scores.reshape(batch, K, 1)\n clses = clses.reshape(batch, K, 1).astype(np.float32)\n st_Reg = _tranpose_and_gather_feat(st_reg, inds)\n\n bboxes = np.concatenate(\n [\n xs - st_Reg[..., 0:1],\n ys - st_Reg[..., 1:2],\n xs - st_Reg[..., 2:3],\n ys - st_Reg[..., 3:4],\n xs - st_Reg[..., 4:5],\n ys - st_Reg[..., 5:6],\n xs - st_Reg[..., 6:7],\n ys - st_Reg[..., 7:8],\n ],\n axis=2,\n )\n return np.concatenate([xs, ys, bboxes, scores, clses], axis=2), keep" }, { "identifier": "gbox_post_process", "path": "wired_table_rec/utils_table_line_rec.py", "snippet": "def gbox_post_process(gbox, c, s, h, w):\n for i in range(gbox.shape[0]):\n gbox[i, :, 0:2] = transform_preds(gbox[i, :, 0:2], c[i], s[i], (w, h))\n gbox[i, :, 2:4] = transform_preds(gbox[i, :, 2:4], c[i], s[i], (w, h))\n gbox[i, :, 4:6] = transform_preds(gbox[i, :, 4:6], c[i], s[i], (w, h))\n gbox[i, :, 6:8] = transform_preds(gbox[i, :, 6:8], c[i], s[i], (w, h))\n gbox[i, :, 8:10] = transform_preds(gbox[i, :, 8:10], c[i], s[i], (w, h))\n return gbox" }, { "identifier": "get_affine_transform", "path": "wired_table_rec/utils_table_line_rec.py", "snippet": "def get_affine_transform(\n center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0\n):\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\n scale = np.array([scale, scale], dtype=np.float32)\n\n scale_tmp = scale\n src_w = scale_tmp[0]\n dst_w = output_size[0]\n dst_h = output_size[1]\n\n rot_rad = np.pi * rot / 180\n src_dir = get_dir([0, src_w * -0.5], rot_rad)\n dst_dir = np.array([0, dst_w * -0.5], np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n dst = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = center + scale_tmp * shift\n src[1, :] = center + src_dir + scale_tmp * shift\n dst[0, :] = [dst_w * 0.5, dst_h * 0.5]\n dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir\n\n src[2:, :] = get_3rd_point(src[0, :], src[1, :])\n dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])\n\n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n return trans" }, { "identifier": "group_bbox_by_gbox", "path": "wired_table_rec/utils_table_line_rec.py", "snippet": "def group_bbox_by_gbox(\n bboxes, gboxes, score_thred=0.3, v2c_dist_thred=2, c2v_dist_thred=0.5\n):\n def point_in_box(box, point):\n x1, y1, x2, y2 = box[0], box[1], box[2], box[3]\n x3, y3, x4, y4 = box[4], box[5], box[6], box[7]\n ctx, cty = point[0], point[1]\n a = (x2 - x1) * (cty - y1) - (y2 - y1) * (ctx - x1)\n b = (x3 - x2) * (cty - y2) - (y3 - y2) * (ctx - x2)\n c = (x4 - x3) * (cty - y3) - (y4 - y3) * (ctx - x3)\n d = (x1 - x4) * (cty - y4) - (y1 - y4) * (ctx - x4)\n if all(x > 0 for x in (a, b, c, d)) or all(x < 0 for x in (a, b, c, d)):\n return True\n return False\n\n def get_distance(pt1, pt2):\n return math.sqrt(\n (pt1[0] - pt2[0]) * (pt1[0] - pt2[0])\n + (pt1[1] - pt2[1]) * (pt1[1] - pt2[1])\n )\n\n dets = copy.deepcopy(bboxes)\n sign = np.zeros((len(dets), 
4))\n\n for gbox in gboxes:\n if gbox[10] < score_thred:\n break\n\n vertex = [gbox[0], gbox[1]]\n for i in range(4):\n center = [gbox[2 * i + 2], gbox[2 * i + 3]]\n if get_distance(vertex, center) < v2c_dist_thred:\n continue\n\n for k, bbox in enumerate(dets):\n if bbox[8] < score_thred:\n break\n\n if sum(sign[k]) == 4:\n continue\n\n w = (abs(bbox[6] - bbox[0]) + abs(bbox[4] - bbox[2])) / 2\n h = (abs(bbox[3] - bbox[1]) + abs(bbox[5] - bbox[7])) / 2\n m = max(w, h)\n if point_in_box(bbox, center):\n min_dist, min_id = 1e4, -1\n for j in range(4):\n dist = get_distance(vertex, [bbox[2 * j], bbox[2 * j + 1]])\n if dist < min_dist:\n min_dist = dist\n min_id = j\n\n if (\n min_id > -1\n and min_dist < c2v_dist_thred * m\n and sign[k][min_id] == 0\n ):\n bboxes[k][2 * min_id] = vertex[0]\n bboxes[k][2 * min_id + 1] = vertex[1]\n sign[k][min_id] = 1\n return bboxes" }, { "identifier": "nms", "path": "wired_table_rec/utils_table_line_rec.py", "snippet": "def nms(dets, thresh):\n if len(dets) < 2:\n return dets\n\n index_keep, keep = [], []\n for i in range(len(dets)):\n box = dets[i]\n if box[-1] < thresh:\n break\n\n max_score_index = -1\n ctx = (dets[i][0] + dets[i][2] + dets[i][4] + dets[i][6]) / 4\n cty = (dets[i][1] + dets[i][3] + dets[i][5] + dets[i][7]) / 4\n\n for j in range(len(dets)):\n if i == j or dets[j][-1] < thresh:\n break\n\n x1, y1 = dets[j][0], dets[j][1]\n x2, y2 = dets[j][2], dets[j][3]\n x3, y3 = dets[j][4], dets[j][5]\n x4, y4 = dets[j][6], dets[j][7]\n a = (x2 - x1) * (cty - y1) - (y2 - y1) * (ctx - x1)\n b = (x3 - x2) * (cty - y2) - (y3 - y2) * (ctx - x2)\n c = (x4 - x3) * (cty - y3) - (y4 - y3) * (ctx - x3)\n d = (x1 - x4) * (cty - y4) - (y1 - y4) * (ctx - x4)\n if all(x > 0 for x in (a, b, c, d)) or all(x < 0 for x in (a, b, c, d)):\n if dets[i][8] > dets[j][8] and max_score_index < 0:\n max_score_index = i\n elif dets[i][8] < dets[j][8]:\n max_score_index = -2\n break\n\n if max_score_index > -1:\n index_keep.append(max_score_index)\n elif max_score_index == -1:\n index_keep.append(i)\n\n keep = [dets[index_keep[i]] for i in range(len(index_keep))]\n return np.array(keep)" }, { "identifier": "merge_adjacent_polys", "path": "wired_table_rec/utils_table_recover.py", "snippet": "def merge_adjacent_polys(polygons: np.ndarray) -> np.ndarray:\n \"\"\"合并相邻iou大于阈值的框\"\"\"\n combine_iou_thresh = 0.1\n pair_polygons = list(zip(polygons, polygons[1:, ...]))\n pair_ious = np.array([compute_poly_iou(p1, p2) for p1, p2 in pair_polygons])\n idxs = np.argwhere(pair_ious >= combine_iou_thresh)\n\n if idxs.size <= 0:\n return polygons\n\n polygons = combine_two_poly(polygons, idxs)\n\n # 注意:递归调用\n polygons = merge_adjacent_polys(polygons)\n return polygons" }, { "identifier": "sorted_boxes", "path": "wired_table_rec/utils_table_recover.py", "snippet": "def sorted_boxes(dt_boxes: np.ndarray) -> np.ndarray:\n \"\"\"\n Sort text boxes in order from top to bottom, left to right\n args:\n dt_boxes(array):detected text boxes with shape (N, 4, 2)\n return:\n sorted boxes(array) with shape (N, 4, 2)\n \"\"\"\n num_boxes = dt_boxes.shape[0]\n dt_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))\n _boxes = list(dt_boxes)\n\n # 解决相邻框,后边比前面y轴小,则会被排到前面去的问题\n for i in range(num_boxes - 1):\n for j in range(i, -1, -1):\n if (\n abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10\n and _boxes[j + 1][0][0] < _boxes[j][0][0]\n ):\n _boxes[j], _boxes[j + 1] = _boxes[j + 1], _boxes[j]\n else:\n break\n return np.array(_boxes)" } ]
from typing import Any, Dict, Optional
from .utils import OrtInferSession
from .utils_table_line_rec import (
    bbox_decode,
    bbox_post_process,
    gbox_decode,
    gbox_post_process,
    get_affine_transform,
    group_bbox_by_gbox,
    nms,
)
from .utils_table_recover import merge_adjacent_polys, sorted_boxes
import cv2
import numpy as np
4,480
# -*- encoding: utf-8 -*-
# @Author: SWHL
# @Contact: [email protected]
class TableLineRecognition:
    def __init__(self, model_path: str = None):
        self.K = 1000
        self.MK = 4000
        self.mean = np.array([0.408, 0.447, 0.470], dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array([0.289, 0.274, 0.278], dtype=np.float32).reshape(1, 1, 3)
        self.inp_height = 1024
        self.inp_width = 1024
        self.session = OrtInferSession(model_path)

    def __call__(self, img: np.ndarray) -> Optional[np.ndarray]:
        img_info = self.preprocess(img)
        pred = self.infer(img_info)
        polygons = self.postprocess(pred)
        if polygons.size == 0:
            return None
        polygons = polygons.reshape(polygons.shape[0], 4, 2)
# -*- encoding: utf-8 -*-
# @Author: SWHL
# @Contact: [email protected]
class TableLineRecognition:
    def __init__(self, model_path: str = None):
        self.K = 1000
        self.MK = 4000
        self.mean = np.array([0.408, 0.447, 0.470], dtype=np.float32).reshape(1, 1, 3)
        self.std = np.array([0.289, 0.274, 0.278], dtype=np.float32).reshape(1, 1, 3)
        self.inp_height = 1024
        self.inp_width = 1024
        self.session = OrtInferSession(model_path)

    def __call__(self, img: np.ndarray) -> Optional[np.ndarray]:
        img_info = self.preprocess(img)
        pred = self.infer(img_info)
        polygons = self.postprocess(pred)
        if polygons.size == 0:
            return None
        polygons = polygons.reshape(polygons.shape[0], 4, 2)
polygons = sorted_boxes(polygons)
9
2023-11-11 08:37:11+00:00
8k
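The detector above works on a fixed 1024x1024 canvas: `get_affine_transform` builds the warp from the original image into network space, and `bbox_post_process`/`gbox_post_process` map the decoded corner points back through the inverse transform. A compact OpenCV sketch of that round trip; the centre/scale conventions are assumptions modelled on the snippet, and the helper is illustrative rather than the repository's implementation:

    import cv2
    import numpy as np

    def warp_and_unwarp(img: np.ndarray, canvas_points: np.ndarray, size: int = 1024):
        h, w = img.shape[:2]
        center = np.array([w / 2.0, h / 2.0], dtype=np.float32)
        scale = float(max(h, w))  # square scale so the whole image fits the canvas
        src = np.float32([center,
                          center + [0.0, -scale / 2.0],
                          center + [-scale / 2.0, -scale / 2.0]])
        dst = np.float32([[size / 2.0, size / 2.0],
                          [size / 2.0, 0.0],
                          [0.0, 0.0]])
        trans = cv2.getAffineTransform(src, dst)    # image -> network canvas
        canvas = cv2.warpAffine(img, trans, (size, size))
        inv = cv2.invertAffineTransform(trans)      # canvas -> original image
        pts = np.hstack([canvas_points, np.ones((len(canvas_points), 1), np.float32)])
        return canvas, pts @ inv.T                  # corner points in image coordinates

Applying the inverse 2x3 matrix to homogeneous points plays the role `transform_preds` fills inside the post-processing functions.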
davep/tinboard
tinboard/app.py
[ { "identifier": "Main", "path": "tinboard/screens/main.py", "snippet": "class Main(Screen[None]):\n \"\"\"The main application screen.\"\"\"\n\n CONTEXT_HELP = \"\"\"\n ## Application keys and commands\n\n The following keys and commands are available throughout the application:\n\n | Key | Command | Description |\n | - | - | - |\n | <kbd>F1</kbd> | `Help` | This help screen. |\n | <kbd>F2</kbd> | `Visit Pinboard` | Visit the main Pinboard website. |\n | <kbd>F3</kbd> | | Toggle the bookmark details pane. |\n | <kbd>F4</kbd> | | Toggle the sort order of the tags menu. |\n | <kbd>F12</kbd> | `Logout` | Forgot your API token and remove the local bookmark cache. |\n | <kbd>Ctrl</kbd>+<kbd>l</kbd> | `Redownload/refresh bookmarks` | Reload the local bookmarks from Pinboard. |\n | <kbd>Ctrl</kbd>+<kbd>q</kbd> | `Quit the application` | Shockingly... quit the application! |\n | <kbd>Ctrl</kbd>+<kbd>p</kbd> | | Show the command palette. |\n | <kbd>/</kbd> | | Text search. |\n | <kbd>#</kbd> | | Focus the menu of tags. |\n | <kbd>a</kbd> | `Show All` | Show all bookmarks. |\n | <kbd>R</kbd> | `Show Unread` | Show all unread bookmarks. |\n | <kbd>r</kbd> | `Show Read` | Show all read bookmarks. |\n | <kbd>P</kbd> | `Show Private` | Show all private bookmarks. |\n | <kbd>p</kbd> | `Show Public` | Show all public bookmarks. |\n | <kbd>T</kbd> | `Show Untagged` | Show all untagged bookmarks. |\n | <kbd>t</kbd> | `Show Tagged` | Show all tagged bookmarks. |\n \"\"\"\n\n TITLE = f\"Tinboard v{__version__}\"\n SUB_TITLE = \"A pinboard.in client\"\n COMMANDS = {\n BookmarkCommands,\n CoreCommands,\n CoreFilteringCommands,\n TagCommands,\n }\n\n CSS = \"\"\"\n Main {\n layout: horizontal;\n }\n\n Main > .focus {\n border: none;\n border-left: tall $accent 50%;\n background: $boost;\n }\n\n Main > .focus:focus, Main > .focus:focus-within {\n border: none;\n border-left: tall $accent;\n background: $panel;\n }\n\n #menu {\n padding: 0;\n margin: 0;\n width: 2fr;\n height: 1fr;\n min-width: 28;\n\n Filters {\n padding-left: 1;\n }\n\n TagsMenu {\n height: 1fr;\n }\n\n Rule {\n height: 1;\n margin: 0 0 0 0;\n background: $boost;\n color: $accent 50%;\n }\n }\n\n #menu:focus-within Rule {\n background: $boost;\n color: $accent;\n }\n\n Bookmarks {\n height: 1fr;\n width: 5fr;\n }\n\n Details {\n height: 1fr;\n width: 3fr;\n min-width: 30;\n }\n\n /* Tweaks to the above when the details are hidden. 
*/\n\n Main.details-hidden Details {\n display: none;\n }\n\n Main.details-hidden Bookmarks {\n width: 8fr;\n }\n \"\"\"\n\n BINDINGS = [\n filter_binding(\"All\"),\n filter_binding(\"Public\"),\n filter_binding(\"Private\"),\n filter_binding(\"Unread\"),\n filter_binding(\"Read\"),\n filter_binding(\"Tagged\"),\n filter_binding(\"Untagged\"),\n Binding(\"f1\", \"help\", \"Help\"),\n Binding(\"f2\", \"goto_pinboard\"),\n Binding(\"f3\", \"toggle_details\"),\n Binding(\"f4\", \"toggle_tag_order\"),\n Binding(\"f12\", \"logout\"),\n Binding(\"ctrl+l\", \"redownload\"),\n Binding(\"escape\", \"escape\"),\n Binding(\"ctrl+q\", \"quit\", \"Quit\"),\n Binding(\"#\", \"focus('tags-menu')\"),\n Binding(\"/\", \"search\"),\n ]\n\n def __init__(self, api_token: str) -> None:\n \"\"\"Initialise the main screen.\n\n Args:\n api_token: The Pinboard API token.\n \"\"\"\n super().__init__()\n self._api = API(api_token)\n\n def compose(self) -> ComposeResult:\n \"\"\"Lay out the content of the screen.\"\"\"\n yield Header()\n with Vertical(id=\"menu\", classes=\"focus\"):\n yield Filters()\n yield Rule()\n yield TagsMenu(id=\"tags-menu\")\n yield Bookmarks(classes=\"focus\")\n yield Details(classes=\"focus\")\n yield Footer()\n\n def on_mount(self) -> None:\n \"\"\"Start the process of loading the bookmarks.\"\"\"\n configuration = load_configuration()\n self.set_class(not configuration.details_visible, \"details-hidden\")\n self.sub_title = \"Loading cached bookmarks...\"\n self.query_one(Bookmarks).loading = True\n self.query_one(TagsMenu).sort_by_count = configuration.sort_tags_by_count\n self.load_bookmarks()\n\n @work(thread=True)\n def load_bookmarks(self) -> None:\n \"\"\"Load the local copy of the bookmarks, if they exist.\"\"\"\n self.query_one(Bookmarks).load()\n self.app.call_from_thread(self.maybe_redownload)\n\n @work\n async def download_bookmarks(self) -> None:\n \"\"\"Get all the bookmarks from Pinboard.\n\n Note:\n As a side-effect of calling this method, the local copy of all\n the bookmarks will be overwritten.\n \"\"\"\n try:\n (await self.query_one(Bookmarks).download_all(self._api)).save()\n except API.RequestError:\n self.app.bell()\n self.notify(\n \"Error downloading bookmarks from the server.\",\n title=\"Download Error\",\n severity=\"error\",\n timeout=8,\n )\n self._bookmarks_changed()\n except OSError as error:\n self.app.bell()\n self.notify(\n f\"Error saving the bookmarks.\\n\\n{error}\",\n title=\"Save Error\",\n severity=\"error\",\n timeout=8,\n )\n\n @work\n async def maybe_redownload(self) -> None:\n \"\"\"Redownload the bookmarks if they look newer on the server.\"\"\"\n if last_download := self.query_one(Bookmarks).last_downloaded:\n try:\n latest_on_server = await self._api.last_update()\n except API.Error:\n self.app.bell()\n self.notify(\n \"Unable to get the last change date from Pinboard. 
Is your token valid?\",\n title=\"Server Error\",\n severity=\"error\",\n timeout=8,\n )\n return\n if latest_on_server > last_download:\n self.notify(\n \"Bookmarks on the server appear newer; downloading a fresh copy.\"\n )\n self.action_redownload()\n else:\n self.notify(\"No local bookmarks found; checking with the server.\")\n self.action_redownload()\n\n @on(Bookmarks.Changed)\n def _bookmarks_changed(self) -> None:\n \"\"\"Refresh the display when an update happens.\"\"\"\n bookmarks = self.query_one(Bookmarks)\n bookmarks.loading = False\n self.query_one(TagsMenu).show(bookmarks.tag_counts)\n TagCommands.current_tags = list(bookmarks.tags)\n self.query_one(Details).bookmark = bookmarks.current_bookmark\n # TODO: if getting the counts starts to look like it causes a wee\n # pause, perhaps calculate them from within a threaded worker.\n # Mostly though, so far, I'm not seeing any impact.\n self.query_one(Filters).counts = bookmarks.counts\n bookmarks.focus()\n\n @on(Bookmarks.OptionHighlighted, \"Bookmarks\")\n def refresh_details(self, event: Bookmarks.OptionHighlighted) -> None:\n \"\"\"Show the details of a highlighted bookmark.\n\n Args:\n event: The event causing the refresh.\n \"\"\"\n assert isinstance(event.option, Bookmark)\n self.query_one(Details).bookmark = event.option\n\n def action_help(self) -> None:\n \"\"\"Show the help screen.\"\"\"\n self.app.push_screen(Help(self))\n\n def action_goto_pinboard(self) -> None:\n \"\"\"Open Pinbaord in the user's web browser.\"\"\"\n open_url(\"https://pinboard.in\")\n\n def action_toggle_details(self) -> None:\n \"\"\"Toggle the display of the details pane.\"\"\"\n self.toggle_class(\"details-hidden\")\n config = load_configuration()\n config.details_visible = not self.has_class(\"details-hidden\")\n save_configuration(config)\n\n def action_toggle_tag_order(self) -> None:\n \"\"\"Toggle the ordering of the tags in the tag menu.\"\"\"\n tags = self.query_one(TagsMenu)\n tags.sort_by_count = not tags.sort_by_count\n tags.show(self.query_one(Bookmarks).tag_counts)\n config = load_configuration()\n config.sort_tags_by_count = tags.sort_by_count\n save_configuration(config)\n\n def _logout(self, confirmed: bool) -> None:\n \"\"\"Process the logout confirmation.\n\n Args:\n confirmed: Was the logout confirmed by the user?\n \"\"\"\n if confirmed:\n token_file().unlink(True)\n bookmarks_file().unlink(True)\n self.app.exit(ExitStates.TOKEN_FORGOTTEN)\n\n def action_logout(self) -> None:\n \"\"\"Perform the logout action.\"\"\"\n self.app.push_screen(\n Confirm(\n \"Logout\",\n \"Remove the local copy of your API token and delete the local copy of all bookmarks?\",\n ),\n callback=self._logout,\n )\n\n def action_redownload(self) -> None:\n \"\"\"Freshly download the bookmarks.\"\"\"\n self.sub_title = \"Loading...\"\n self.query_one(TagsMenu).show([])\n self.query_one(Bookmarks).loading = True\n self.download_bookmarks()\n\n def action_escape(self) -> None:\n \"\"\"Give some context to banging the escape key.\"\"\"\n if self.screen.focused is None:\n return\n if isinstance(self.screen.focused, Details) or isinstance(\n self.screen.focused.parent, Details\n ):\n self.query_one(Bookmarks).focus()\n elif isinstance(self.screen.focused, (Bookmarks, TagsMenu)):\n self.query_one(Filters).focus()\n\n @on(Filters.ShowAll)\n def action_show_all(self) -> None:\n \"\"\"Show all bookmarks.\"\"\"\n self.query_one(Bookmarks).show_all()\n\n @on(Filters.ShowPublic)\n def action_show_public(self) -> None:\n \"\"\"Show all public bookmarks.\"\"\"\n 
self.query_one(Bookmarks).public_filter = (\n None if self.query_one(Bookmarks).public_filter is True else True\n )\n\n @on(Filters.ShowPrivate)\n def action_show_private(self) -> None:\n \"\"\"Show all private bookmarks.\"\"\"\n self.query_one(Bookmarks).public_filter = (\n None if self.query_one(Bookmarks).public_filter is False else False\n )\n\n @on(Filters.ShowUnread)\n def action_show_unread(self) -> None:\n \"\"\"Show all unread bookmarks.\"\"\"\n self.query_one(Bookmarks).read_filter = (\n None if self.query_one(Bookmarks).read_filter is False else False\n )\n\n @on(Filters.ShowRead)\n def action_show_read(self) -> None:\n \"\"\"Show all read bookmarks.\"\"\"\n self.query_one(Bookmarks).read_filter = (\n None if self.query_one(Bookmarks).read_filter is True else True\n )\n\n @on(Filters.ShowUntagged)\n def action_show_untagged(self) -> None:\n \"\"\"Show all untagged bookmarks.\"\"\"\n self.query_one(Bookmarks).has_tags_filter = (\n None if self.query_one(Bookmarks).has_tags_filter is False else False\n )\n\n @on(Filters.ShowTagged)\n def action_show_tagged(self) -> None:\n \"\"\"Show all tagged bookmarks.\"\"\"\n self.query_one(Bookmarks).has_tags_filter = (\n None if self.query_one(Bookmarks).has_tags_filter is True else True\n )\n\n def _search(self, search_text: str) -> None:\n \"\"\"Handle a request to search for text in the bookmarks.\"\"\"\n self.query_one(Bookmarks).text_filter = search_text\n\n def action_search(self) -> None:\n \"\"\"Do some free-text searching.\"\"\"\n self.app.push_screen(SearchInput(), callback=self._search)\n\n @on(ShowTaggedWith)\n def show_tagged_with(self, event: ShowTaggedWith) -> None:\n \"\"\"Show all bookmarks tagged with a given tag.\n\n Args:\n event: The event that contains the tag to show.\n \"\"\"\n self.query_one(Bookmarks).tag_filter = {event.tag}\n\n @on(ShowAlsoTaggedWith)\n def show_also_tagged_with(self, event: ShowAlsoTaggedWith) -> None:\n \"\"\"Add a tag to any current tag filter and show the matching bookmarks.\n\n Args:\n event: The event that contains the tag to add.\n \"\"\"\n self.query_one(Bookmarks).tag_filter |= {event.tag}\n\n @on(ClearTags)\n def clear_tags(self) -> None:\n \"\"\"Clear any tags that are in use.\"\"\"\n self.query_one(Bookmarks).tag_filter = set()\n\n async def post_result(self, result: BookmarkData | None) -> None:\n \"\"\"Handle the result of an edit of a bookmark.\n\n Args:\n result: The result data, or `None` if the edit was cancelled.\n \"\"\"\n if result:\n try:\n await self._api.add_bookmark(result)\n self.query_one(Bookmarks).update_bookmark(result).save()\n self.notify(\"Bookmark saved.\")\n except API.Error as error:\n self.app.bell()\n self.notify(\n str(error),\n title=\"Error saving bookmark data\",\n severity=\"error\",\n timeout=10,\n )\n self.app.push_screen(\n BookmarkInput(\n self._api, result, known_tags=self.query_one(Bookmarks).all_tags\n ),\n callback=self.post_result,\n )\n except OSError as error:\n self.app.bell()\n self.notify(\n f\"Error saving the bookmarks.\\n\\n{error}\",\n title=\"Save Error\",\n severity=\"error\",\n timeout=8,\n )\n\n @on(CopyBookmarkURL)\n def copy_bookmark_to_clipboard(self) -> None:\n \"\"\"Copy the currently-highlighted bookmark to the clipboard.\"\"\"\n if (bookmark := self.query_one(Bookmarks).current_bookmark) is None:\n self.app.bell()\n elif bookmark.data.href:\n try:\n to_clipboard(bookmark.data.href)\n except PyperclipException:\n self.app.bell()\n self.notify(\n \"Clipboard support not available in your environment.\",\n severity=\"error\",\n 
)\n else:\n self.notify(\"URL copied to the clipboard\")\n\n @on(AddBookmark)\n def add(self) -> None:\n \"\"\"Add a new bookmark.\"\"\"\n self.app.push_screen(\n BookmarkInput(self._api, known_tags=self.query_one(Bookmarks).all_tags),\n callback=self.post_result,\n )\n\n @on(EditBookmark)\n def edit(self) -> None:\n \"\"\"Edit the current bookmark, if there is one.\"\"\"\n if (bookmark := self.query_one(Bookmarks).current_bookmark) is None:\n self.app.bell()\n else:\n self.app.push_screen(\n BookmarkInput(\n self._api,\n bookmark.data,\n known_tags=self.query_one(Bookmarks).all_tags,\n ),\n callback=self.post_result,\n )\n\n async def _delete(self, bookmark: Bookmark, confirmed: bool) -> None:\n \"\"\"Respond to the user's confirmation about a bookmark deletion.\n\n Args:\n bookmark: The bookmark to delete.\n confirmed: The decision the user made about deletion.\n \"\"\"\n if confirmed:\n try:\n await self._api.delete_bookmark(bookmark.data.href)\n except API.Error:\n self.app.bell()\n self.notify(\n \"Error trying to delete the bookmark.\",\n title=\"Server Error\",\n severity=\"error\",\n timeout=8,\n )\n return\n try:\n self.query_one(Bookmarks).remove_bookmark(bookmark).save()\n except OSError as error:\n self.app.bell()\n self.notify(\n f\"Error saving the bookmarks.\\n\\n{error}\",\n title=\"Save Error\",\n severity=\"error\",\n timeout=8,\n )\n return\n self.notify(\"Bookmark deleted.\", severity=\"warning\")\n\n @on(DeleteBookmark)\n def delete(self) -> None:\n \"\"\"Delete the current bookmark, if there is one.\"\"\"\n if (bookmark := self.query_one(Bookmarks).current_bookmark) is None:\n self.app.bell()\n else:\n self.app.push_screen(\n Confirm(\n \"Delete?\",\n f\"Are you sure you wish to delete this bookmark?\\n\\n[dim i]{bookmark.data.description}[/]\",\n ),\n callback=partial(self._delete, bookmark),\n )\n\n @on(ToggleBookmarkRead)\n async def toggle_read(self) -> None:\n \"\"\"Toggle the read/unread status of the current bookmark.\"\"\"\n if (bookmark := self.query_one(Bookmarks).current_bookmark) is None:\n self.app.bell()\n else:\n bookmark.data.to_read = not bookmark.data.to_read\n await self.post_result(bookmark.data)\n\n @on(ToggleBookmarkPublic)\n async def toggle_public(self) -> None:\n \"\"\"Toggle the public/private status of the current bookmark.\"\"\"\n if (bookmark := self.query_one(Bookmarks).current_bookmark) is None:\n self.app.bell()\n else:\n bookmark.data.shared = not bookmark.data.shared\n await self.post_result(bookmark.data)" }, { "identifier": "TokenInput", "path": "tinboard/screens/token_input.py", "snippet": "class TokenInput(ModalScreen[str | None]):\n \"\"\"A modal screen that lets the user enter an API token.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n TokenInput {\n align: center middle;\n\n &> Vertical {\n padding: 1 2;\n width: 40%;\n min-width: 70;\n height: auto;\n background: $surface;\n border: panel $error;\n border-title-color: $text;\n\n &> Horizontal {\n margin-top: 1;\n height: auto;\n align-horizontal: right;\n\n &> Horizontal {\n margin-left: 1;\n height: auto;\n\n }\n\n Button {\n margin-right: 1;\n }\n }\n\n Label {\n width: 1fr;\n height: auto;\n padding-left: 1;\n padding-right: 1;\n margin-bottom: 1;\n }\n\n }\n }\n \"\"\"\n\n BINDINGS = [(\"escape\", \"cancel\"), (\"f1\", \"get_token\")]\n\n def compose(self) -> ComposeResult:\n \"\"\"Compose the content of the screen.\"\"\"\n with Vertical() as dialog:\n dialog.border_title = \"API Token Required\"\n yield Label(\n \"To use the [i]pinboard.in[/] API you will need an API token. 
\"\n \"You can [@click=screen.get_token]get your API token from your \"\n \"account page on [i]pinboard.in[/][/].\"\n )\n yield Input(placeholder=\"Paste the token here\")\n with Horizontal():\n with Horizontal():\n yield Button(\"Go to token [dim]\\\\[F1][/]\", id=\"get-token\")\n yield Button(\"Connect\", id=\"connect\", variant=\"primary\")\n yield Button(\"Cancel [dim]\\\\[Esc][/]\", id=\"cancel\", variant=\"error\")\n\n @on(Button.Pressed, \"#connect\")\n @on(Input.Submitted)\n def confirm(self) -> None:\n \"\"\"React to the user confirming their input.\"\"\"\n if token := self.query_one(Input).value.strip():\n self.dismiss(token)\n else:\n self.notify(\"Please provide a token\", severity=\"error\")\n\n @on(Button.Pressed, \"#cancel\")\n def action_cancel(self) -> None:\n \"\"\"React to the user cancelling the dialog.\"\"\"\n self.dismiss(None)\n\n @on(Button.Pressed, \"#get-token\")\n def action_get_token(self) -> None:\n \"\"\"Open the page for getting an API token.\"\"\"\n open_url(\"https://pinboard.in/settings/password\")\n self.query_one(Input).focus()" }, { "identifier": "load_configuration", "path": "tinboard/data/config.py", "snippet": "@lru_cache(maxsize=None)\ndef load_configuration() -> Configuration:\n \"\"\"Load the configuration.\n\n Returns:\n The configuration.\n\n Note:\n As a side-effect, if the configuration doesn't exist a default one\n will be saved to storage.\n\n This function is designed so that it's safe and low-cost to\n repeatedly call it. The configuration is cached and will only be\n loaded from storage when necessary.\n \"\"\"\n source = configuration_file()\n return (\n Configuration(**loads(source.read_text(encoding=\"utf-8\")))\n if source.exists()\n else save_configuration(Configuration())\n )" }, { "identifier": "save_configuration", "path": "tinboard/data/config.py", "snippet": "def save_configuration(configuration: Configuration) -> Configuration:\n \"\"\"Save the given configuration.\n\n Args:\n The configuration to store.\n\n Returns:\n The configuration.\n \"\"\"\n load_configuration.cache_clear()\n configuration_file().write_text(\n dumps(asdict(configuration), indent=4), encoding=\"utf-8\"\n )\n return load_configuration()" }, { "identifier": "ExitStates", "path": "tinboard/data/exit_states.py", "snippet": "class ExitStates(Enum):\n \"\"\"Exit states for the application.\"\"\"\n\n OKAY = 0\n \"\"\"The general exit state.\"\"\"\n\n TOKEN_NEEDED = 1\n \"\"\"The application exited because a token is needed and none was given.\"\"\"\n\n TOKEN_FORGOTTEN = 2\n \"\"\"The application exited because the user forgot the token.\"\"\"" }, { "identifier": "token_file", "path": "tinboard/data/token.py", "snippet": "def token_file() -> Path:\n \"\"\"The file where the API token is held.\n\n Returns:\n The path to the token file.\n \"\"\"\n return data_dir() / \".token\"" } ]
import os from textual.app import App from textual.binding import Binding from .screens import Main, TokenInput from .data import load_configuration, save_configuration, token_file, ExitStates
5,627
"""The main application class.""" ############################################################################## # Backward compatibility. from __future__ import annotations ############################################################################## # Python imports. ############################################################################## # Textual imports. ############################################################################## # Local imports. ############################################################################## class Tinboard(App[ExitStates]): """The application.""" BINDINGS = [ Binding("ctrl+backslash", "gndn"), Binding("ctrl+p", "command_palette", priority=True), ] def __init__(self) -> None: """Initialise the application.""" super().__init__() self.dark = load_configuration().dark_mode def token_bounce(self, token: str | None) -> None: """Handle the result of asking the user for their API token. Args: token: The resulting token. """ if token: token_file().write_text(token, encoding="utf-8")
"""The main application class.""" ############################################################################## # Backward compatibility. from __future__ import annotations ############################################################################## # Python imports. ############################################################################## # Textual imports. ############################################################################## # Local imports. ############################################################################## class Tinboard(App[ExitStates]): """The application.""" BINDINGS = [ Binding("ctrl+backslash", "gndn"), Binding("ctrl+p", "command_palette", priority=True), ] def __init__(self) -> None: """Initialise the application.""" super().__init__() self.dark = load_configuration().dark_mode def token_bounce(self, token: str | None) -> None: """Handle the result of asking the user for their API token. Args: token: The resulting token. """ if token: token_file().write_text(token, encoding="utf-8")
self.push_screen(Main(token))
0
2023-11-13 08:19:41+00:00
8k
wurenkai/MHA-UNet
train_cou.py
[ { "identifier": "MHA_UNet", "path": "models/model.py", "snippet": "class MHA_UNet(nn.Module):\n\n def __init__(self, num_classes=1, input_channels=3, pretrained=None,use_checkpoint=False, c_list=[16, 32, 64, 128, 256],\n split_att='fc', bridge=True):\n super().__init__()\n self.pretrained = pretrained\n self.use_checkpoint = use_checkpoint\n self.bridge = bridge\n\n self.encoder1 = nn.Sequential(\n nn.Conv2d(input_channels, c_list[0], 3, stride=1, padding=1),\n )\n\n self.encoder2 = nn.Sequential(\n nn.Conv2d(c_list[0], c_list[1], 3, stride=1, padding=1),\n nn.Dropout2d(0.1),\n )\n\n self.encoder3 = nn.Sequential(\n nn.Conv2d(c_list[1], c_list[2], 3, stride=1, padding=1),\n MHAblock(c_list[2]))\n self.encoder3_1 = nn.Dropout2d(0.1)\n\n self.encoder4 = nn.Sequential(\n nn.Conv2d(c_list[2], c_list[3], 3, stride=1, padding=1),\n MHAblock(c_list[3]))\n self.encoder4_1 = nn.Dropout2d(0.1)\n\n self.encoder5 = nn.Sequential(\n nn.Conv2d(c_list[3], c_list[4], 3, stride=1, padding=1),\n MHAblock(c_list[4]))\n self.encoder5_1 = nn.Dropout2d(0.1)\n\n # build Bottleneck layers\n self.ConvMixer = ConvMixerBlock(dim=c_list[4], depth=7, k=7)\n\n if bridge:\n self.scab = SC_Att_Bridge(c_list, split_att)\n print('SC_Att_Bridge was used')\n\n\n self.decoder1 = nn.Sequential(\n nn.Dropout2d(0.1),\n MHAblock(c_list[4]))\n self.decoder1_1 = nn.Conv2d(c_list[4], c_list[3], 3, stride=1, padding=1)\n\n self.decoder2 = nn.Sequential(\n nn.Dropout2d(0.1),\n MHAblock(c_list[3]))\n self.decoder2_1 = nn.Conv2d(c_list[3], c_list[2], 3, stride=1, padding=1)\n\n self.decoder3 = nn.Sequential(\n nn.Dropout2d(0.1),\n MHAblock(c_list[2]))\n self.decoder3_1 = nn.Conv2d(c_list[2], c_list[1], 3, stride=1, padding=1)\n\n self.decoder4 = nn.Sequential(\n nn.Dropout2d(0.1),\n nn.Conv2d(c_list[1], c_list[0], 3, stride=1, padding=1),\n )\n\n\n self.ebn1 = nn.GroupNorm(4, c_list[0])\n self.ebn2 = nn.GroupNorm(4, c_list[1])\n self.ebn3 = nn.GroupNorm(4, c_list[2])\n self.ebn4 = nn.GroupNorm(4, c_list[3])\n self.ebn5 = nn.GroupNorm(4, c_list[4])\n self.dbn1 = nn.GroupNorm(4, c_list[3])\n self.dbn2 = nn.GroupNorm(4, c_list[2])\n self.dbn3 = nn.GroupNorm(4, c_list[1])\n self.dbn4 = nn.GroupNorm(4, c_list[0])\n\n self.final = nn.Conv2d(c_list[0], num_classes, kernel_size=1)\n\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if isinstance(m, nn.Linear) and m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv1d):\n n = m.kernel_size[0] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.Conv2d):\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x):\n\n out = F.gelu(F.max_pool2d(self.ebn1(self.encoder1(x)), 2, 2))\n t1 = out # b, c0, H/2, W/2\n\n out = F.gelu(F.max_pool2d(self.ebn2(self.encoder2(out)), 2, 2))\n t2 = out # b, c1, H/4, W/4\n\n out = F.gelu(F.max_pool2d(self.ebn3(self.encoder3_1(self.encoder3(out)[0])), 2, 2))\n t3 = out # b, c2, H/8, W/8\n\n out = F.gelu(F.max_pool2d(self.ebn4(self.encoder4_1(self.encoder4(out)[0])), 2, 2))\n t4 = out # b, c3, H/16, W/16\n\n if self.bridge: t1, t2, t3, t4 = self.scab(t1, t2, t3, t4)\n out = F.gelu((self.ebn5(self.encoder5_1(self.encoder5(out)[0]))))# b, c4, H/32, W/32\n out = self.ConvMixer(out)\n\n out5 = F.gelu(self.dbn1(self.decoder1_1(self.decoder1(out)[0])))\n out5 = torch.add(out5, t4)\n\n out4 = F.gelu(F.interpolate(self.dbn2(self.decoder2_1(self.decoder2(out5)[0])), scale_factor=(2, 2), mode='bilinear',\n align_corners=True))\n out4 = torch.add(out4, t3)\n a = self.decoder3(out4)\n x1 = a[1]\n x2 = a[2]\n x3 = a[3]\n x4 = a[4]\n x5 = a[5]\n xx = a[0]\n\n out3 = F.gelu(F.interpolate(self.dbn3(self.decoder3_1(xx)), scale_factor=(2, 2), mode='bilinear',\n align_corners=True))\n out3 = torch.add(out3, t2)\n\n out2 = F.gelu(F.interpolate(self.dbn4(self.decoder4(out3)), scale_factor=(2, 2), mode='bilinear',\n align_corners=True))\n out2 = torch.add(out2, t1)\n\n out0 = F.interpolate(self.final(out2), scale_factor=(2, 2), mode='bilinear',\n align_corners=True)\n\n return torch.sigmoid(out0),torch.sigmoid(x1),torch.sigmoid(x2),torch.sigmoid(x3),torch.sigmoid(x4),torch.sigmoid(x5)" }, { "identifier": "NPY_datasets", "path": "dataset/npy_datasets.py", "snippet": "class NPY_datasets(Dataset):\r\n def __init__(self, path_Data, config, train=True,test=False):\r\n super(NPY_datasets, self)\r\n if train:\r\n images_list = os.listdir(path_Data+'train/images/')\r\n masks_list = os.listdir(path_Data+'train/masks/')\r\n self.data = []\r\n for i in range(len(images_list)):\r\n img_path = path_Data+'train/images/' + images_list[i]\r\n mask_path = path_Data+'train/masks/' + masks_list[i]\r\n self.data.append([img_path, mask_path])\r\n self.transformer = config.train_transformer\r\n elif test:\r\n images_list = os.listdir(path_Data+'test/images/')\r\n masks_list = os.listdir(path_Data+'test/masks/')\r\n self.data = []\r\n for i in range(len(images_list)):\r\n img_path = path_Data+'test/images/' + images_list[i]\r\n mask_path = path_Data+'test/masks/' + masks_list[i]\r\n self.data.append([img_path, mask_path])\r\n self.transformer = config.test_transformer\r\n else:\r\n images_list = os.listdir(path_Data+'val/images/')\r\n masks_list = os.listdir(path_Data+'val/masks/')\r\n self.data = []\r\n for i in range(len(images_list)):\r\n img_path = path_Data+'val/images/' + images_list[i]\r\n mask_path = path_Data+'val/masks/' + masks_list[i]\r\n self.data.append([img_path, mask_path])\r\n self.transformer = config.val_transformer\r\n \r\n def __getitem__(self, indx):\r\n img_path, msk_path = self.data[indx]\r\n img = np.array(Image.open(img_path).convert('RGB'))\r\n msk = np.expand_dims(np.array(Image.open(msk_path).convert('L')), axis=2) / 255\r\n img, msk = self.transformer((img, msk))\r\n return img, msk\r\n\r\n def __len__(self):\r\n return len(self.data)\r" }, { "identifier": "setting_config", "path": "configs/config_setting.py", "snippet": "class 
setting_config:\r\n \"\"\"\r\n the config of training setting.\r\n \"\"\"\r\n network = 'MHA-UNet'\r\n\r\n\r\n test_weights = ''\r\n\r\n datasets = 'ISIC2017'\r\n if datasets == 'ISIC2017':\r\n data_path = r'D:\\data\\data\\ISIC_2017\\\\'\r\n elif datasets == 'ISIC2018':\r\n data_path = r'D:\\data\\data\\ISIC_2018\\\\'\r\n elif datasets == 'PH2':\r\n data_path = r'D:\\data\\data\\PH2\\PH2_npy\\\\'\r\n else:\r\n raise Exception('datasets is not right!')\r\n\r\n criterion = BceDiceLoss()\r\n\r\n num_classes = 1\r\n input_size_h = 256\r\n input_size_w = 256\r\n input_channels = 3\r\n distributed = False\r\n local_rank = -1\r\n num_workers = 0\r\n seed = 42\r\n world_size = None\r\n rank = None\r\n amp = False\r\n batch_size = 8\r\n epochs = 250\r\n\r\n work_dir = 'results/' + network + '_' + datasets + '_' + datetime.now().strftime('%A_%d_%B_%Y_%Hh_%Mm_%Ss') + '/'\r\n\r\n print_interval = 20\r\n val_interval = 30\r\n save_interval = 100\r\n threshold = 0.5\r\n\r\n train_transformer = transforms.Compose([\r\n myNormalize(datasets, train=True),\r\n myToTensor(),\r\n myRandomHorizontalFlip(p=0.5),\r\n myRandomVerticalFlip(p=0.5),\r\n myRandomRotation(p=0.5, degree=[0, 360]),\r\n myResize(input_size_h, input_size_w)\r\n ])\r\n val_transformer = transforms.Compose([\r\n myNormalize(datasets, train=False),\r\n myToTensor(),\r\n myResize(input_size_h, input_size_w)\r\n ])\r\n test_transformer = transforms.Compose([\r\n myToTensor(),\r\n myResize(input_size_h, input_size_w)\r\n ])\r\n\r\n opt = 'AdamW'\r\n assert opt in ['Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'ASGD', 'RMSprop', 'Rprop', 'SGD'], 'Unsupported optimizer!'\r\n if opt == 'Adadelta':\r\n lr = 0.01 # default: 1.0 – coefficient that scale delta before it is applied to the parameters\r\n rho = 0.9 # default: 0.9 – coefficient used for computing a running average of squared gradients\r\n eps = 1e-6 # default: 1e-6 – term added to the denominator to improve numerical stability \r\n weight_decay = 0.05 # default: 0 – weight decay (L2 penalty) \r\n elif opt == 'Adagrad':\r\n lr = 0.01 # default: 0.01 – learning rate\r\n lr_decay = 0 # default: 0 – learning rate decay\r\n eps = 1e-10 # default: 1e-10 – term added to the denominator to improve numerical stability\r\n weight_decay = 0.05 # default: 0 – weight decay (L2 penalty)\r\n elif opt == 'Adam':\r\n lr = 0.001 # default: 1e-3 – learning rate\r\n betas = (0.9, 0.999) # default: (0.9, 0.999) – coefficients used for computing running averages of gradient and its square\r\n eps = 1e-8 # default: 1e-8 – term added to the denominator to improve numerical stability \r\n weight_decay = 0.0001 # default: 0 – weight decay (L2 penalty) \r\n amsgrad = False # default: False – whether to use the AMSGrad variant of this algorithm from the paper On the Convergence of Adam and Beyond\r\n elif opt == 'AdamW':\r\n lr = 0.001 # default: 1e-3 – learning rate\r\n betas = (0.9, 0.999) # default: (0.9, 0.999) – coefficients used for computing running averages of gradient and its square\r\n eps = 1e-8 # default: 1e-8 – term added to the denominator to improve numerical stability\r\n weight_decay = 1e-2 # default: 1e-2 – weight decay coefficient\r\n amsgrad = False # default: False – whether to use the AMSGrad variant of this algorithm from the paper On the Convergence of Adam and Beyond \r\n elif opt == 'Adamax':\r\n lr = 2e-3 # default: 2e-3 – learning rate\r\n betas = (0.9, 0.999) # default: (0.9, 0.999) – coefficients used for computing running averages of gradient and its square\r\n eps = 1e-8 # 
default: 1e-8 – term added to the denominator to improve numerical stability\r\n weight_decay = 0 # default: 0 – weight decay (L2 penalty) \r\n elif opt == 'ASGD':\r\n lr = 0.01 # default: 1e-2 – learning rate \r\n lambd = 1e-4 # default: 1e-4 – decay term\r\n alpha = 0.75 # default: 0.75 – power for eta update\r\n t0 = 1e6 # default: 1e6 – point at which to start averaging\r\n weight_decay = 0 # default: 0 – weight decay\r\n elif opt == 'RMSprop':\r\n lr = 1e-2 # default: 1e-2 – learning rate\r\n momentum = 0 # default: 0 – momentum factor\r\n alpha = 0.99 # default: 0.99 – smoothing constant\r\n eps = 1e-8 # default: 1e-8 – term added to the denominator to improve numerical stability\r\n centered = False # default: False – if True, compute the centered RMSProp, the gradient is normalized by an estimation of its variance\r\n weight_decay = 0 # default: 0 – weight decay (L2 penalty)\r\n elif opt == 'Rprop':\r\n lr = 1e-2 # default: 1e-2 – learning rate\r\n etas = (0.5, 1.2) # default: (0.5, 1.2) – pair of (etaminus, etaplus), that are multiplicative increase and decrease factors\r\n step_sizes = (1e-6, 50) # default: (1e-6, 50) – a pair of minimal and maximal allowed step sizes \r\n elif opt == 'SGD':\r\n lr = 0.01 # – learning rate\r\n momentum = 0.9 # default: 0 – momentum factor \r\n weight_decay = 0.05 # default: 0 – weight decay (L2 penalty) \r\n dampening = 0 # default: 0 – dampening for momentum\r\n nesterov = False # default: False – enables Nesterov momentum \r\n \r\n sch = 'CosineAnnealingLR'\r\n if sch == 'StepLR':\r\n step_size = epochs // 5 # – Period of learning rate decay.\r\n gamma = 0.5 # – Multiplicative factor of learning rate decay. Default: 0.1\r\n last_epoch = -1 # – The index of last epoch. Default: -1.\r\n elif sch == 'MultiStepLR':\r\n milestones = [60, 120, 150] # – List of epoch indices. Must be increasing.\r\n gamma = 0.1 # – Multiplicative factor of learning rate decay. Default: 0.1.\r\n last_epoch = -1 # – The index of last epoch. Default: -1.\r\n elif sch == 'ExponentialLR':\r\n gamma = 0.99 # – Multiplicative factor of learning rate decay.\r\n last_epoch = -1 # – The index of last epoch. Default: -1.\r\n elif sch == 'CosineAnnealingLR':\r\n T_max = 50 # – Maximum number of iterations. Cosine function period.\r\n eta_min = 0.00001 # – Minimum learning rate. Default: 0.\r\n last_epoch = -1 # – The index of last epoch. Default: -1. \r\n elif sch == 'ReduceLROnPlateau':\r\n mode = 'min' # – One of min, max. In min mode, lr will be reduced when the quantity monitored has stopped decreasing; in max mode it will be reduced when the quantity monitored has stopped increasing. Default: ‘min’.\r\n factor = 0.1 # – Factor by which the learning rate will be reduced. new_lr = lr * factor. Default: 0.1.\r\n patience = 10 # – Number of epochs with no improvement after which learning rate will be reduced. For example, if patience = 2, then we will ignore the first 2 epochs with no improvement, and will only decrease the LR after the 3rd epoch if the loss still hasn’t improved then. Default: 10.\r\n threshold = 0.0001 # – Threshold for measuring the new optimum, to only focus on significant changes. Default: 1e-4.\r\n threshold_mode = 'rel' # – One of rel, abs. In rel mode, dynamic_threshold = best * ( 1 + threshold ) in ‘max’ mode or best * ( 1 - threshold ) in min mode. In abs mode, dynamic_threshold = best + threshold in max mode or best - threshold in min mode. 
Default: ‘rel’.\r\n cooldown = 0 # – Number of epochs to wait before resuming normal operation after lr has been reduced. Default: 0.\r\n min_lr = 0 # – A scalar or a list of scalars. A lower bound on the learning rate of all param groups or each group respectively. Default: 0.\r\n eps = 1e-08 # – Minimal decay applied to lr. If the difference between new and old lr is smaller than eps, the update is ignored. Default: 1e-8.\r\n elif sch == 'CosineAnnealingWarmRestarts':\r\n T_0 = 50 # – Number of iterations for the first restart.\r\n T_mult = 2 # – A factor increases T_{i} after a restart. Default: 1.\r\n eta_min = 1e-6 # – Minimum learning rate. Default: 0.\r\n last_epoch = -1 # – The index of last epoch. Default: -1. \r\n elif sch == 'WP_MultiStepLR':\r\n warm_up_epochs = 10\r\n gamma = 0.1\r\n milestones = [125, 225]\r\n elif sch == 'WP_CosineLR':\r\n warm_up_epochs = 20" } ]
import torch import os import sys import warnings from torch import nn from torch.cuda.amp import autocast, GradScaler from torch.utils.data import DataLoader from loader import * from models.model import MHA_UNet from dataset.npy_datasets import NPY_datasets from engine import * from utils import * from configs.config_setting import setting_config
5,185
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # "0, 1, 2, 3" warnings.filterwarnings("ignore") def main(config): print('#----------Creating logger----------#') sys.path.append(config.work_dir + '/') log_dir = os.path.join(config.work_dir, 'log') checkpoint_dir = os.path.join(config.work_dir, 'checkpoints') resume_model = os.path.join(checkpoint_dir, 'latest.pth') outputs = os.path.join(config.work_dir, 'outputs') if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) if not os.path.exists(outputs): os.makedirs(outputs) global logger logger = get_logger('train', log_dir) log_config_info(config, logger) print('#----------GPU init----------#') set_seed(config.seed) gpu_ids = [0] # [0, 1, 2, 3] torch.cuda.empty_cache() print('#----------Preparing dataset----------#') train_dataset = isic_loader(path_Data=config.data_path, train=True) train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, pin_memory=True, num_workers=config.num_workers) val_dataset = isic_loader(path_Data=config.data_path, train=False) val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, pin_memory=True, num_workers=config.num_workers, drop_last=True) test_dataset = isic_loader(path_Data=config.data_path, train=False, Test=True) test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, pin_memory=True, num_workers=config.num_workers, drop_last=True) print('#----------Prepareing Models----------#') #model_cfg = config.model_config
os.environ["CUDA_VISIBLE_DEVICES"] = "0" # "0, 1, 2, 3" warnings.filterwarnings("ignore") def main(config): print('#----------Creating logger----------#') sys.path.append(config.work_dir + '/') log_dir = os.path.join(config.work_dir, 'log') checkpoint_dir = os.path.join(config.work_dir, 'checkpoints') resume_model = os.path.join(checkpoint_dir, 'latest.pth') outputs = os.path.join(config.work_dir, 'outputs') if not os.path.exists(checkpoint_dir): os.makedirs(checkpoint_dir) if not os.path.exists(outputs): os.makedirs(outputs) global logger logger = get_logger('train', log_dir) log_config_info(config, logger) print('#----------GPU init----------#') set_seed(config.seed) gpu_ids = [0] # [0, 1, 2, 3] torch.cuda.empty_cache() print('#----------Preparing dataset----------#') train_dataset = isic_loader(path_Data=config.data_path, train=True) train_loader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, pin_memory=True, num_workers=config.num_workers) val_dataset = isic_loader(path_Data=config.data_path, train=False) val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, pin_memory=True, num_workers=config.num_workers, drop_last=True) test_dataset = isic_loader(path_Data=config.data_path, train=False, Test=True) test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, pin_memory=True, num_workers=config.num_workers, drop_last=True) print('#----------Prepareing Models----------#') #model_cfg = config.model_config
model = MHA_UNet()
0
2023-11-13 06:59:52+00:00
8k
buptlihang/CVLM
examples/example_chat.py
[ { "identifier": "IMAGE_TOKEN_INDEX", "path": "model/utils.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "model/utils.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "model/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "model/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "process_images", "path": "model/utils.py", "snippet": "def process_images(images, image_processor, model_cfg):\n new_images = []\n for image in images:\n image = expand2square(\n image, tuple(int(x * 255) for x in image_processor.image_mean))\n image = image_processor.preprocess(\n image, return_tensors='pt')['pixel_values'][0]\n new_images.append(image)\n if all(x.shape == new_images[0].shape for x in new_images):\n new_images = torch.stack(new_images, dim=0)\n return new_images" }, { "identifier": "tokenizer_image_token", "path": "model/utils.py", "snippet": "def tokenizer_image_token(prompt,\n tokenizer,\n image_token_index=IMAGE_TOKEN_INDEX,\n return_tensors=None):\n prompt_chunks = [\n tokenizer(chunk).input_ids for chunk in prompt.split('<image>')\n ]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X))\n for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if len(prompt_chunks) > 0 and len(\n prompt_chunks[0]\n ) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id:\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks,\n [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == 'pt':\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f'Unsupported tensor type: {return_tensors}')\n return input_ids" }, { "identifier": "KeywordsStoppingCriteria", "path": "model/utils.py", "snippet": "class KeywordsStoppingCriteria(StoppingCriteria):\n\n def __init__(self, keywords, tokenizer, input_ids):\n self.keywords = keywords\n self.keyword_ids = []\n self.max_keyword_len = 0\n for keyword in keywords:\n cur_keyword_ids = tokenizer(keyword).input_ids\n if len(cur_keyword_ids\n ) > 1 and cur_keyword_ids[0] == tokenizer.bos_token_id:\n cur_keyword_ids = cur_keyword_ids[1:]\n if len(cur_keyword_ids) > self.max_keyword_len:\n self.max_keyword_len = len(cur_keyword_ids)\n self.keyword_ids.append(torch.tensor(cur_keyword_ids))\n self.tokenizer = tokenizer\n self.start_len = input_ids.shape[1]\n\n def __call__(self, output_ids: torch.LongTensor, scores: torch.FloatTensor,\n **kwargs) -> bool:\n assert output_ids.shape[\n 0] == 1, \"Only support batch size 1 (yet)\" # TODO\n offset = min(output_ids.shape[1] - self.start_len,\n self.max_keyword_len)\n self.keyword_ids = [\n keyword_id.to(output_ids.device) for keyword_id in self.keyword_ids\n ]\n for keyword_id in self.keyword_ids:\n if (output_ids[0, -keyword_id.shape[0]:] == keyword_id).all():\n return True\n outputs = self.tokenizer.batch_decode(output_ids[:, -offset:],\n skip_special_tokens=True)[0]\n for keyword in self.keywords:\n if keyword in outputs:\n return True\n return False" }, { "identifier": "Conversation", "path": "model/utils.py", "snippet": "class Conversation:\n \"\"\"A class that keeps all conversation history.\"\"\"\n system: str\n roles: List[str]\n messages: List[List[str]]\n offset: int\n sep_style: SeparatorStyle = SeparatorStyle.SINGLE\n sep: str = \"###\"\n sep2: str = None\n version: str 
= \"Unknown\"\n\n skip_next: bool = False\n\n def get_prompt(self):\n messages = self.messages\n if len(messages) > 0 and type(messages[0][1]) is tuple:\n messages = self.messages.copy()\n init_role, init_msg = messages[0].copy()\n init_msg = init_msg[0].replace(\"<image>\", \"\").strip()\n messages[0] = (init_role, \"<image>\\n\" + init_msg)\n\n if self.sep_style == SeparatorStyle.SINGLE:\n ret = self.system + self.sep\n for role, message in messages:\n if message:\n if type(message) is tuple:\n message, _, _ = message\n ret += role + \": \" + message + self.sep\n else:\n ret += role + \":\"\n elif self.sep_style == SeparatorStyle.TWO:\n seps = [self.sep, self.sep2]\n ret = self.system + seps[0]\n for i, (role, message) in enumerate(messages):\n if message:\n if type(message) is tuple:\n message, _, _ = message\n ret += role + \": \" + message + seps[i % 2]\n else:\n ret += role + \":\"\n else:\n raise ValueError(f\"Invalid style: {self.sep_style}\")\n\n return ret\n\n def append_message(self, role, message):\n self.messages.append([role, message])\n\n def get_images(self, return_pil=False):\n images = []\n for i, (role, msg) in enumerate(self.messages[self.offset:]):\n if i % 2 == 0:\n if type(msg) is tuple:\n import base64\n from io import BytesIO\n from PIL import Image\n msg, image, image_process_mode = msg\n if image_process_mode == \"Pad\":\n\n def expand2square(pil_img,\n background_color=(122, 116, 104)):\n width, height = pil_img.size\n if width == height:\n return pil_img\n elif width > height:\n result = Image.new(pil_img.mode,\n (width, width),\n background_color)\n result.paste(pil_img,\n (0, (width - height) // 2))\n return result\n else:\n result = Image.new(pil_img.mode,\n (height, height),\n background_color)\n result.paste(pil_img,\n ((height - width) // 2, 0))\n return result\n\n image = expand2square(image)\n elif image_process_mode in [\"Default\", \"Crop\"]:\n pass\n elif image_process_mode == \"Resize\":\n image = image.resize((336, 336))\n else:\n raise ValueError(\n f\"Invalid image_process_mode: {image_process_mode}\"\n )\n max_hw, min_hw = max(image.size), min(image.size)\n aspect_ratio = max_hw / min_hw\n max_len, min_len = 800, 400\n shortest_edge = int(\n min(max_len / aspect_ratio, min_len, min_hw))\n longest_edge = int(shortest_edge * aspect_ratio)\n W, H = image.size\n if longest_edge != max(image.size):\n if H > W:\n H, W = longest_edge, shortest_edge\n else:\n H, W = shortest_edge, longest_edge\n image = image.resize((W, H))\n if return_pil:\n images.append(image)\n else:\n buffered = BytesIO()\n image.save(buffered, format=\"PNG\")\n img_b64_str = base64.b64encode(\n buffered.getvalue()).decode()\n images.append(img_b64_str)\n return images\n\n def to_gradio_chatbot(self):\n ret = []\n for i, (role, msg) in enumerate(self.messages[self.offset:]):\n if i % 2 == 0:\n if type(msg) is tuple:\n import base64\n from io import BytesIO\n msg, image, image_process_mode = msg\n max_hw, min_hw = max(image.size), min(image.size)\n aspect_ratio = max_hw / min_hw\n max_len, min_len = 800, 400\n shortest_edge = int(\n min(max_len / aspect_ratio, min_len, min_hw))\n longest_edge = int(shortest_edge * aspect_ratio)\n W, H = image.size\n if H > W:\n H, W = longest_edge, shortest_edge\n else:\n H, W = shortest_edge, longest_edge\n image = image.resize((W, H))\n buffered = BytesIO()\n image.save(buffered, format=\"JPEG\")\n img_b64_str = base64.b64encode(\n buffered.getvalue()).decode()\n img_str = f'<img src=\"data:image/png;base64,{img_b64_str}\" alt=\"user upload 
image\" />'\n msg = img_str + msg.replace('<image>', '').strip()\n ret.append([msg, None])\n else:\n ret.append([msg, None])\n else:\n ret[-1][-1] = msg\n return ret\n\n def copy(self):\n return Conversation(system=self.system,\n roles=self.roles,\n messages=[[x, y] for x, y in self.messages],\n offset=self.offset,\n sep_style=self.sep_style,\n sep=self.sep,\n sep2=self.sep2,\n version=self.version)\n\n def dict(self):\n if len(self.get_images()) > 0:\n return {\n \"system\":\n self.system,\n \"roles\":\n self.roles,\n \"messages\": [[x, y[0] if type(y) is tuple else y]\n for x, y in self.messages],\n \"offset\":\n self.offset,\n \"sep\":\n self.sep,\n \"sep2\":\n self.sep2,\n }\n return {\n \"system\": self.system,\n \"roles\": self.roles,\n \"messages\": self.messages,\n \"offset\": self.offset,\n \"sep\": self.sep,\n \"sep2\": self.sep2,\n }" }, { "identifier": "SeparatorStyle", "path": "model/utils.py", "snippet": "class SeparatorStyle(Enum):\n \"\"\"Different separator style.\"\"\"\n SINGLE = auto()\n TWO = auto()" }, { "identifier": "load_image", "path": "model/utils.py", "snippet": "def load_image(image_file):\n if image_file.startswith('http://') or image_file.startswith('https://'):\n response = requests.get(image_file)\n image = Image.open(BytesIO(response.content)).convert('RGB')\n else:\n image = Image.open(image_file).convert('RGB')\n return image" }, { "identifier": "load_pretrained_model", "path": "model/utils.py", "snippet": "def load_pretrained_model(model_path,\n load_8bit=False,\n load_4bit=False,\n device_map=\"auto\",\n device=\"cuda\"):\n kwargs = {\"device_map\": device_map}\n\n if load_8bit:\n kwargs['load_in_8bit'] = True\n elif load_4bit:\n kwargs['load_in_4bit'] = True\n kwargs['quantization_config'] = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_compute_dtype=torch.float16,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type='nf4')\n else:\n kwargs['torch_dtype'] = torch.float16\n\n tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)\n model, output_loading_info = AutoModelForCausalLM.from_pretrained(\n model_path, output_loading_info=True, **kwargs)\n model.resize_token_embeddings(len(tokenizer))\n image_processor = model.model.image_processor\n\n if hasattr(model.config, \"max_sequence_length\"):\n context_len = model.config.max_sequence_length\n else:\n context_len = 2048\n\n return tokenizer, model, image_processor, context_len" }, { "identifier": "build_conversation", "path": "model/utils.py", "snippet": "def build_conversation():\n conversation = Conversation(\n system=\n \"A chat between a curious human and an artificial intelligence assistant. 
\"\n \"The assistant gives helpful, detailed, and polite answers to the human's questions.\",\n roles=[\"USER\", \"ASSISTANT\"],\n version=\"TWO\",\n messages=[],\n offset=0,\n sep_style=SeparatorStyle.TWO,\n sep=\" \",\n sep2=\"</s>\",\n )\n return conversation" }, { "identifier": "CvlmForCausalLM", "path": "model/cvlm.py", "snippet": "class CvlmForCausalLM(LlamaForCausalLM):\n config_class = CvlmConfig\n\n def __init__(self, config):\n super(LlamaForCausalLM, self).__init__(config)\n self.model = CvlmModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size,\n config.vocab_size,\n bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_model(self):\n return self.model\n\n def get_vision_encoder(self):\n return self.model.vision_encoder\n\n def extract_vision_feature(self, images):\n vision_features_outs = self.model.vision_encoder(\n images.to(device=self.device, dtype=self.dtype),\n output_hidden_states=True)\n vision_features = vision_features_outs.hidden_states[-2][:, 1:].to(\n images.dtype)\n vision_features = self.model.adapter(vision_features)\n return vision_features\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[torch.FloatTensor] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (output_hidden_states\n if output_hidden_states is not None else\n self.config.output_hidden_states)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(\n input_ids, attention_mask, past_key_values, labels, images)\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(input_ids=input_ids,\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict)\n logits = self.lm_head(outputs[0])\n\n return CausalLMOutputWithPast(\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update({\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n \"images\": kwargs.get(\"images\", None),\n })\n return model_inputs\n\n def prepare_inputs_labels_for_multimodal(self, input_ids, attention_mask,\n past_key_values, labels, images):\n 
vision_encoder = self.get_vision_encoder()\n if vision_encoder is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_encoder is not None and images is not None and input_ids.shape[\n 1] == 1:\n attention_mask = torch.ones(\n (attention_mask.shape[0],\n past_key_values[-1][-1].shape[-2] + 1),\n dtype=attention_mask.dtype,\n device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n vision_features = self.extract_vision_feature(concat_images)\n split_sizes = [image.shape[0] for image in images]\n vision_features = torch.split(vision_features, split_sizes, dim=0)\n vision_features = [x.flatten(0, 1) for x in vision_features]\n else:\n vision_features = self.extract_vision_feature(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n # FIXME: this is a hacky fix, for deepspeed zero3 to work\n half_len = cur_input_ids.shape[0] // 2\n cur_vision_features = vision_features[cur_image_idx]\n cur_input_embeds_1 = self.get_model().embed_tokens(\n cur_input_ids[:half_len])\n cur_input_embeds_2 = self.get_model().embed_tokens(\n cur_input_ids[half_len:])\n cur_input_embeds = torch.cat([\n cur_input_embeds_1, cur_vision_features[0:0],\n cur_input_embeds_2\n ],\n dim=0)\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(\n cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_vision_features = vision_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n cur_new_input_embeds.append(self.get_model().embed_tokens(\n cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_vision_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(\n torch.full((cur_vision_features.shape[0], ),\n IGNORE_INDEX,\n device=labels.device,\n dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start + 1:]\n cur_image_idx += 1\n cur_input_ids = cur_input_ids[image_token_start + 1:]\n image_token_indices = torch.where(\n cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n cur_new_input_embeds.append(\n self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [\n x.to(device=self.device) for x in cur_new_input_embeds\n ]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat(\n (cur_new_embed,\n torch.zeros((max_len - cur_new_embed.shape[0],\n cur_new_embed.shape[1]),\n dtype=cur_new_embed.dtype,\n device=cur_new_embed.device)),\n 
dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat(\n (cur_new_label,\n torch.full((max_len - cur_new_label.shape[0], ),\n IGNORE_INDEX,\n dtype=cur_new_label.dtype,\n device=cur_new_label.device)),\n dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(\n attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full(\n (cur_new_labels.shape[0] - labels.shape[1], ),\n True,\n dtype=attention_mask.dtype,\n device=attention_mask.device)\n new_attn_mask_pad_right = torch.full(\n (cur_new_labels_align.shape[0] -\n cur_new_labels.shape[0], ),\n False,\n dtype=attention_mask.dtype,\n device=attention_mask.device)\n cur_new_attention_mask = torch.cat(\n (new_attn_mask_pad_left, cur_attention_mask,\n new_attn_mask_pad_right),\n dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full(\n (attention_mask.shape[0],\n new_input_embeds.shape[1] - input_ids.shape[1]),\n True,\n dtype=attention_mask.dtype,\n device=attention_mask.device)\n attention_mask = torch.cat(\n (new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels" } ]
import argparse import torch import requests from PIL import Image from io import BytesIO from transformers import TextStreamer, AutoTokenizer from model.utils import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from model.utils import process_images, tokenizer_image_token, KeywordsStoppingCriteria from model.utils import Conversation, SeparatorStyle from model.utils import load_image, load_pretrained_model, build_conversation from model import CvlmForCausalLM
5,602
def main(args): # Model tokenizer, model, image_processor, context_len = load_pretrained_model( args.model_path, args.load_8bit, args.load_4bit, device=args.device) conv = build_conversation() image = load_image(args.image_file) # Similar operation in model_worker.py
def main(args): # Model tokenizer, model, image_processor, context_len = load_pretrained_model( args.model_path, args.load_8bit, args.load_4bit, device=args.device) conv = build_conversation() image = load_image(args.image_file) # Similar operation in model_worker.py
image_tensor = process_images([image], image_processor, args)
4
2023-11-10 03:52:46+00:00
8k
vvvm23/TchAIkovsky
model/model.py
[ { "identifier": "MultiheadAttention", "path": "model/attention.py", "snippet": "class MultiheadAttention(Module):\n r\"\"\"\n Computes\n\n $$\\text{MultiheadAttention}(Q, K, V)\n = \\sum_i \\text{Attention}\\left(QW^Q_i, KW^K_i, VW^V_i\\right)W^O_i$$\n\n where:\n\n - The inputs are\n $Q \\in \\mathbb{R}^{d_\\text{seq} \\times d_\\text{query}}$,\n $K \\in \\mathbb{R}^{d_\\text{seq} \\times d_\\text{key}}$,\n $V \\in \\mathbb{R}^{d_\\text{seq} \\times d_\\text{value}}$.\n These are referred to as query, key, and value respectively. Meanwhile\n $d_\\text{seq}$ is the sequence length, and $d_\\text{query}$, $d_\\text{key}$,\n $d_\\text{value}$ are numbers of channels.\n\n - The trainable weights are\n $W^Q_i \\in \\mathbb{R}^{d_\\text{query} \\times d_\\text{qk}}$,\n $W^K_i \\in \\mathbb{R}^{d_\\text{key} \\times d_\\text{qk}}$,\n $W^V_i \\in \\mathbb{R}^{d_\\text{value} \\times d_\\text{vo}}$,\n $W^O_i \\in \\mathbb{R}^{d_\\text{vo} \\times d_\\text{output}}$,\n with $i \\in \\{1, \\ldots, h\\}$, where $h$ is the number of heads, and $d_\\text{qk}$,\n $d_\\text{vo}$, $d_\\text{output}$ are hyperparameters.\n\n - $\\text{Attention}$ is defined as\n $\\text{Attention}(\\widetilde{Q}, \\widetilde{K}, \\widetilde{V})\n = \\text{softmax}(\\frac{\\widetilde{Q}\\widetilde{K}^\\intercal}\n {\\sqrt{d_\\text{qk}}})\\widetilde{V}$.\n\n ??? cite\n\n [Attention is All You Need](https://arxiv.org/abs/1706.03762)\n\n ```bibtex\n @inproceedings{vaswani2017attention,\n author={Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and\n Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and\n Kaiser, {\\L}ukasz and Polosukhin, Illia},\n booktitle={Advances in Neural Information Processing Systems},\n publisher={Curran Associates, Inc.},\n title={Attention is All You Need},\n volume={30},\n year={2017}\n }\n ```\n\n !!! faq \"FAQ\"\n\n Different software libraries often implement multihead attention in slightly\n different ways. Some of them will or won't add on biases by default. Most of\n them will fix the values of $d_\\text{qk}, d_\\text{vo}, d_\\text{output}$ in\n terms of $d_\\text{query}$ or $d_\\text{key}$ or $d_\\text{value}$. Equinox\n chooses to expose all of these as options.\n\n Relative to the original\n [Attention is All You Need](https://arxiv.org/abs/1706.03762) paper: our\n $d_\\text{qk}$ is their \"$d_k$\". Our $d_\\text{vo}$ is their \"$d_\\text{v}$\". 
They\n fix $d_\\text{query} = d_\\text{key} = d_\\text{value} = d_\\text{output}$ and\n refer to it as \"$d_\\text{model}$\".\n \"\"\"\n\n query_proj: Linear\n key_proj: Linear\n value_proj: Linear\n output_proj: Linear\n dropout: Dropout\n\n num_heads: int = field(static=True)\n query_size: int = field(static=True)\n key_size: int = field(static=True)\n value_size: int = field(static=True)\n output_size: int = field(static=True)\n qk_size: int = field(static=True)\n vo_size: int = field(static=True)\n use_query_bias: bool = field(static=True)\n use_key_bias: bool = field(static=True)\n use_value_bias: bool = field(static=True)\n use_output_bias: bool = field(static=True)\n dtype: jnp.dtype = field(static=True)\n\n def __init__(\n self,\n num_heads: int,\n query_size: int,\n key_size: Optional[int] = None,\n value_size: Optional[int] = None,\n output_size: Optional[int] = None,\n qk_size: Optional[int] = None,\n vo_size: Optional[int] = None,\n use_query_bias: bool = False,\n use_key_bias: bool = False,\n use_value_bias: bool = False,\n use_output_bias: bool = False,\n dropout_p: float = 0.0,\n inference: bool = False,\n dtype: jnp.dtype = jnp.float32, # softmax is always fixed to f32\n *,\n key: PRNGKeyArray,\n **kwargs,\n ):\n r\"\"\"**Arguments:**\n\n - `num_heads`: Number of parallel attention heads $h$.\n - `query_size`: Number of input channels for query $Q$.\n - `key_size`: Number of input channels for key $K$. Defaults to `query_size`.\n - `value_size`: Number of input channels for value $V$. Defaults to\n `query_size`.\n - `output_size`: Number of output channels. Defaults to `query_size`.\n - `qk_size`: Number of channels to compare query and key over, per head.\n Defaults to `query_size // num_heads`.\n - `vo_size`: Number of channels to compare attention-weighted value and output\n over, per head. Defaults to `query_size // num_heads`.\n - `use_query_bias`: Whether to use a bias term in the query projections.\n - `use_key_bias`: Whether to use a bias term in the key projections.\n - `use_value_bias`: Whether to use a bias term in the value projections.\n - `use_output_bias`: Whether to use a bias term in the output projection.\n - `dropout_p`: Dropout probability on attention weights.\n - `inference`: Whether to actually apply dropout at all. If `True` then dropout\n is not applied. If `False` then dropout is applied. This may be toggled\n with [`equinox.nn.inference_mode`][] or overridden during\n [`equinox.nn.MultiheadAttention.__call__`][].\n - `key`: A `jax.random.PRNGKey` used to provide randomness for parameter\n initialisation. 
(Keyword only argument.)\n \"\"\"\n super().__init__(**kwargs)\n qkey, kkey, vkey, okey = jrandom.split(key, 4)\n\n if key_size is None:\n key_size = query_size\n if value_size is None:\n value_size = query_size\n if qk_size is None:\n qk_size = query_size // num_heads\n if vo_size is None:\n vo_size = query_size // num_heads\n if output_size is None:\n output_size = query_size\n\n self.query_proj = Linear(\n query_size, num_heads * qk_size, use_bias=use_query_bias, key=qkey\n )\n self.key_proj = Linear(\n key_size, num_heads * qk_size, use_bias=use_key_bias, key=kkey\n )\n self.value_proj = Linear(\n value_size, num_heads * vo_size, use_bias=use_value_bias, key=vkey\n )\n self.output_proj = Linear(\n num_heads * vo_size, output_size, use_bias=use_output_bias, key=okey\n )\n self.dropout = Dropout(dropout_p, inference=inference)\n\n self.num_heads = num_heads\n self.query_size = query_size\n self.key_size = key_size\n self.value_size = value_size\n self.output_size = output_size\n self.qk_size = qk_size\n self.vo_size = vo_size\n self.use_query_bias = use_query_bias\n self.use_key_bias = use_key_bias\n self.use_value_bias = use_value_bias\n self.use_output_bias = use_output_bias\n self.dtype = dtype\n\n @jax.named_scope(\"eqx.nn.MultiheadAttention\")\n def __call__(\n self,\n query: Float[Array, \"q_seq q_size\"],\n key_: Float[Array, \"kv_seq k_size\"],\n value: Float[Array, \"kv_seq v_size\"],\n mask: Union[\n None, Bool[Array, \"q_seq kv_seq\"], Bool[Array, \"num_heads q_seq kv_seq\"]\n ] = None,\n *,\n key: Optional[PRNGKeyArray] = None,\n inference: Optional[bool] = None,\n deterministic: Optional[bool] = None,\n ) -> Float[Array, \"q_seq o_size\"]:\n \"\"\"**Arguments:**\n\n - `query`: Query embedding. Should be a JAX array of shape\n `(query_seq_length, query_size)`.\n - `key_`: Key embedding. Should be a JAX array of shape\n `(kv_seq_length, key_size)`.\n - `value`: Value embedding. Should be a JAX array of shape\n `(kv_seq_length, value_size)`.\n - `mask`: Optional mask preventing attention to certain positions. Should either\n be a JAX array of shape `(query_seq_length, kv_seq_length)`, or (for custom\n per-head masking) `(num_heads, query_seq_length, kv_seq_length)`. A value of\n `False` at a position indicates that position should be ignored.\n - `key`: A `jax.random.PRNGKey` used for dropout. Unused if `dropout = 0`.\n (Keyword only argument.)\n - `inference`: As [`equinox.nn.Dropout.__call__`][]. (Keyword only\n argument.)\n - `deterministic`: (Deprecated in favour of `inference`.)\n\n **Returns:**\n\n A JAX array of shape `(query_seq_length, output_size)`.\n \"\"\"\n\n if deterministic is not None:\n inference = deterministic\n warnings.warn(\n \"MultiheadAttention()(deterministic=...) 
is deprecated \"\n \"in favour of MultiheadAttention()(inference=...)\"\n )\n\n query_seq_length, _ = query.shape\n kv_seq_length, _ = key_.shape\n kv_seq_length2, _ = value.shape\n if kv_seq_length != kv_seq_length2:\n # query length can be different\n raise ValueError(\"key and value must both be sequences of equal length.\")\n\n # query, key_, value = query.astype(self.dtype), key_.astype(self.dtype), value.astype(self.dtype)\n query_heads = self._project(self.query_proj, query)\n key_heads = self._project(self.key_proj, key_)\n value_heads = self._project(self.value_proj, value)\n\n query_heads = query_heads.astype(jnp.float32)\n key_heads = key_heads.astype(jnp.float32)\n value_heads = value_heads.astype(jnp.float32)\n\n attn_fn = partial(\n dot_product_attention, dropout=self.dropout, inference=inference\n )\n keys = None if key is None else jax.random.split(key, query_heads.shape[1])\n if mask is not None and mask.ndim == 3:\n # Batch `mask` and `keys` down their 0-th dimension.\n attn = jax.vmap(attn_fn, in_axes=1, out_axes=1)(\n query_heads, key_heads, value_heads, mask=mask, key=keys\n )\n else:\n # Batch `keys` down its 0-th dimension.\n attn = jax.vmap(ft.partial(attn_fn, mask=mask), in_axes=1, out_axes=1)(\n query_heads, key_heads, value_heads, key=keys\n )\n attn = attn.reshape(query_seq_length, -1)\n attn = attn.astype(self.dtype)\n\n return jax.vmap(self.output_proj)(attn)\n\n def _project(self, proj, x):\n seq_length, _ = x.shape\n projection = jax.vmap(proj)(x)\n return projection.reshape(seq_length, self.num_heads, -1)" }, { "identifier": "make_attention_mask", "path": "model/utils.py", "snippet": "def make_attention_mask(\n query_input,\n key_input,\n pairwise_fn: Callable[..., Any] = jnp.multiply,\n extra_batch_dims: int = 0,\n dtype=jnp.float32,\n):\n \"\"\"Mask-making helper for attention weights.\n\n In case of 1d inputs (i.e., `[batch..., len_q]`, `[batch..., len_kv]`, the\n attention weights will be `[batch..., heads, len_q, len_kv]` and this\n function will produce `[batch..., 1, len_q, len_kv]`.\n\n Args:\n query_input: a batched, flat input of query_length size\n key_input: a batched, flat input of key_length size\n pairwise_fn: broadcasting elementwise comparison function\n extra_batch_dims: number of extra batch dims to add singleton axes for, none\n by default\n dtype: mask return dtype\n\n Returns:\n A `[batch..., 1, len_q, len_kv]` shaped mask for 1d attention.\n \"\"\"\n mask = pairwise_fn(\n jnp.expand_dims(query_input, axis=-1), jnp.expand_dims(key_input, axis=-2)\n )\n mask = jnp.expand_dims(mask, axis=-3)\n mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims)))\n return mask.astype(dtype)" }, { "identifier": "make_causal_mask", "path": "model/utils.py", "snippet": "def make_causal_mask(x, extra_batch_dims: int = 0, dtype=jnp.float32):\n \"\"\"Make a causal mask for self-attention.\n\n In case of 1d inputs (i.e., `[batch..., len]`, the self-attention weights\n will be `[batch..., heads, len, len]` and this function will produce a\n causal mask of shape `[batch..., 1, len, len]`.\n\n Args:\n x: input array of shape `[batch..., len]`\n extra_batch_dims: number of batch dims to add singleton axes for, none by\n default\n dtype: mask return dtype\n\n Returns:\n A `[batch..., 1, len, len]` shaped causal mask for 1d attention.\n \"\"\"\n idxs = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)\n return make_attention_mask(\n idxs,\n idxs,\n jnp.greater_equal,\n extra_batch_dims=extra_batch_dims,\n dtype=dtype,\n )" } ]
from typing import List, Optional
from jax.random import PRNGKey
from .attention import MultiheadAttention
from .utils import make_attention_mask, make_causal_mask
import equinox as eqx
import jax
import jax.numpy as jnp
4,580
norm2: eqx.Module dtype: jnp.dtype = eqx.field(static=True) def __init__( self, key: PRNGKey, dim: int, num_heads: int, mult: int = 4, head_dim: Optional[int] = None, dropout: float = 0.0, dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype attn_key, fc_key = jax.random.split(key) if head_dim is None: assert dim % num_heads == 0 head_dim = dim // num_heads self.attn = MultiheadAttention( num_heads=num_heads, query_size=head_dim * num_heads, output_size=dim, use_output_bias=True, dropout_p=dropout, key=attn_key, dtype=self.dtype, ) self.norm1 = eqx.nn.LayerNorm(dim) self.fc = eqx.nn.MLP( dim, dim, width_size=dim * mult, depth=1, activation=jax.nn.silu, key=fc_key, ) self.norm2 = eqx.nn.LayerNorm(dim) def __call__(self, x, mask, key=None): x = x.astype(self.dtype) attn_norm = jax.vmap(self.norm1)(x) attn_output = self.attn(attn_norm, attn_norm, attn_norm, mask, key=key, inference=key is None) fc_output = jax.vmap(self.fc)(jax.vmap(self.norm2)(x)) return x + attn_output + fc_output class Decoder(eqx.Module): layers: List[eqx.Module] def __init__( self, key: PRNGKey, dim: int, num_heads: int, num_layers: int, head_dim: Optional[int] = None, dropout: float = 0.0, dtype: jnp.dtype = jnp.float32, ): keys = jax.random.split(key, num_layers) self.layers = [DecoderLayer(k, dim, num_heads, head_dim=head_dim, dropout=dropout, dtype=dtype) for k in keys] def __call__(self, x, mask, key=None): for layer in self.layers: key, subkey = jax.random.split(key) if (key is not None) else (None, None) x = layer(x, mask, subkey) return x class TchAIkovskyModel(eqx.Module): id_embeddings: eqx.Module pos_embeddings: eqx.Module decoder: eqx.Module norm_out: eqx.Module out_head: eqx.Module dtype: jnp.dtype = eqx.field(static=True) output_dtype: jnp.dtype = eqx.field(static=True) def __init__( self, dim: int, num_heads: int, num_layers: int, vocab_size: int, max_positions: int, head_dim: Optional[int] = None, dropout: float = 0.0, key: PRNGKey = None, dtype: jnp.dtype = jnp.float32, output_dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype self.output_dtype = output_dtype id_embeddings_key, pos_embeddings_key, decoder_key, out_key = jax.random.split(key, 4) self.id_embeddings = eqx.nn.Embedding(vocab_size, dim, key=id_embeddings_key) self.pos_embeddings = eqx.nn.Embedding(max_positions, dim, key=pos_embeddings_key) self.decoder = Decoder( decoder_key, dim, num_heads, num_layers, head_dim=head_dim, dropout=dropout, dtype=dtype, ) self.norm_out = eqx.nn.LayerNorm(dim) self.out_head = eqx.nn.Linear(dim, vocab_size, use_bias=True, key=out_key) def __call__(self, input_ids, position_ids, mask, key=None):
class DecoderLayer(eqx.Module): attn: eqx.Module fc: eqx.Module norm1: eqx.Module norm2: eqx.Module dtype: jnp.dtype = eqx.field(static=True) def __init__( self, key: PRNGKey, dim: int, num_heads: int, mult: int = 4, head_dim: Optional[int] = None, dropout: float = 0.0, dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype attn_key, fc_key = jax.random.split(key) if head_dim is None: assert dim % num_heads == 0 head_dim = dim // num_heads self.attn = MultiheadAttention( num_heads=num_heads, query_size=head_dim * num_heads, output_size=dim, use_output_bias=True, dropout_p=dropout, key=attn_key, dtype=self.dtype, ) self.norm1 = eqx.nn.LayerNorm(dim) self.fc = eqx.nn.MLP( dim, dim, width_size=dim * mult, depth=1, activation=jax.nn.silu, key=fc_key, ) self.norm2 = eqx.nn.LayerNorm(dim) def __call__(self, x, mask, key=None): x = x.astype(self.dtype) attn_norm = jax.vmap(self.norm1)(x) attn_output = self.attn(attn_norm, attn_norm, attn_norm, mask, key=key, inference=key is None) fc_output = jax.vmap(self.fc)(jax.vmap(self.norm2)(x)) return x + attn_output + fc_output class Decoder(eqx.Module): layers: List[eqx.Module] def __init__( self, key: PRNGKey, dim: int, num_heads: int, num_layers: int, head_dim: Optional[int] = None, dropout: float = 0.0, dtype: jnp.dtype = jnp.float32, ): keys = jax.random.split(key, num_layers) self.layers = [DecoderLayer(k, dim, num_heads, head_dim=head_dim, dropout=dropout, dtype=dtype) for k in keys] def __call__(self, x, mask, key=None): for layer in self.layers: key, subkey = jax.random.split(key) if (key is not None) else (None, None) x = layer(x, mask, subkey) return x class TchAIkovskyModel(eqx.Module): id_embeddings: eqx.Module pos_embeddings: eqx.Module decoder: eqx.Module norm_out: eqx.Module out_head: eqx.Module dtype: jnp.dtype = eqx.field(static=True) output_dtype: jnp.dtype = eqx.field(static=True) def __init__( self, dim: int, num_heads: int, num_layers: int, vocab_size: int, max_positions: int, head_dim: Optional[int] = None, dropout: float = 0.0, key: PRNGKey = None, dtype: jnp.dtype = jnp.float32, output_dtype: jnp.dtype = jnp.float32, ): self.dtype = dtype self.output_dtype = output_dtype id_embeddings_key, pos_embeddings_key, decoder_key, out_key = jax.random.split(key, 4) self.id_embeddings = eqx.nn.Embedding(vocab_size, dim, key=id_embeddings_key) self.pos_embeddings = eqx.nn.Embedding(max_positions, dim, key=pos_embeddings_key) self.decoder = Decoder( decoder_key, dim, num_heads, num_layers, head_dim=head_dim, dropout=dropout, dtype=dtype, ) self.norm_out = eqx.nn.LayerNorm(dim) self.out_head = eqx.nn.Linear(dim, vocab_size, use_bias=True, key=out_key) def __call__(self, input_ids, position_ids, mask, key=None):
causal_mask = make_causal_mask(input_ids)[0]
2
2023-11-13 07:31:30+00:00
8k
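Editor's note: the gold next_line of this record (`causal_mask = make_causal_mask(input_ids)[0]`) builds the decoder's attention mask. Below is a minimal sketch, not repo code, with toy sizes and simplified from the make_causal_mask helper quoted in the context: for an unbatched [len] input the helper returns a [1, len, len] array with a singleton heads axis, and dropping that axis with [0] yields the 2-D (q_seq, kv_seq) shape that MultiheadAttention's mask argument accepts.

import jax.numpy as jnp

def make_causal_mask_sketch(x, dtype=jnp.float32):
    # Position i may attend to position j iff i >= j (lower-triangular mask).
    idxs = jnp.arange(x.shape[-1], dtype=jnp.int32)
    mask = jnp.greater_equal(idxs[:, None], idxs[None, :])
    # Add the singleton heads axis, matching the helper's [1, len, len] output.
    return mask[None, :, :].astype(dtype)

input_ids = jnp.zeros((6,), dtype=jnp.int32)   # toy unbatched sequence
full = make_causal_mask_sketch(input_ids)      # shape (1, 6, 6)
causal_mask = full[0]                          # shape (6, 6): the (q_seq, kv_seq) form
assert full.shape == (1, 6, 6) and causal_mask.shape == (6, 6)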
LiquidFun/aoc_tiles
aoc_tiles/drawer.py
[ { "identifier": "color_similarity", "path": "aoc_tiles/colors.py", "snippet": "def color_similarity(color_a, color_b, threshold):\n return abs(luminance(color_a) - luminance(color_b)) < threshold" }, { "identifier": "darker_color", "path": "aoc_tiles/colors.py", "snippet": "def darker_color(c: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:\n return c[0] - 10, c[1] - 10, c[2] - 10, 255" }, { "identifier": "extension_to_colors", "path": "aoc_tiles/colors.py", "snippet": "@lru_cache\ndef extension_to_colors() -> Dict[str, str]:\n extension_to_color = {}\n with open(GITHUB_LANGUAGES_PATH) as file:\n logger.debug(\"Loading github_languages.yaml from {}\", GITHUB_LANGUAGES_PATH)\n yaml_loader = yaml.CLoader if yaml.__with_libyaml__ else yaml.Loader\n if not yaml.__with_libyaml__:\n logger.warning(\"Using slow yaml parser (0.5s vs 0.1s)!\")\n github_languages = yaml.load(file, Loader=yaml_loader)\n logger.debug(\"Loaded github_languages.yaml from {}\", GITHUB_LANGUAGES_PATH)\n for language, data in github_languages.items():\n if \"color\" in data and \"extensions\" in data and data[\"type\"] == \"programming\" and language not in excludes:\n for extension in data[\"extensions\"]:\n extension_to_color[extension.lower()] = data[\"color\"]\n\n extension_to_color.update(includes)\n\n return extension_to_color" }, { "identifier": "Config", "path": "aoc_tiles/config.py", "snippet": "class Config:\n aoc_dir: Union[str, Path] = field(default=\"./\", metadata={\"help\": \"Path to the AoC directory.\", \"type\": str})\n readme_path: Union[str, Path] = field(init=False)\n session_cookie_path: Union[str, Path] = field(init=False)\n aoc_tiles_dir: Union[str, Path] = field(init=False)\n image_dir: Union[str, Path] = field(init=False)\n cache_dir: Union[str, Path] = field(init=False)\n\n verbose: bool = field(default=False, metadata={\"help\": \"Whether to print debug information.\"})\n\n what_to_show_on_right_side: Literal[\"auto\", \"checkmark\", \"time_and_rank\", \"loc\"] = field(\n default=\"auto\", metadata={\n \"help\": \"What information to display on the right side of each tile. \"\n \"'checkmark' only displays a checkmark for each part if the day is solved. \"\n \"'time_and_rank' displays the time and rank on the global leaderboard (requires session.cookie). \"\n \"'loc' displays the number of lines of code of the solution (not implemented). \"\n \"'auto' will use 'time_and_rank' if session.cookie exists, otherwise 'checkmark'.\"}\n )\n count_as_solved_when: Literal[\"auto\", \"on_leaderboard\", \"file_exists\", \"either\", \"both\"] = field(\n default=\"auto\",\n metadata={\n \"help\": \"Condition to count a task as solved. Note that 'on_leaderboard', 'either' and 'both' require a \"\n \"session cookie. 'auto' will use 'both' if session.cookie exists, otherwise 'file_exists'.\"\n },\n )\n language_sorting: List[str] = field(\n default_factory=list,\n metadata={\n \"help\": \"Preferred language extensions order for sorting. For example 'py,rs,js' will make Python \"\n \"solutions appear first, then Rust, then JavaScript, then everything else (alphabetically).\"\n },\n )\n create_all_days: bool = field(default=False, metadata={\"help\": \"Whether to create entries for all days upfront.\"})\n\n auto_add_tiles_to_git: Literal[\"no\", \"add\", \"amend\"] = field(default=\"no\", metadata={\n \"help\": \"Whether to automatically add the tile images to git. 'add' will add new files, 'amend' will add \"\n \"and amend the commit with the new files. 
'no' will not add the files to git.\"})\n only_use_solutions_in_git: bool = field(default=True, metadata={\n \"help\": \"If true, only solutions will be considered which are tracked by git (git added), \"\n \"otherwise all solutions will be used. This is useful for example to ignore auto-generated\"\n \"files, like '.d' in Rust or '.o' files in C++.\"})\n\n show_total_stars_for_all_years: Literal[\"no\", \"auto\", \"yes\"] = field(default=\"auto\", metadata={\n \"help\": \"Whether to add an additional header in front which shows the total collected stars for all years.\"\n \"'auto' will only show the header if you have stars in at least 3 separate years. \"\n \"'yes' will always show the header. 'no' will never show the header.\"})\n\n year_pattern: str = field(\n default=r\"(?<!\\d)(20[123]\\d)(?!\\d)\",\n metadata={\n \"help\": \"Regex pattern for matching years. This extracts the first group as the year and parses it as an \"\n \"integer. Make sure that other numbers are not matched by this pattern! For example, \"\n \"using negative lookbehind and lookaheads is encouraged to avoid matching longer numbers!\"\n },\n )\n day_pattern: str = field(\n default=r\"(?<!\\d)([012]?\\d)(?!\\d)\", metadata={\"help\": \"Regex pattern for matching days. Same as year_pattern.\"}\n )\n exclude_patterns: List[str] = field(\n default_factory=list, metadata={\n \"help\": \"A list of comma separated glob patterns to ignore when looking for solutions. \"\n \"Listing the paths works too. \"\n \"For example: '*.py,*.js', '2023/05/05.c' or '2021/**.py'.\"\n \"Make sure to escape the patterns with single quotes when running from the shell! \"\n \"Do NOT escape them when using the flag in the yaml! \"\n \"Otherwise the qoute will be part of the pattern.\"\n }\n )\n\n overwrite_year: int = field(\n default=None,\n metadata={\n \"help\": \"If your repository only contains a single year and it cannot be parsed from the path, then you \"\n \"should use this to overwrite the year. Every solution is presumed to be for this year.\"\n },\n )\n\n contrast_improvement_type: Literal[\"none\", \"outline\", \"dark\"] = field(\n default=\"outline\",\n metadata={\n \"help\": \"Some languages have very light colors and are hard to see with a white font. Here you can choose \"\n \"how the text color changes when the background is too light. 'dark' makes the font dark, \"\n \"'outline' adds a black outline.\"\n },\n )\n contrast_improvement_threshold: int = field(\n default=30, metadata={\"help\": \"Threshold for contrast improvement feature (between 0 and 255).\"}\n )\n outline_color: Union[str, Tuple] = field(\n default=\"#6C6A6A\", metadata={\"help\": \"Color used for outlining elements.\", \"type\": str}\n )\n not_completed_color: Union[str, Tuple] = field(\n default=\"#333333\", metadata={\"help\": \"Color to signify incomplete tasks.\", \"type\": str}\n )\n top100_color: Union[str, Tuple] = field(\n default=\"#ffdd00\", metadata={\"help\": \"Color to highlight top 100 ranking. Only used if session\"\n \"cookie is provided.\", \"type\": str}\n )\n text_color: Union[str, Tuple] = field(default=\"#FFFFFF\", metadata={\"help\": \"Text color.\", \"type\": str})\n\n tile_width_px: str = field(default=\"161px\", metadata={\"help\": \"Width of tiles in pixels. 
You likely don't need\"\n \"to change this.\"})\n\n def __post_init__(self):\n self.aoc_dir = Path(self.aoc_dir)\n\n if not hasattr(self, \"readme_path\"):\n readmes = [path for path in self.aoc_dir.iterdir() if path.name.lower() == \"readme.md\"]\n if len(readmes) == 0:\n exit(f\"[ERROR] No README.md found in the root directory of the repository '{self.aoc_dir}'.\")\n elif len(readmes) > 1:\n exit(f\"[ERROR] Multiple README.md files found in the root directory of the repository {readmes}.\")\n self.readme_path = readmes[0]\n\n if not hasattr(self, \"aoc_tiles_dir\"):\n self.aoc_tiles_dir = self.aoc_dir / \".aoc_tiles\"\n self.aoc_tiles_dir.mkdir(exist_ok=True)\n\n self.running_lock_path = self.aoc_tiles_dir / \"running.lock\"\n\n if not hasattr(self, \"session_cookie_path\"):\n self.session_cookie_path = self.aoc_tiles_dir / \"session.cookie\"\n if not self.session_cookie_path.exists():\n self.session_cookie_path = self.aoc_dir / \"session.cookie\"\n\n if not hasattr(self, \"image_dir\"):\n self.image_dir = self.aoc_tiles_dir / \"tiles\"\n\n if not hasattr(self, \"cache_dir\"):\n self.cache_dir = self.aoc_tiles_dir / \"cache\"\n\n if self.count_as_solved_when == \"auto\":\n self.count_as_solved_when = \"both\" if self.session_cookie_path.exists() else \"file_exists\"\n\n if self.what_to_show_on_right_side == \"auto\":\n self.what_to_show_on_right_side = \"time_and_rank\" if self.session_cookie_path.exists() else \"checkmark\"\n\n self.outline_color = ImageColor.getrgb(self.outline_color)\n self.not_completed_color = ImageColor.getrgb(self.not_completed_color)\n self.text_color = ImageColor.getrgb(self.text_color)\n self.top100_color = ImageColor.getrgb(self.top100_color)\n\n for i, suffix in enumerate(self.language_sorting):\n if not suffix.startswith(\".\"):\n self.language_sorting[i] = \".\" + suffix\n\n logger.remove()\n if self.verbose:\n logger.add(sys.stderr, level=\"DEBUG\")\n\n logger.debug(self)" }, { "identifier": "main_font", "path": "aoc_tiles/fonts.py", "snippet": "def main_font(size: int) -> ImageFont:\n return get_font(size, FONTS_PATH / \"PaytoneOne.ttf\")" }, { "identifier": "secondary_font", "path": "aoc_tiles/fonts.py", "snippet": "def secondary_font(size: int) -> ImageFont:\n return get_font(size, FONTS_PATH / \"SourceCodePro-Regular.otf\")" }, { "identifier": "DayScores", "path": "aoc_tiles/leaderboard.py", "snippet": "class DayScores:\n time1: Union[str, None] = None\n rank1: Union[str, None] = None\n score1: Union[str, None] = None\n time2: Union[str, None] = None\n rank2: Union[str, None] = None\n score2: Union[str, None] = None" } ]
import math
from functools import partial
from pathlib import Path
from typing import List, Tuple, Union, Dict
from PIL import ImageColor, Image
from PIL.ImageDraw import ImageDraw
from aoc_tiles.colors import color_similarity, darker_color, extension_to_colors
from aoc_tiles.config import Config
from aoc_tiles.fonts import main_font, secondary_font
from aoc_tiles.leaderboard import DayScores
3,710
def format_time(time: str) -> str: """Formats time as mm:ss if the time is below 1 hour, otherwise it returns >1h to a max of >24h >>> format_time("00:58:32") '58:32' >>> format_time(">1h") ' >1h' """ time = time.replace("&gt;", ">") if ">" in time: formatted = time else: h, m, s = time.split(":") formatted = f">{h}h" if int(h) >= 1 else f"{m:02}:{s:02}" return f"{formatted:>5}" class TileDrawer: def __init__(self, config: Config): self.config = config def draw_tile( self, day: str, languages: List[str], day_scores: Union[DayScores, None], path: Path, stars: int ): """Saves a graphic for a given day and year. Returns the path to it.""" image = self.get_alternating_background(languages, stars == 2) drawer = ImageDraw(image) text_kwargs = {"fill": self.config.text_color} # Get all colors of the day, check if any one is similar to TEXT_COLOR # If yes, add outline for language in languages: color = ImageColor.getrgb(extension_to_colors()[language]) if color_similarity(color, self.config.text_color, self.config.contrast_improvement_threshold): if "outline" in self.config.contrast_improvement_type: text_kwargs["stroke_width"] = 1 text_kwargs["stroke_fill"] = self.config.outline_color if "dark" in self.config.contrast_improvement_type: text_kwargs["fill"] = self.config.not_completed_color break draw_text = lambda *args, **kwargs: drawer.text(*args, **kwargs, **text_kwargs) draw_line = partial(drawer.line, fill=text_kwargs["fill"], width=2) # === Left side === draw_text((3, -5), "Day", align="left", font=main_font(20)) draw_text((1, -10), str(day), align="center", font=main_font(75)) # Calculate font size based on number of characters, because it might overflow lang_as_str = " ".join(languages) lang_font_size = max(6, int(18 - max(0, len(lang_as_str) - 8) * 1.3)) draw_text((0, 74), lang_as_str, align="left", font=secondary_font(lang_font_size)) # === Right side (P1 & P2) === for part in (1, 2): y = 50 if part == 2 else 0 time = getattr(day_scores, f"time{part}", None) rank = getattr(day_scores, f"rank{part}", None) color_override = self.config.top100_color if rank and int(rank) <= 100 else self.config.text_color text_kwargs["fill"] = color_override if stars >= part: draw_text((104, -5 + y), f"P{part} ", align="left", font=main_font(25)) if self.config.what_to_show_on_right_side == "checkmark" or day_scores is None: draw_line((160, 35 + y, 150, 25 + y)) draw_line((160, 35 + y, 180, 15 + y)) elif self.config.what_to_show_on_right_side == "time_and_rank": draw_text((105, 25 + y), "time", align="right", font=secondary_font(10)) draw_text((105, 35 + y), "rank", align="right", font=secondary_font(10)) draw_text((143, 3 + y), format_time(time), align="right", font=secondary_font(18)) draw_text((133, 23 + y), f"{rank:>6}", align="right", font=secondary_font(18)) elif self.config.what_to_show_on_right_side == "loc": raise NotImplementedError("loc is not implemented yet") else: # Draw cross draw_line((140, 15 + y, 160, 35 + y)) draw_line((140, 35 + y, 160, 15 + y)) if day_scores is None and not languages: draw_line((15, 85, 85, 85)) # === Divider lines === draw_line((100, 5, 100, 95), width=1) draw_line((105, 50, 195, 50), width=1) image.save(path) def get_alternating_background(self, languages, both_parts_completed=True, *, stripe_width=20): colors = [ImageColor.getrgb(extension_to_colors()[language]) for language in languages] if len(colors) == 1:
def format_time(time: str) -> str: """Formats time as mm:ss if the time is below 1 hour, otherwise it returns >1h to a max of >24h >>> format_time("00:58:32") '58:32' >>> format_time(">1h") ' >1h' """ time = time.replace("&gt;", ">") if ">" in time: formatted = time else: h, m, s = time.split(":") formatted = f">{h}h" if int(h) >= 1 else f"{m:02}:{s:02}" return f"{formatted:>5}" class TileDrawer: def __init__(self, config: Config): self.config = config def draw_tile( self, day: str, languages: List[str], day_scores: Union[DayScores, None], path: Path, stars: int ): """Saves a graphic for a given day and year. Returns the path to it.""" image = self.get_alternating_background(languages, stars == 2) drawer = ImageDraw(image) text_kwargs = {"fill": self.config.text_color} # Get all colors of the day, check if any one is similar to TEXT_COLOR # If yes, add outline for language in languages: color = ImageColor.getrgb(extension_to_colors()[language]) if color_similarity(color, self.config.text_color, self.config.contrast_improvement_threshold): if "outline" in self.config.contrast_improvement_type: text_kwargs["stroke_width"] = 1 text_kwargs["stroke_fill"] = self.config.outline_color if "dark" in self.config.contrast_improvement_type: text_kwargs["fill"] = self.config.not_completed_color break draw_text = lambda *args, **kwargs: drawer.text(*args, **kwargs, **text_kwargs) draw_line = partial(drawer.line, fill=text_kwargs["fill"], width=2) # === Left side === draw_text((3, -5), "Day", align="left", font=main_font(20)) draw_text((1, -10), str(day), align="center", font=main_font(75)) # Calculate font size based on number of characters, because it might overflow lang_as_str = " ".join(languages) lang_font_size = max(6, int(18 - max(0, len(lang_as_str) - 8) * 1.3)) draw_text((0, 74), lang_as_str, align="left", font=secondary_font(lang_font_size)) # === Right side (P1 & P2) === for part in (1, 2): y = 50 if part == 2 else 0 time = getattr(day_scores, f"time{part}", None) rank = getattr(day_scores, f"rank{part}", None) color_override = self.config.top100_color if rank and int(rank) <= 100 else self.config.text_color text_kwargs["fill"] = color_override if stars >= part: draw_text((104, -5 + y), f"P{part} ", align="left", font=main_font(25)) if self.config.what_to_show_on_right_side == "checkmark" or day_scores is None: draw_line((160, 35 + y, 150, 25 + y)) draw_line((160, 35 + y, 180, 15 + y)) elif self.config.what_to_show_on_right_side == "time_and_rank": draw_text((105, 25 + y), "time", align="right", font=secondary_font(10)) draw_text((105, 35 + y), "rank", align="right", font=secondary_font(10)) draw_text((143, 3 + y), format_time(time), align="right", font=secondary_font(18)) draw_text((133, 23 + y), f"{rank:>6}", align="right", font=secondary_font(18)) elif self.config.what_to_show_on_right_side == "loc": raise NotImplementedError("loc is not implemented yet") else: # Draw cross draw_line((140, 15 + y, 160, 35 + y)) draw_line((140, 35 + y, 160, 15 + y)) if day_scores is None and not languages: draw_line((15, 85, 85, 85)) # === Divider lines === draw_line((100, 5, 100, 95), width=1) draw_line((105, 50, 195, 50), width=1) image.save(path) def get_alternating_background(self, languages, both_parts_completed=True, *, stripe_width=20): colors = [ImageColor.getrgb(extension_to_colors()[language]) for language in languages] if len(colors) == 1:
colors.append(darker_color(colors[0]))
1
2023-11-14 21:41:12+00:00
8k
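Editor's note: this record's gold next_line appends a second stripe color when a tile has only a single language. The self-contained sketch below copies the darker_color body from the context above; the example color is an assumption (Python's GitHub language color #3572A5, as ImageColor.getrgb would return it) and is not taken from the record.

from typing import Tuple

def darker_color(c: Tuple[int, ...]) -> Tuple[int, int, int, int]:
    # Copied from the record's context: shift RGB down by 10, force alpha to 255.
    return c[0] - 10, c[1] - 10, c[2] - 10, 255

colors = [(53, 114, 165)]                  # assumed: ImageColor.getrgb("#3572A5")
if len(colors) == 1:                       # single language -> derive a second stripe
    colors.append(darker_color(colors[0]))
assert colors[1] == (43, 104, 155, 255)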
etri-crossmodal/gbswt5
gbswt5/modeling_gbst5.py
[ { "identifier": "GBSWT5Config", "path": "gbswt5/configuration_gbst5.py", "snippet": "class GBSWT5Config(PretrainedConfig):\n \"\"\" Based on models.t5. configuration_t5. T5Config in hf Transformers. \"\"\"\n model_type = \"gbswt5\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n attribute_map = {\"hidden_size\": \"d_model\",\n \"num_attention_heads\": \"num_heads\",\n \"num_hidden_layers\": \"num_layers\"}\n\n def __init__(\n self,\n vocab_size=384,\n d_model=512,\n d_kv=64,\n d_ff=2048,\n num_layers=6,\n num_decoder_layers=None,\n num_heads=8,\n relative_attention_num_buckets=32,\n relative_attention_max_distance=128,\n dropout_rate=0.1,\n layer_norm_epsilon=1e-6,\n initializer_factor=1.0,\n feed_forward_proj=\"relu\",\n is_encoder_decoder=True,\n use_cache=True,\n pad_token_id=0,\n eos_token_id=1,\n max_subword_block_size=None, # GBSWT-related options here from\n subword_blocks=_BLOCKS,\n downsample_factor=1,\n score_consensus_attn=True,\n z_loss=1e-4,\n gbst_batchnorm=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.d_model = d_model\n self.d_kv = d_kv\n self.d_ff = d_ff\n self.num_layers = num_layers\n self.num_decoder_layers = (\n num_decoder_layers if num_decoder_layers is not None else self.num_layers\n ) # default = symmetry\n self.num_heads = num_heads\n self.relative_attention_num_buckets = relative_attention_num_buckets\n self.relative_attention_max_distance = relative_attention_max_distance\n self.dropout_rate = dropout_rate\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_factor = initializer_factor\n self.feed_forward_proj = feed_forward_proj\n self.use_cache = use_cache\n\n act_info = self.feed_forward_proj.split(\"-\")\n self.dense_act_fn = act_info[-1]\n self.is_gated_act = act_info[0] == \"gated\"\n\n # GBSWT-related configurations\n self.max_subword_block_size = max_subword_block_size\n self.subword_blocks = subword_blocks\n self.downsample_factor = downsample_factor\n self.score_consensus_attn = score_consensus_attn\n self.gbst_batchnorm = gbst_batchnorm\n\n # z_loss for computational stability.\n # see https://github.com/tensorflow/mesh/blob \\\n # /fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\n # (1) logits이 0으로 부터 너무 멀어지게 드리프팅 되지 않도록 하여, bf16에서 발생하는\n # round-off error를 방지하기 위함. (2) 로짓이 normalized log-probabilities가 되도록 제고한다.\n self.z_loss = z_loss\n\n if self.subword_blocks is not None and isinstance(self.subword_blocks, list):\n for idx, elem in enumerate(self.subword_blocks):\n self.subword_blocks[idx] = tuple(elem)\n self.subword_blocks = tuple(self.subword_blocks)\n\n if len(act_info) > 1 and act_info[0] != \"gated\" or len(act_info) > 2:\n raise ValueError(\n f\"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.\"\n \"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. \"\n \"'gated-gelu' or 'relu'\"\n )\n\n # for backwards compatibility\n if feed_forward_proj == \"gated-gelu\":\n self.dense_act_fn = \"gelu_new\"\n\n super().__init__(\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n is_encoder_decoder=is_encoder_decoder,\n **kwargs,\n )" }, { "identifier": "GBSWT", "path": "gbswt5/gbst.py", "snippet": "class GBSWT(nn.Module):\n \"\"\" Gradient-based Sub-Word Tokenizer implementation. 
\"\"\"\n def __init__(self, embed_tokens,\n max_block_size=None,\n blocks=_BLOCKS,\n downsample_factor=1,\n score_consensus_attn=True,\n use_bn=False,):\n super().__init__()\n num_tokens, dim = embed_tokens.weight.shape\n\n assert (max_block_size is not None) ^ (blocks is not None), \\\n 'max_block_size or blocks must be given.'\n if blocks is None:\n self.blocks = tuple(map(lambda elem: (elem, 0), range(1, max_block_size+1)))\n else:\n if not isinstance(blocks, tuple):\n raise ValueError('blocks must be assigned as a tuple')\n self.blocks = tuple(map(lambda elem: elem if isinstance(elem, tuple) else (elem, 0), blocks))\n if not all([(offset < block_size) for block_size, offset in self.blocks]):\n raise ValueError('Offset must be smaller than given block size.')\n max_block_size = max(list(map(lambda x: x[0], self.blocks)))\n\n assert downsample_factor <= max_block_size, \\\n 'downsample factor must be less than the max_block_size.'\n\n self.downsample_factor = downsample_factor\n self.score_consensus_attn = score_consensus_attn\n self.use_bn = use_bn\n logger.debug(f\"GBSWT Subword Block Combinations: {self.blocks}\")\n logger.debug(f\"GBSWT Downsampling factor: {self.downsample_factor}, use BatchNorm: {self.use_bn}\")\n\n def lcm(*num):\n return int(functools.reduce(lambda x, y: int((x * y) / math.gcd(x, y)), num, 1))\n\n self.block_pad_multiple = lcm(*[block_size for block_size, _ in self.blocks])\n #print(f\"block_pad_multiple: {self.block_pad_multiple}\")\n\n # layer definition\n self.embeds = embed_tokens\n self.positional_convol = nn.Sequential(\n Padding((0, 0, 0, max_block_size-1)),\n Rearrange('b s d -> b d s'),\n Depthwise1dConv(dim, dim, krnl_size=max_block_size, use_bn=self.use_bn,),\n Rearrange('b d s -> b s d'))\n self.cand_scoring = nn.Sequential(\n nn.Linear(dim, 1),\n Rearrange('... () -> ...'))\n\n def _init_weights(self, factor:float=0.05):\n self.positional_convol[2]._init_weights(factor)\n #print(f\"GBSTW weight initialization called: before: {self.cand_scoring[0].weight.data}\")\n self.cand_scoring[0].weight.data.normal_(mean=0.0, std=factor * 1.0)\n #print(f\"GBSTW weight initialization called: after: {self.cand_scoring[0].weight.data}\")\n\n def get_blocks(self):\n \"\"\" return GBST candidate blocking list. 
\"\"\"\n return self.blocks\n\n @torch.cuda.amp.autocast()\n def forward(self, in_tensor, attention_mask=None):\n b, s = in_tensor.shape\n #print(f\"initial shape: b, s : {b}, {s}, in_tensor.shape: {in_tensor.shape}\")\n mask = attention_mask\n #print(f\"mask: {mask}\")\n block_multi, ds_factor = self.block_pad_multiple, self.downsample_factor\n\n in_tensor = self.embeds(in_tensor)\n in_tensor = self.positional_convol(in_tensor)\n in_tensor = pad_to_multiple(in_tensor, block_multi,\n seq_dim=1, dim=-2, value=0.0)\n if mask is not None:\n mask = pad_to_multiple(mask, block_multi,\n seq_dim=1, dim=-1, value=False)\n\n def _masked_mean(in_tensor:Tensor, mask:Tensor, dim:int=-1):\n len_diff = len(in_tensor.shape) - len(mask.shape)\n mask = torch.unsqueeze(mask, dim=-len_diff)\n in_tensor.masked_fill_(~(mask.bool()), 0.)\n\n total_elems = mask.sum(dim=dim)\n mean = in_tensor.sum(dim=dim) / total_elems.clamp(min=1.)\n mean.masked_fill_((total_elems == 0), 0.)\n return mean.float()\n\n block_reprs, block_masks = [], []\n\n # 이제 입력 시퀀스를 cloning해서 후보를 세팅\n for block_size, offset in self.blocks:\n block_in = in_tensor.clone()\n if mask is not None:\n block_mask = mask.clone()\n need_padding = offset > 0\n\n if need_padding:\n loff, roff = (block_size - offset), offset\n #print(f\"loff: {loff}, roff: {roff}\")\n block_in = F.pad(block_in, (0, 0, loff, roff), value=0.0)\n if mask is not None:\n block_mask = F.pad(block_mask, (0, 0, loff, roff), value=False)\n\n blks = rearrange(block_in, 'b (s m) d -> b s m d', m=block_size)\n if mask is not None:\n mask_blks = rearrange(block_mask, 'b (s m) -> b s m', m=block_size)\n blk_repr = _masked_mean(blks, mask_blks, dim=-2)\n else:\n blk_repr = blks.mean(dim=-2)\n\n blk_repr = repeat(blk_repr, 'b s d -> b (s m) d', m=block_size)\n\n if need_padding:\n blk_repr = blk_repr[:, loff:-roff]\n\n block_reprs.append(blk_repr)\n\n if mask is not None:\n mask_blks = torch.any(mask_blks, dim=-1)\n mask_blks = repeat(mask_blks, 'b s -> b (s m)', m=block_size)\n if need_padding:\n mask_blks = mask_blks[:, loff:-roff]\n block_masks.append(mask_blks)\n\n # stack them all\n block_reprs = torch.stack(block_reprs, dim=2,)\n scores = self.cand_scoring(block_reprs)\n\n if mask is not None:\n block_masks = torch.stack(block_masks, dim=2)\n max_neg_val = -torch.finfo(scores.dtype).max\n scores = scores.masked_fill(~block_masks, max_neg_val)\n\n scores = scores.softmax(dim=2)\n\n # cheap consensus attention, as equation (5) in paper.\n if self.score_consensus_attn:\n score_sim = einsum('b i d, b j d -> b i j', scores, scores)\n\n if mask is not None:\n cross_mask = rearrange(mask, 'b i -> b i ()') * rearrange(mask, 'b j -> b () j')\n max_neg_val = -torch.finfo(score_sim.dtype).max\n score_sim = score_sim.masked_fill((~(cross_mask.bool())), max_neg_val)\n\n score_attn = score_sim.softmax(dim=-1)\n scores = einsum('b i j, b j m -> b i m', score_attn, scores)\n\n scores = rearrange(scores, 'b n m -> b n m ()')\n in_tensor = (block_reprs * scores).sum(dim=2)\n\n @torch.jit.script\n def _reshape_input_tensor(in_tensor:Tensor, s:int, d:int):\n # get divisible length to pad\n m = int(math.ceil(s / d) * d)\n #print(f\"_reshape_input_tensor: {m}\")\n return in_tensor[:, :m]\n\n in_tensor = _reshape_input_tensor(in_tensor, s, ds_factor)\n if mask is not None:\n mask = _reshape_input_tensor(mask, s, ds_factor)\n\n # downsample with mean pooling\n in_tensor = rearrange(in_tensor, 'b (n m) d -> b n m d', m=ds_factor)\n if mask is not None:\n mask = rearrange(mask, 'b (n m) -> b n m', 
m=ds_factor)\n in_tensor = _masked_mean(in_tensor, mask, dim=2)\n mask = torch.any(mask, dim=-1)\n else:\n in_tensor = in_tensor.mean(dim=-2)\n\n # tuple을 반환하기 때문에, forward()에서 [0]을 취해 바꿔줘야 한다\n return in_tensor, mask" } ]
import copy
import torch
from typing import Optional, Union, Tuple
from torch import nn
from transformers import add_start_docstrings
from transformers.utils import logging
from transformers.modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from transformers.models.t5.modeling_t5 import (
    T5LayerNorm,
    T5Block,
    T5Stack,
    T5Model,
    T5PreTrainedModel,
    T5ForConditionalGeneration,
    T5EncoderModel,
    T5DenseActDense,
    T5DenseGatedActDense,
    T5Attention,
    T5_START_DOCSTRING
)
from .configuration_gbst5 import GBSWT5Config
from .gbst import GBSWT
4,348
""" hf transformers-compatible GBST + T5 Model implementation. several methods are copying from huggingface/transformers/models/t5/modeling_t5.py as Implementation Standards for compatibility. (version 4.28.1) hf transformers' modeling_t5.py file is distributed under Apache 2.0 License. Copyright (C) 2023, ETRI LIRS, Jong-hun Shin. """ logger = logging.get_logger(__name__) class GBSWT5PreTrainedModel(T5PreTrainedModel): config_class = GBSWT5Config base_model_prefix = "GBSWT5" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["T5Block"] _keep_in_fp32_modules = ["wo"] def _init_weights(self, module): """Initialize the weights. 대부분은 T5PreTrainedModel을 따른다. """ factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, T5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, ( GBSWT5Model, GBSWT5ForConditionalGeneration, GBSWT5EncoderModel,), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "qa_outputs"): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, T5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
""" hf transformers-compatible GBST + T5 Model implementation. several methods are copying from huggingface/transformers/models/t5/modeling_t5.py as Implementation Standards for compatibility. (version 4.28.1) hf transformers' modeling_t5.py file is distributed under Apache 2.0 License. Copyright (C) 2023, ETRI LIRS, Jong-hun Shin. """ logger = logging.get_logger(__name__) class GBSWT5PreTrainedModel(T5PreTrainedModel): config_class = GBSWT5Config base_model_prefix = "GBSWT5" is_parallelizable = True supports_gradient_checkpointing = True _no_split_modules = ["T5Block"] _keep_in_fp32_modules = ["wo"] def _init_weights(self, module): """Initialize the weights. 대부분은 T5PreTrainedModel을 따른다. """ factor = self.config.initializer_factor # Used for testing weights initialization if isinstance(module, T5LayerNorm): module.weight.data.fill_(factor * 1.0) elif isinstance( module, ( GBSWT5Model, GBSWT5ForConditionalGeneration, GBSWT5EncoderModel,), ): # Mesh TensorFlow embeddings initialization # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L1624 module.shared.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "lm_head") and not self.config.tie_word_embeddings: module.lm_head.weight.data.normal_(mean=0.0, std=factor * 1.0) if hasattr(module, "qa_outputs"): module.qa_outputs.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) module.qa_outputs.bias.data.zero_() elif isinstance(module, T5DenseActDense): # Mesh TensorFlow FF initialization # See https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/transformer_layers.py#L56 # and https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L89 module.wi.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi, "bias") and module.wi.bias is not None: module.wi.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5DenseGatedActDense): module.wi_0.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_0, "bias") and module.wi_0.bias is not None: module.wi_0.bias.data.zero_() module.wi_1.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_model) ** -0.5)) if hasattr(module.wi_1, "bias") and module.wi_1.bias is not None: module.wi_1.bias.data.zero_() module.wo.weight.data.normal_(mean=0.0, std=factor * ((self.config.d_ff) ** -0.5)) if hasattr(module.wo, "bias") and module.wo.bias is not None: module.wo.bias.data.zero_() elif isinstance(module, T5Attention): # Mesh TensorFlow attention initialization to avoid scaling before softmax # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/attention.py#L136 d_model = self.config.d_model key_value_proj_dim = self.config.d_kv n_heads = self.config.num_heads module.q.weight.data.normal_(mean=0.0, std=factor * ((d_model * key_value_proj_dim) ** -0.5)) module.k.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.v.weight.data.normal_(mean=0.0, std=factor * (d_model**-0.5)) module.o.weight.data.normal_(mean=0.0, std=factor * ((n_heads * key_value_proj_dim) ** -0.5)) if module.has_relative_attention_bias: module.relative_attention_bias.weight.data.normal_(mean=0.0, std=factor * ((d_model) ** -0.5))
elif isinstance(module, GBSWT):
1
2023-11-17 02:04:46+00:00
8k
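Editor's note: the gold next_line here (`elif isinstance(module, GBSWT):`) routes weight initialization to the gradient-based subword tokenizer. The toy sketch below is an editor's simplification with assumed shapes; it omits the real GBSWT.forward's block offsets, masking, positional convolution, consensus attention and downsampling, and shows only the core idea from the context: mean-pool candidate character blocks, score each candidate, and take a softmax-weighted sum so block selection stays differentiable.

import torch

def gbst_step(x: torch.Tensor, scorer: torch.nn.Linear, block_sizes=(1, 2, 4)):
    b, s, d = x.shape                      # (batch, seq, dim); s divisible by each block size
    reprs = []
    for m in block_sizes:
        blk = x.view(b, s // m, m, d).mean(dim=2)       # mean-pool every block of size m
        reprs.append(blk.repeat_interleave(m, dim=1))   # broadcast back to length s
    reprs = torch.stack(reprs, dim=2)                   # (b, s, n_candidates, d)
    scores = scorer(reprs).squeeze(-1).softmax(dim=2)   # (b, s, n_candidates)
    return (reprs * scores.unsqueeze(-1)).sum(dim=2)    # (b, s, d)

x = torch.randn(1, 8, 16)
out = gbst_step(x, torch.nn.Linear(16, 1))
assert out.shape == x.shape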
dazhangyu123/ACMIL
wsi_core/WholeSlideImage.py
[ { "identifier": "savePatchIter_bag_hdf5", "path": "wsi_core/wsi_utils.py", "snippet": "def savePatchIter_bag_hdf5(patch):\n x, y, cont_idx, patch_level, downsample, downsampled_level_dim, level_dim, img_patch, name, save_path= tuple(patch.values())\n img_patch = np.array(img_patch)[np.newaxis,...]\n img_shape = img_patch.shape\n\n file_path = os.path.join(save_path, name)+'.h5'\n file = h5py.File(file_path, \"a\")\n\n dset = file['imgs']\n dset.resize(len(dset) + img_shape[0], axis=0)\n dset[-img_shape[0]:] = img_patch\n\n if 'coords' in file:\n coord_dset = file['coords']\n coord_dset.resize(len(coord_dset) + img_shape[0], axis=0)\n coord_dset[-img_shape[0]:] = (x,y)\n\n file.close()" }, { "identifier": "initialize_hdf5_bag", "path": "wsi_core/wsi_utils.py", "snippet": "def initialize_hdf5_bag(first_patch, save_coord=False):\n x, y, cont_idx, patch_level, downsample, downsampled_level_dim, level_dim, img_patch, name, save_path = tuple(first_patch.values())\n file_path = os.path.join(save_path, name)+'.h5'\n file = h5py.File(file_path, \"w\")\n img_patch = np.array(img_patch)[np.newaxis,...]\n dtype = img_patch.dtype\n\n # Initialize a resizable dataset to hold the output\n img_shape = img_patch.shape\n maxshape = (None,) + img_shape[1:] #maximum dimensions up to which dataset maybe resized (None means unlimited)\n dset = file.create_dataset('imgs', \n shape=img_shape, maxshape=maxshape, chunks=img_shape, dtype=dtype)\n\n dset[:] = img_patch\n dset.attrs['patch_level'] = patch_level\n dset.attrs['wsi_name'] = name\n dset.attrs['downsample'] = downsample\n dset.attrs['level_dim'] = level_dim\n dset.attrs['downsampled_level_dim'] = downsampled_level_dim\n\n if save_coord:\n coord_dset = file.create_dataset('coords', shape=(1, 2), maxshape=(None, 2), chunks=(1, 2), dtype=np.int32)\n coord_dset[:] = (x,y)\n\n file.close()\n return file_path" }, { "identifier": "coord_generator", "path": "wsi_core/wsi_utils.py", "snippet": "def coord_generator(x_start, x_end, x_step, y_start, y_end, y_step, args_dict=None):\n for x in range(x_start, x_end, x_step):\n for y in range(y_start, y_end, y_step):\n if args_dict is not None:\n process_dict = args_dict.copy()\n process_dict.update({'pt':(x,y)})\n yield process_dict\n else:\n yield (x,y)" }, { "identifier": "save_hdf5", "path": "wsi_core/wsi_utils.py", "snippet": "def save_hdf5(output_path, asset_dict, attr_dict= None, mode='a'):\n file = h5py.File(output_path, mode)\n for key, val in asset_dict.items():\n data_shape = val.shape\n if key not in file:\n data_type = val.dtype\n chunk_shape = (1, ) + data_shape[1:]\n maxshape = (None, ) + data_shape[1:]\n dset = file.create_dataset(key, shape=data_shape, maxshape=maxshape, chunks=chunk_shape, dtype=data_type)\n dset[:] = val\n if attr_dict is not None:\n if key in attr_dict.keys():\n for attr_key, attr_val in attr_dict[key].items():\n dset.attrs[attr_key] = attr_val\n else:\n dset = file[key]\n dset.resize(len(dset) + data_shape[0], axis=0)\n dset[-data_shape[0]:] = val\n file.close()\n return output_path" }, { "identifier": "sample_indices", "path": "wsi_core/wsi_utils.py", "snippet": "def sample_indices(scores, k, start=0.48, end=0.52, convert_to_percentile=False, seed=1):\n np.random.seed(seed)\n if convert_to_percentile:\n end_value = np.quantile(scores, end)\n start_value = np.quantile(scores, start)\n else:\n end_value = end\n start_value = start\n score_window = np.logical_and(scores >= start_value, scores <= end_value)\n indices = np.where(score_window)[0]\n if len(indices) < 1:\n return -1 \n 
else:\n return np.random.choice(indices, min(k, len(indices)), replace=False)" }, { "identifier": "screen_coords", "path": "wsi_core/wsi_utils.py", "snippet": "def screen_coords(scores, coords, top_left, bot_right):\n bot_right = np.array(bot_right)\n top_left = np.array(top_left)\n mask = np.logical_and(np.all(coords >= top_left, axis=1), np.all(coords <= bot_right, axis=1))\n scores = scores[mask]\n coords = coords[mask]\n return scores, coords" }, { "identifier": "isBlackPatch", "path": "wsi_core/wsi_utils.py", "snippet": "def isBlackPatch(patch, rgbThresh=40):\n return True if np.all(np.mean(patch, axis = (0,1)) < rgbThresh) else False" }, { "identifier": "isWhitePatch", "path": "wsi_core/wsi_utils.py", "snippet": "def isWhitePatch(patch, satThresh=5):\n patch_hsv = cv2.cvtColor(patch, cv2.COLOR_RGB2HSV)\n return True if np.mean(patch_hsv[:,:,1]) < satThresh else False" }, { "identifier": "to_percentiles", "path": "wsi_core/wsi_utils.py", "snippet": "def to_percentiles(scores):\n from scipy.stats import rankdata\n scores = rankdata(scores, 'average')/len(scores) * 100 \n return scores" }, { "identifier": "isInContourV1", "path": "wsi_core/util_classes.py", "snippet": "class isInContourV1(Contour_Checking_fn):\n\tdef __init__(self, contour):\n\t\tself.cont = contour\n\n\tdef __call__(self, pt): \n\t\treturn 1 if cv2.pointPolygonTest(self.cont, pt, False) >= 0 else 0" }, { "identifier": "isInContourV2", "path": "wsi_core/util_classes.py", "snippet": "class isInContourV2(Contour_Checking_fn):\n\tdef __init__(self, contour, patch_size):\n\t\tself.cont = contour\n\t\tself.patch_size = patch_size\n\n\tdef __call__(self, pt): \n\t\treturn 1 if cv2.pointPolygonTest(self.cont, (pt[0]+self.patch_size//2, pt[1]+self.patch_size//2), False) >= 0 else 0" }, { "identifier": "isInContourV3_Easy", "path": "wsi_core/util_classes.py", "snippet": "class isInContourV3_Easy(Contour_Checking_fn):\n\tdef __init__(self, contour, patch_size, center_shift=0.5):\n\t\tself.cont = contour\n\t\tself.patch_size = patch_size\n\t\tself.shift = int(patch_size//2*center_shift)\n\tdef __call__(self, pt): \n\t\tcenter = (pt[0]+self.patch_size//2, pt[1]+self.patch_size//2)\n\t\tif self.shift > 0:\n\t\t\tall_points = [(center[0]-self.shift, center[1]-self.shift),\n\t\t\t\t\t\t (center[0]+self.shift, center[1]+self.shift),\n\t\t\t\t\t\t (center[0]+self.shift, center[1]-self.shift),\n\t\t\t\t\t\t (center[0]-self.shift, center[1]+self.shift)\n\t\t\t\t\t\t ]\n\t\telse:\n\t\t\tall_points = [center]\n\t\t\n\t\tfor points in all_points:\n\t\t\tpoints = (int(points[0]),int(points[1]))\n\t\t\tif cv2.pointPolygonTest(self.cont, points, False) >= 0:\n\t\t\t\treturn 1\n\t\treturn 0" }, { "identifier": "isInContourV3_Hard", "path": "wsi_core/util_classes.py", "snippet": "class isInContourV3_Hard(Contour_Checking_fn):\n\tdef __init__(self, contour, patch_size, center_shift=0.5):\n\t\tself.cont = contour\n\t\tself.patch_size = patch_size\n\t\tself.shift = int(patch_size//2*center_shift)\n\tdef __call__(self, pt): \n\t\tcenter = (pt[0]+self.patch_size//2, pt[1]+self.patch_size//2)\n\t\tif self.shift > 0:\n\t\t\tall_points = [(center[0]-self.shift, center[1]-self.shift),\n\t\t\t\t\t\t (center[0]+self.shift, center[1]+self.shift),\n\t\t\t\t\t\t (center[0]+self.shift, center[1]-self.shift),\n\t\t\t\t\t\t (center[0]-self.shift, center[1]+self.shift)\n\t\t\t\t\t\t ]\n\t\telse:\n\t\t\tall_points = [center]\n\t\t\n\t\tfor points in all_points:\n\t\t\tpoints = (int(points[0]),int(points[1]))\n\t\t\tif cv2.pointPolygonTest(self.cont, points, False) 
< 0:\n\t\t\t\treturn 0\n\t\treturn 1" }, { "identifier": "Contour_Checking_fn", "path": "wsi_core/util_classes.py", "snippet": "class Contour_Checking_fn(object):\n\t# Defining __call__ method \n\tdef __call__(self, pt): \n\t\traise NotImplementedError" }, { "identifier": "load_pkl", "path": "utils/file_utils.py", "snippet": "def load_pkl(filename):\n\tloader = open(filename,'rb')\n\tfile = pickle.load(loader)\n\tloader.close()\n\treturn file" }, { "identifier": "save_pkl", "path": "utils/file_utils.py", "snippet": "def save_pkl(filename, save_object):\n\twriter = open(filename,'wb')\n\tpickle.dump(save_object, writer)\n\twriter.close()" }, { "identifier": "kfbslide", "path": "wsi_core/KfbSlide/kfbslide.py", "snippet": "class kfbRef:\r\nclass KfbSlide():\r\nclass _KfbPropertyMap(Mapping):\r\nclass _AssociatedImageMap():\r\n def __init__(self, filename):\r\n def __repr__(self):\r\n def detect_format(cls, filename):\r\n def close(self):\r\n def level_count(self):\r\n def level_dimensions(self):\r\n def dimensions(self):\r\n def level_downsamples(self):\r\n def properties(self):\r\n def associated_images(self):\r\n def get_best_level_for_downsample(self, downsample):\r\n def _fixed_read_region(self, location, level, size=(256, 256)):\r\n def get_dzi(self):\r\n def read_region(self, location=[0,0], level=0, size = [256, 256] ):\r\n def _keys(self):\r\n def __getitem__(self, key):\r\n def __init__(self, osr):\r\n def __repr__(self):\r\n def __len__(self):\r\n def __iter__(self):\r\n def __init__(self, osr):\r\n def _keys(self):\r\n def __getitem__(self, key):\r\ndef open_kfbslide(filename):\r\n TILE_SIZE = 256\r" } ]
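Editor's note: a short self-contained illustration, using a toy square contour rather than repo data, of the isInContourV3_Easy checker quoted above: a patch counts as inside tissue if ANY of four points shifted around its center lies within the contour, while the _Hard variant requires ALL four.

import cv2
import numpy as np

cont = np.array([[[0, 0]], [[100, 0]], [[100, 100]], [[0, 100]]], dtype=np.int32)
patch_size = 64
shift = int(patch_size // 2 * 0.5)          # center_shift=0.5, as in the record

def in_contour_easy(pt):
    cx, cy = pt[0] + patch_size // 2, pt[1] + patch_size // 2
    corners = [(cx - shift, cy - shift), (cx + shift, cy + shift),
               (cx + shift, cy - shift), (cx - shift, cy + shift)]
    return int(any(cv2.pointPolygonTest(cont, (int(x), int(y)), False) >= 0
                   for x, y in corners))

print(in_contour_easy((20, 20)))    # 1: at least one shifted point falls inside
print(in_contour_easy((200, 200)))  # 0: every shifted point is outside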
import math
import os
import time
import xml.etree.ElementTree as ET
import multiprocessing as mp
import cv2
import matplotlib.pyplot as plt
import numpy as np
import openslide
import pdb
import h5py
import math
import itertools
import pickle
from xml.dom import minidom
from PIL import Image
from wsi_core.wsi_utils import savePatchIter_bag_hdf5, initialize_hdf5_bag, coord_generator, save_hdf5, sample_indices, screen_coords, isBlackPatch, isWhitePatch, to_percentiles
from wsi_core.util_classes import isInContourV1, isInContourV2, isInContourV3_Easy, isInContourV3_Hard, Contour_Checking_fn
from utils.file_utils import load_pkl, save_pkl
from skimage.color import rgb2hed, hed2rgb
from wsi_core.KfbSlide import kfbslide
5,798
# img_h_gray = 255-cv2.medianBlur(cv2.cvtColor(img_h, cv2.COLOR_BGR2GRAY),mthresh) # # _, img_otsu = cv2.threshold(img_h_gray, sthresh, sthresh_up, cv2.THRESH_BINARY) # otsu_thresh, img_otsu = cv2.threshold(img_h_gray, 0, sthresh_up, cv2.THRESH_OTSU + cv2.THRESH_BINARY) # adjust_thresh = max(sthresh,otsu_thresh-20) # _, img_otsu = cv2.threshold(img_h_gray, adjust_thresh, sthresh_up, cv2.THRESH_BINARY) # img_d = hed2rgb(np.stack((img_hed[:, :, 2], img_hed[:, :, 2], img_hed[:, :, 2]), axis=-1)) # filter this? # Morphological closing if close > 0: kernel = np.ones((close, close), np.uint8) img_otsu = cv2.morphologyEx(img_otsu, cv2.MORPH_CLOSE, kernel) scale = self.level_downsamples[seg_level] scaled_ref_patch_area = int(ref_patch_size**2 / (scale[0] * scale[1])) print('scaled_ref_patch_area',scaled_ref_patch_area) print('ref_patch_size',ref_patch_size) print('scale',scale,'seg_level',seg_level) filter_params = filter_params.copy() filter_params['a_t'] = filter_params['a_t'] * scaled_ref_patch_area filter_params['a_h'] = filter_params['a_h'] * scaled_ref_patch_area # Find and filter contours contours, hierarchy = cv2.findContours(img_otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # Find contours hierarchy = np.squeeze(hierarchy, axis=(0,))[:, 2:] # pdb.set_trace() if filter_params: foreground_contours, hole_contours = _filter_contours(contours, hierarchy, filter_params) # Necessary for filtering out artifacts self.contours_tissue = self.scaleContourDim(foreground_contours, scale) self.holes_tissue = self.scaleHolesDim(hole_contours, scale) #exclude_ids = [0,7,9] if len(keep_ids) > 0: contour_ids = set(keep_ids) - set(exclude_ids) else: contour_ids = set(np.arange(len(self.contours_tissue))) - set(exclude_ids) self.contours_tissue = [self.contours_tissue[i] for i in contour_ids] self.holes_tissue = [self.holes_tissue[i] for i in contour_ids] def visWSI(self, vis_level=0, color = (0,255,0), hole_color = (0,0,255), annot_color=(255,0,0), line_thickness=250, max_size=None, top_left=None, bot_right=None, custom_downsample=1, view_slide_only=False, number_contours=False, seg_display=True, annot_display=True): downsample = self.level_downsamples[vis_level] scale = [1/downsample[0], 1/downsample[1]] # pdb.set_trace() if top_left is not None and bot_right is not None: top_left = tuple(top_left) bot_right = tuple(bot_right) w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int)) region_size = (w, h) else: top_left = (0,0) region_size = self.level_dim[vis_level] img = self.wsi.read_region(top_left, vis_level, region_size) try: img = np.array(img.convert("RGB")) except: pass # view_slide_only= True if not view_slide_only: offset = tuple(-(np.array(top_left) * scale).astype(int)) line_thickness = int(line_thickness * math.sqrt(scale[0] * scale[1])) if self.contours_tissue is not None and seg_display: if not number_contours: cv2.drawContours(img, self.scaleContourDim(self.contours_tissue, scale), -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset) else: # add numbering to each contour for idx, cont in enumerate(self.contours_tissue): contour = np.array(self.scaleContourDim(cont, scale)) M = cv2.moments(contour) cX = int(M["m10"] / (M["m00"] + 1e-9)) cY = int(M["m01"] / (M["m00"] + 1e-9)) # draw the contour and put text next to center cv2.drawContours(img, [contour], -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset) cv2.putText(img, "{}".format(idx), (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 10) for holes in self.holes_tissue: 
cv2.drawContours(img, self.scaleContourDim(holes, scale), -1, hole_color, line_thickness, lineType=cv2.LINE_8) if self.contours_tumor is not None and annot_display: cv2.drawContours(img, self.scaleContourDim(self.contours_tumor, scale), -1, annot_color, line_thickness, lineType=cv2.LINE_8, offset=offset) img = Image.fromarray(img) w, h = img.size if custom_downsample > 1: img = img.resize((int(w/custom_downsample), int(h/custom_downsample))) if max_size is not None and (w > max_size or h > max_size): resizeFactor = max_size/w if w > h else max_size/h img = img.resize((int(w*resizeFactor), int(h*resizeFactor))) return img def createPatches_bag_hdf5(self, save_path, patch_level=0, patch_size=256, step_size=256, save_coord=True, **kwargs): contours = self.contours_tissue contour_holes = self.holes_tissue print("Creating patches for: ", self.name, "...",) elapsed = time.time() for idx, cont in enumerate(contours): patch_gen = self._getPatchGenerator(cont, idx, patch_level, save_path, patch_size, step_size, **kwargs) if self.hdf5_file is None: try: first_patch = next(patch_gen) # empty contour, continue except StopIteration: continue
Image.MAX_IMAGE_PIXELS = 933120000 class WholeSlideImage(object): def __init__(self, path): """ Args: path (str): fullpath to WSI file """ # self.name = ".".join(path.split("/")[-1].split('.')[:-1]) self.name = os.path.splitext(os.path.basename(path))[0] # pdb.set_trace() try: self.wsi = openslide.open_slide(path) except: self.wsi = kfbslide.open_kfbslide(path) # self.wsi = openSlide(path) # pdb.set_trace() self.level_downsamples = self._assertLevelDownsamples() self.level_dim = self.wsi.level_dimensions self.contours_tissue = None self.contours_tumor = None self.hdf5_file = None def getOpenSlide(self): return self.wsi def initXML(self, xml_path): def _createContour(coord_list): return np.array([[[int(float(coord.attributes['X'].value)), int(float(coord.attributes['Y'].value))]] for coord in coord_list], dtype = 'int32') xmldoc = minidom.parse(xml_path) annotations = [anno.getElementsByTagName('Coordinate') for anno in xmldoc.getElementsByTagName('Annotation')] self.contours_tumor = [_createContour(coord_list) for coord_list in annotations] self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True) def initTxt(self,annot_path): def _create_contours_from_dict(annot): all_cnts = [] for idx, annot_group in enumerate(annot): contour_group = annot_group['coordinates'] if annot_group['type'] == 'Polygon': for idx, contour in enumerate(contour_group): contour = np.array(contour).astype(np.int32).reshape(-1,1,2) all_cnts.append(contour) else: for idx, sgmt_group in enumerate(contour_group): contour = [] for sgmt in sgmt_group: contour.extend(sgmt) contour = np.array(contour).astype(np.int32).reshape(-1,1,2) all_cnts.append(contour) return all_cnts with open(annot_path, "r") as f: annot = f.read() annot = eval(annot) self.contours_tumor = _create_contours_from_dict(annot) self.contours_tumor = sorted(self.contours_tumor, key=cv2.contourArea, reverse=True) def initSegmentation(self, mask_file): # load segmentation results from pickle file asset_dict = load_pkl(mask_file) self.holes_tissue = asset_dict['holes'] self.contours_tissue = asset_dict['tissue'] def saveSegmentation(self, mask_file): # save segmentation results using pickle asset_dict = {'holes': self.holes_tissue, 'tissue': self.contours_tissue} save_pkl(mask_file, asset_dict) def segmentTissue(self, seg_level=0, sthresh=20, sthresh_up = 255, mthresh=7, close = 0, use_otsu=False, filter_params={'a_t':100}, ref_patch_size=512, exclude_ids=[], keep_ids=[]): """ Segment the tissue via HSV -> Median thresholding -> Binary threshold """ def _filter_contours(contours, hierarchy, filter_params): """ Filter contours by: area. 
""" filtered = [] # find indices of foreground contours (parent == -1) hierarchy_1 = np.flatnonzero(hierarchy[:,1] == -1) all_holes = [] # loop through foreground contour indices for cont_idx in hierarchy_1: # actual contour # pdb.set_trace() cont = contours[cont_idx] # indices of holes contained in this contour (children of parent contour) holes = np.flatnonzero(hierarchy[:, 1] == cont_idx) # take contour area (includes holes) a = cv2.contourArea(cont) # calculate the contour area of each hole hole_areas = [cv2.contourArea(contours[hole_idx]) for hole_idx in holes] # actual area of foreground contour region a = a - np.array(hole_areas).sum() if a == 0: continue # print(tuple((filter_params['a_t'],)),tuple((a,))) if tuple((filter_params['a_t'],)) < tuple((a,)): filtered.append(cont_idx) all_holes.append(holes) foreground_contours = [contours[cont_idx] for cont_idx in filtered] hole_contours = [] for hole_ids in all_holes: unfiltered_holes = [contours[idx] for idx in hole_ids ] unfilered_holes = sorted(unfiltered_holes, key=cv2.contourArea, reverse=True) # take max_n_holes largest holes by area unfilered_holes = unfilered_holes[:filter_params['max_n_holes']] filtered_holes = [] # filter these holes for hole in unfilered_holes: if cv2.contourArea(hole) > filter_params['a_h']: filtered_holes.append(hole) hole_contours.append(filtered_holes) return foreground_contours, hole_contours # pdb.set_trace() try: img = np.array(self.wsi.read_region((0,0), seg_level, self.level_dim[seg_level])) except: print('failed read region') img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) # Convert to HSV space img_med = cv2.medianBlur(img_hsv[:,:,1], mthresh) # Apply median blurring # Thresholding # if use_otsu: if False: otsu_thresh, img_otsu = cv2.threshold(img_med, 0, sthresh_up, cv2.THRESH_OTSU+cv2.THRESH_BINARY) # adjust_thresh = max(sthresh,otsu_thresh-20) adjust_thresh = otsu_thresh _, img_otsu = cv2.threshold(img_med, adjust_thresh, sthresh_up, cv2.THRESH_BINARY) print('otsu_threshold:',otsu_thresh,'adjust_thresh:',adjust_thresh) else: print('not otsu') _, img_otsu = cv2.threshold(img_med, sthresh, sthresh_up, cv2.THRESH_BINARY) # pdb.set_trace() ## hed operas # img_hed = rgb2hed(cv2.cvtColor(img, cv2.COLOR_RGBA2RGB)) # # img_e = hed2rgb(np.stack((img_hed[:, :, 1], img_hed[:, :, 1], img_hed[:, :, 1]), axis=-1)) # img_h = hed2rgb(np.stack((img_hed[:, :, 0], np.zeros_like(img_hed[:, :, 0]), np.zeros_like(img_hed[:, :, 0])), axis=-1)) # img_h = (img_h*255).astype(np.uint8) # img_h_gray = 255-cv2.medianBlur(cv2.cvtColor(img_h, cv2.COLOR_BGR2GRAY),mthresh) # # _, img_otsu = cv2.threshold(img_h_gray, sthresh, sthresh_up, cv2.THRESH_BINARY) # otsu_thresh, img_otsu = cv2.threshold(img_h_gray, 0, sthresh_up, cv2.THRESH_OTSU + cv2.THRESH_BINARY) # adjust_thresh = max(sthresh,otsu_thresh-20) # _, img_otsu = cv2.threshold(img_h_gray, adjust_thresh, sthresh_up, cv2.THRESH_BINARY) # img_d = hed2rgb(np.stack((img_hed[:, :, 2], img_hed[:, :, 2], img_hed[:, :, 2]), axis=-1)) # filter this? 
# Morphological closing if close > 0: kernel = np.ones((close, close), np.uint8) img_otsu = cv2.morphologyEx(img_otsu, cv2.MORPH_CLOSE, kernel) scale = self.level_downsamples[seg_level] scaled_ref_patch_area = int(ref_patch_size**2 / (scale[0] * scale[1])) print('scaled_ref_patch_area',scaled_ref_patch_area) print('ref_patch_size',ref_patch_size) print('scale',scale,'seg_level',seg_level) filter_params = filter_params.copy() filter_params['a_t'] = filter_params['a_t'] * scaled_ref_patch_area filter_params['a_h'] = filter_params['a_h'] * scaled_ref_patch_area # Find and filter contours contours, hierarchy = cv2.findContours(img_otsu, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) # Find contours hierarchy = np.squeeze(hierarchy, axis=(0,))[:, 2:] # pdb.set_trace() if filter_params: foreground_contours, hole_contours = _filter_contours(contours, hierarchy, filter_params) # Necessary for filtering out artifacts self.contours_tissue = self.scaleContourDim(foreground_contours, scale) self.holes_tissue = self.scaleHolesDim(hole_contours, scale) #exclude_ids = [0,7,9] if len(keep_ids) > 0: contour_ids = set(keep_ids) - set(exclude_ids) else: contour_ids = set(np.arange(len(self.contours_tissue))) - set(exclude_ids) self.contours_tissue = [self.contours_tissue[i] for i in contour_ids] self.holes_tissue = [self.holes_tissue[i] for i in contour_ids] def visWSI(self, vis_level=0, color = (0,255,0), hole_color = (0,0,255), annot_color=(255,0,0), line_thickness=250, max_size=None, top_left=None, bot_right=None, custom_downsample=1, view_slide_only=False, number_contours=False, seg_display=True, annot_display=True): downsample = self.level_downsamples[vis_level] scale = [1/downsample[0], 1/downsample[1]] # pdb.set_trace() if top_left is not None and bot_right is not None: top_left = tuple(top_left) bot_right = tuple(bot_right) w, h = tuple((np.array(bot_right) * scale).astype(int) - (np.array(top_left) * scale).astype(int)) region_size = (w, h) else: top_left = (0,0) region_size = self.level_dim[vis_level] img = self.wsi.read_region(top_left, vis_level, region_size) try: img = np.array(img.convert("RGB")) except: pass # view_slide_only= True if not view_slide_only: offset = tuple(-(np.array(top_left) * scale).astype(int)) line_thickness = int(line_thickness * math.sqrt(scale[0] * scale[1])) if self.contours_tissue is not None and seg_display: if not number_contours: cv2.drawContours(img, self.scaleContourDim(self.contours_tissue, scale), -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset) else: # add numbering to each contour for idx, cont in enumerate(self.contours_tissue): contour = np.array(self.scaleContourDim(cont, scale)) M = cv2.moments(contour) cX = int(M["m10"] / (M["m00"] + 1e-9)) cY = int(M["m01"] / (M["m00"] + 1e-9)) # draw the contour and put text next to center cv2.drawContours(img, [contour], -1, color, line_thickness, lineType=cv2.LINE_8, offset=offset) cv2.putText(img, "{}".format(idx), (cX, cY), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 10) for holes in self.holes_tissue: cv2.drawContours(img, self.scaleContourDim(holes, scale), -1, hole_color, line_thickness, lineType=cv2.LINE_8) if self.contours_tumor is not None and annot_display: cv2.drawContours(img, self.scaleContourDim(self.contours_tumor, scale), -1, annot_color, line_thickness, lineType=cv2.LINE_8, offset=offset) img = Image.fromarray(img) w, h = img.size if custom_downsample > 1: img = img.resize((int(w/custom_downsample), int(h/custom_downsample))) if max_size is not None and (w > max_size or h > max_size): resizeFactor = 
max_size/w if w > h else max_size/h img = img.resize((int(w*resizeFactor), int(h*resizeFactor))) return img def createPatches_bag_hdf5(self, save_path, patch_level=0, patch_size=256, step_size=256, save_coord=True, **kwargs): contours = self.contours_tissue contour_holes = self.holes_tissue print("Creating patches for: ", self.name, "...",) elapsed = time.time() for idx, cont in enumerate(contours): patch_gen = self._getPatchGenerator(cont, idx, patch_level, save_path, patch_size, step_size, **kwargs) if self.hdf5_file is None: try: first_patch = next(patch_gen) # empty contour, continue except StopIteration: continue
file_path = initialize_hdf5_bag(first_patch, save_coord=save_coord)
1
2023-11-12 14:07:34+00:00
8k
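For orientation, here is a minimal, self-contained sketch of the two-level contour-hierarchy filtering that `_filter_contours` in the record above relies on. The toy mask and all sizes are invented for illustration; only standard OpenCV and NumPy calls are used, mirroring the `RETR_CCOMP` convention in which rows with parent == -1 are tissue blobs and their children are holes.

import cv2
import numpy as np

# Toy binary mask: a filled square (tissue blob) with a square hole inside it.
mask = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(mask, (20, 20), (180, 180), 255, thickness=-1)   # foreground blob
cv2.rectangle(mask, (80, 80), (120, 120), 0, thickness=-1)     # hole

# RETR_CCOMP yields a two-level hierarchy: outer contours and their holes.
# OpenCV >= 4 returns (contours, hierarchy); hierarchy rows are
# [next, previous, first_child, parent].
contours, hierarchy = cv2.findContours(mask, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
hierarchy = np.squeeze(hierarchy, axis=(0,))[:, 2:]  # keep [first_child, parent]

# Foreground contours have parent == -1; their holes point back at them.
for idx in np.flatnonzero(hierarchy[:, 1] == -1):
    hole_ids = np.flatnonzero(hierarchy[:, 1] == idx)
    net_area = cv2.contourArea(contours[idx]) - sum(
        cv2.contourArea(contours[h]) for h in hole_ids)
    print(f"contour {idx}: net tissue area = {net_area:.0f} px")

The `a_t` and `a_h` thresholds in the record are then just cutoffs applied to these net blob areas and hole areas, after rescaling by the chosen pyramid level's downsample factor.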
juftin/hatch-pip-compile
tests/conftest.py
[ { "identifier": "PipInstaller", "path": "hatch_pip_compile/installer.py", "snippet": "class PipInstaller(PluginInstaller):\n \"\"\"\n Plugin Installer for `pip`\n \"\"\"\n\n def install_dependencies(self) -> None:\n \"\"\"\n Install the dependencies with `pip`\n \"\"\"\n with self.environment.safe_activation():\n if not self.environment.piptools_lock_file.exists():\n return\n extra_args = self.environment.config.get(\"pip-compile-install-args\", [])\n args = [*extra_args, \"--requirement\", str(self.environment.piptools_lock_file)]\n install_command = self.environment.construct_pip_install_command(args=args)\n self.environment.plugin_check_command(install_command)" }, { "identifier": "PipSyncInstaller", "path": "hatch_pip_compile/installer.py", "snippet": "class PipSyncInstaller(PluginInstaller):\n \"\"\"\n Plugin Installer for `pip-sync`\n \"\"\"\n\n def install_dependencies(self) -> None:\n \"\"\"\n Install the dependencies with `pip-sync`\n\n In the event that there are no dependencies, pip-sync will\n uninstall everything in the environment before deleting the\n lockfile.\n \"\"\"\n self.environment.install_pip_tools()\n cmd = [\n self.environment.virtual_env.python_info.executable,\n \"-m\",\n \"piptools\",\n \"sync\",\n \"--verbose\"\n if self.environment.config.get(\"pip-compile-verbose\", None) is True\n else \"--quiet\",\n \"--python-executable\",\n str(self.environment.virtual_env.python_info.executable),\n ]\n if not self.environment.dependencies:\n self.environment.piptools_lock_file.write_text(\"\")\n extra_args = self.environment.config.get(\"pip-compile-install-args\", [])\n cmd.extend(extra_args)\n cmd.append(str(self.environment.piptools_lock_file))\n self.environment.plugin_check_command(cmd)\n if not self.environment.dependencies:\n self.environment.piptools_lock_file.unlink()\n\n def _full_install(self) -> None:\n \"\"\"\n Run the full install process\n\n 1) Run pip-compile (if necessary)\n 2) Run pip-sync\n 3) (re)install project\n \"\"\"\n with self.environment.safe_activation():\n self.environment.run_pip_compile()\n self.install_dependencies()\n if not self.environment.skip_install:\n if self.environment.dev_mode:\n super().install_project_dev_mode()\n else:\n super().install_project()\n\n def sync_dependencies(self):\n \"\"\"\n Sync dependencies\n \"\"\"\n self._full_install()\n\n def install_project(self):\n \"\"\"\n Install the project the first time\n\n The same implementation as `_full_install`\n due to the way `pip-sync` uninstalls our root package\n \"\"\"\n self._full_install()\n\n def install_project_dev_mode(self):\n \"\"\"\n Install the project the first time in dev mode\n\n The same implementation as `_full_install`\n due to the way `pip-sync` uninstalls our root package\n \"\"\"\n self._full_install()" }, { "identifier": "PluginInstaller", "path": "hatch_pip_compile/installer.py", "snippet": "class PluginInstaller(ABC):\n \"\"\"\n Package Installer for the plugin\n\n This abstract base class is used to define the interface for\n how the plugin should install packages and dependencies.\n \"\"\"\n\n environment: \"PipCompileEnvironment\"\n\n @abstractmethod\n def install_dependencies(self) -> None:\n \"\"\"\n Install the dependencies\n \"\"\"\n\n def sync_dependencies(self) -> None:\n \"\"\"\n Sync the dependencies - same as `install_dependencies`\n \"\"\"\n self.install_dependencies()\n\n def install_project(self) -> None:\n \"\"\"\n Install the project (`--no-deps`)\n \"\"\"\n with self.environment.safe_activation():\n 
self.environment.plugin_check_command(\n self.environment.construct_pip_install_command(\n args=[\"--no-deps\", str(self.environment.root)]\n )\n )\n\n def install_project_dev_mode(self) -> None:\n \"\"\"\n Install the project in editable mode (`--no-deps`)\n \"\"\"\n with self.environment.safe_activation():\n self.environment.plugin_check_command(\n self.environment.construct_pip_install_command(\n args=[\"--no-deps\", \"--editable\", str(self.environment.root)]\n )\n )" }, { "identifier": "PipCompileEnvironment", "path": "hatch_pip_compile/plugin.py", "snippet": "class PipCompileEnvironment(VirtualEnvironment):\n \"\"\"\n Virtual Environment supported by pip-compile\n \"\"\"\n\n PLUGIN_NAME = \"pip-compile\"\n\n default_env_name = \"default\"\n\n def __repr__(self):\n \"\"\"\n Get representation of PipCompileEnvironment\n \"\"\"\n return f\"<{self.__class__.__name__} - {self.name}>\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"\n Initialize PipCompileEnvironment with extra attributes\n \"\"\"\n super().__init__(*args, **kwargs)\n lock_filename_config = self.config.get(\"lock-filename\")\n if lock_filename_config is None:\n if self.name == self.default_env_name:\n lock_filename = \"requirements.txt\"\n else:\n lock_filename = f\"requirements/requirements-{self.name}.txt\"\n else:\n with self.metadata.context.apply_context(self.context):\n lock_filename = self.metadata.context.format(lock_filename_config)\n self.piptools_lock_file = self.root / lock_filename\n self.piptools_lock = PipCompileLock(\n lock_file=self.piptools_lock_file,\n dependencies=self.dependencies,\n virtualenv=self.virtual_env,\n constraints_file=self.piptools_constraints_file,\n project_root=self.root,\n env_name=self.name,\n project_name=self.metadata.name,\n )\n install_method = self.config.get(\"pip-compile-installer\", \"pip\")\n self.installer: PluginInstaller\n if install_method == \"pip\":\n self.installer = PipInstaller(environment=self)\n elif install_method == \"pip-sync\":\n self.installer = PipSyncInstaller(environment=self)\n else:\n msg = (\n f\"Invalid pip-tools install method: {install_method} - \"\n \"must be 'pip' or 'pip-sync'\"\n )\n raise HatchPipCompileError(msg)\n\n @staticmethod\n def get_option_types() -> Dict[str, Any]:\n \"\"\"\n Get option types\n \"\"\"\n return { # pragma: no cover\n \"lock-filename\": str,\n \"pip-compile-hashes\": bool,\n \"pip-compile-args\": List[str],\n \"pip-compile-constraint\": str,\n \"pip-compile-installer\": str,\n \"pip-compile-install-args\": List[str],\n }\n\n def dependency_hash(self) -> str:\n \"\"\"\n Get the dependency hash\n \"\"\"\n self.run_pip_compile()\n hatch_hash = super().dependency_hash()\n if not self.dependencies:\n return hatch_hash\n else:\n lockfile_hash = self.piptools_lock.get_file_content_hash()\n return hashlib.sha256(f\"{hatch_hash}-{lockfile_hash}\".encode()).hexdigest()\n\n def install_pip_tools(self) -> None:\n \"\"\"\n Install pip-tools (if not already installed)\n \"\"\"\n with self.safe_activation():\n in_sync = dependencies_in_sync(\n requirements=[Requirement(\"pip-tools\")],\n sys_path=self.virtual_env.sys_path,\n environment=self.virtual_env.environment,\n )\n if not in_sync:\n self.plugin_check_command(self.construct_pip_install_command([\"pip-tools\"]))\n\n def run_pip_compile(self) -> None:\n \"\"\"\n Run pip-compile if necessary\n \"\"\"\n if not self.lockfile_up_to_date:\n with self.safe_activation():\n self.install_pip_tools()\n if self.piptools_lock_file.exists():\n _ = 
self.piptools_lock.compare_python_versions(\n verbose=self.config.get(\"pip-compile-verbose\", None)\n )\n self.pip_compile_cli()\n\n def pip_compile_cli(self) -> None:\n \"\"\"\n Run pip-compile\n \"\"\"\n if not self.dependencies:\n self.piptools_lock_file.unlink(missing_ok=True)\n self.lockfile_up_to_date = True\n return\n no_compile = bool(os.getenv(\"PIP_COMPILE_DISABLE\"))\n if no_compile:\n msg = \"hatch-pip-compile is disabled but attempted to run a lockfile update.\"\n raise HatchPipCompileError(msg)\n upgrade = bool(os.getenv(\"PIP_COMPILE_UPGRADE\"))\n upgrade_packages = os.getenv(\"PIP_COMPILE_UPGRADE_PACKAGE\") or None\n upgrade_args = []\n upgrade_package_args = []\n if upgrade:\n upgrade_args.append(\"--upgrade\")\n if upgrade_packages:\n upgrade_packages_sep = upgrade_packages.split(\",\")\n for package in upgrade_packages_sep:\n upgrade_package_args.append(f\"--upgrade-package={package.strip()}\")\n cmd = [\n self.virtual_env.python_info.executable,\n \"-m\",\n \"piptools\",\n \"compile\",\n \"--verbose\" if self.config.get(\"pip-compile-verbose\", None) is True else \"--quiet\",\n \"--strip-extras\",\n \"--no-header\",\n \"--resolver=backtracking\",\n ]\n if self.config.get(\"pip-compile-hashes\", False) is True:\n cmd.append(\"--generate-hashes\")\n if self.piptools_constraints_file is not None:\n cmd.extend([\"--constraint\", str(self.piptools_constraints_file)])\n cmd.extend(self.config.get(\"pip-compile-args\", []))\n cmd.extend(upgrade_args)\n cmd.extend(upgrade_package_args)\n with tempfile.TemporaryDirectory() as tmpdir:\n tmp_path = pathlib.Path(tmpdir)\n input_file = tmp_path / f\"{self.name}.in\"\n output_file = tmp_path / \"lock.txt\"\n cmd.extend([\"--output-file\", str(output_file), str(input_file)])\n input_file.write_text(\"\\n\".join([*self.dependencies, \"\"]))\n if self.piptools_lock_file.exists():\n shutil.copy(self.piptools_lock_file, output_file)\n self.piptools_lock_file.parent.mkdir(exist_ok=True, parents=True)\n self.plugin_check_command(cmd)\n self.piptools_lock.process_lock(lockfile=output_file)\n shutil.move(output_file, self.piptools_lock_file)\n self.lockfile_up_to_date = True\n\n def install_project(self) -> None:\n \"\"\"\n Install the project (`--no-deps`)\n \"\"\"\n self.installer.install_project()\n\n def install_project_dev_mode(self) -> None:\n \"\"\"\n Install the project in editable mode (`--no-deps`)\n \"\"\"\n self.installer.install_project_dev_mode()\n\n @functools.cached_property\n def lockfile_up_to_date(self) -> bool:\n \"\"\"\n Check if the lockfile is up-to-date\n\n Behavior\n --------\n 1) If there are no dependencies and no lock file, exit early and return True.\n 2) If the constraint file / environment is out of date, sync it and return False.\n 3) If there are no dependencies and a lock file, return False.\n 4) If there are dependencies and no lock file, return False.\n 5) If a force upgrade is requested, return False.\n 6) If there are dependencies and a lock file...\n a) If there is a constraint file...\n i) If the file is valid but the SHA is different, return False.\n b) If the lock file dependencies aren't current, return False.\n c) If the lock file dependencies are current but the lockfile\n has a different sha than its constraints file, return False.\n 7) Otherwise, return True.\n \"\"\"\n upgrade = os.getenv(\"PIP_COMPILE_UPGRADE\") or False\n upgrade_packages = os.getenv(\"PIP_COMPILE_UPGRADE_PACKAGE\") or False\n force_upgrade = upgrade is not False or upgrade_packages is not False\n if not self.dependencies and 
not self.piptools_lock_file.exists():\n return True\n if self.piptools_constraints_file:\n valid_constraint = self.validate_constraints_file(\n constraints_file=self.piptools_constraints_file, environment=self.constraint_env\n )\n if not valid_constraint:\n return False\n if not self.dependencies and self.piptools_lock_file.exists():\n return False\n elif force_upgrade:\n return False\n elif self.dependencies and not self.piptools_lock_file.exists():\n return False\n elif self.dependencies and self.piptools_lock_file.exists():\n if self.piptools_constraints_file:\n current_sha = self.constraint_env.piptools_lock.get_file_content_hash()\n sha_match = self.piptools_lock.compare_constraint_sha(sha=current_sha)\n if sha_match is False:\n return False\n expected_dependencies = self.piptools_lock.compare_requirements(\n requirements=self.dependencies_complex\n )\n if not expected_dependencies:\n return False\n return True\n\n def dependencies_in_sync(self):\n \"\"\"\n Whether the dependencies are in sync\n \"\"\"\n if not self.lockfile_up_to_date:\n return False\n else:\n with self.safe_activation():\n return dependencies_in_sync(\n self.piptools_lock.read_lock_requirements(),\n sys_path=self.virtual_env.sys_path,\n environment=self.virtual_env.environment,\n )\n\n def sync_dependencies(self) -> None:\n \"\"\"\n Sync dependencies\n \"\"\"\n self.run_pip_compile()\n self.installer.sync_dependencies()\n\n @property\n def piptools_constraints_file(self) -> Optional[pathlib.Path]:\n \"\"\"\n Get the constraint file path\n \"\"\"\n if self.constraint_env.name == self.name:\n return None\n else:\n return self.constraint_env.piptools_lock_file\n\n def get_piptools_environment(self, environment_name: str) -> \"PipCompileEnvironment\":\n \"\"\"\n Get a `PipCompileEnvironment` instance for an environment\n other than the current instance. 
This is useful\n for recursively checking other environments for lock file\n validity and defining inheritance.\n \"\"\"\n if environment_name not in self.pipools_environment_dict.keys():\n error_message = (\n f\"[hatch-pip-compile] The environment {environment_name} does not exist.\"\n )\n raise HatchPipCompileError(error_message)\n return PipCompileEnvironment(\n root=self.root,\n metadata=self.metadata,\n name=environment_name,\n config=self.pipools_environment_dict.get(environment_name, {}),\n matrix_variables=self.matrix_variables,\n data_directory=self.data_directory,\n isolated_data_directory=self.isolated_data_directory,\n platform=Platform(),\n verbosity=self.verbosity,\n app=None,\n )\n\n @functools.cached_property\n def constraint_env(self) -> \"PipCompileEnvironment\":\n \"\"\"\n Get the constraint environment\n \"\"\"\n constraint_env = self.config.get(\"pip-compile-constraint\")\n if not constraint_env:\n return self\n elif self.name == constraint_env:\n return self\n environment = self.get_piptools_environment(environment_name=constraint_env)\n if environment.config.get(\"type\") != self.PLUGIN_NAME:\n logger.error(\"The constraint environment is not a hatch-pip-compile environment.\")\n return self\n elif not environment.dependencies:\n return self\n else:\n try:\n _ = environment.virtual_env.executables_directory\n except OSError:\n environment.create()\n return environment\n\n def validate_constraints_file(\n self, constraints_file: pathlib.Path, environment: \"PipCompileEnvironment\"\n ) -> bool:\n \"\"\"\n Validate the constraints file\n\n Parameters\n ----------\n constraints_file : pathlib.Path\n The lock file\n environment : PipCompileEnvironment\n The environment to validate against\n\n Returns\n -------\n bool\n Whether the constraints file is valid\n \"\"\"\n if not constraints_file.exists():\n self.constraint_env.run_pip_compile()\n return False\n else:\n up_to_date = environment.piptools_lock.compare_requirements(\n requirements=environment.dependencies_complex\n )\n if not up_to_date:\n self.constraint_env.run_pip_compile()\n return False\n return True\n\n @property\n def pipools_environment_dict(self) -> Dict[str, Any]:\n \"\"\"\n Get the environment dictionary\n \"\"\"\n return self.metadata.hatch.config.get(\"envs\", {})\n\n def plugin_check_command(\n self, command: Union[str, List[str]], *, shell: bool = False, **kwargs: Any\n ) -> CompletedProcess:\n \"\"\"\n Run a command from the virtualenv\n \"\"\"\n with self.safe_activation():\n return self.virtual_env.platform.check_command(\n command=command,\n shell=shell,\n **kwargs,\n )" } ]
import os import pathlib import shutil import pytest import tomlkit from dataclasses import dataclass, field from subprocess import CompletedProcess from typing import Dict, Generator, Type from unittest.mock import patch from hatch.cli.application import Application from hatch.config.constants import AppEnvVars, ConfigEnvVars, PublishEnvVars from hatch.project.core import Project from hatch.utils.fs import Path, temp_directory from hatch.utils.platform import Platform from hatch_pip_compile.installer import PipInstaller, PipSyncInstaller, PluginInstaller from hatch_pip_compile.plugin import PipCompileEnvironment
4,554
""" Shared fixtures for tests. """ @pytest.fixture def mock_check_command() -> Generator[patch, None, None]: """ Disable the `plugin_check_command` for testing """ with patch("hatch_pip_compile.plugin.PipCompileEnvironment.plugin_check_command") as mock: mock.return_value = CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b"") yield mock @pytest.fixture def platform() -> Platform: """ Platform """ return Platform() @pytest.fixture def isolation(platform: Platform) -> Generator[Path, None, None]: """ Isolated hatch environment for testing. """ with temp_directory() as temp_dir: data_dir = pathlib.Path(__file__).parent / "data" shutil.copytree(data_dir, temp_dir, dirs_exist_ok=True) data_dir = temp_dir / "data" data_dir.mkdir() cache_dir = temp_dir / "cache" cache_dir.mkdir() default_env_vars = { AppEnvVars.NO_COLOR: "1", ConfigEnvVars.DATA: str(data_dir), ConfigEnvVars.CACHE: str(cache_dir), PublishEnvVars.REPO: "dev", "HATCH_SELF_TESTING": "true", "PYAPP_COMMAND_NAME": os.urandom(4).hex(), "GIT_AUTHOR_NAME": "Foo Bar", "GIT_AUTHOR_EMAIL": "[email protected]", "COLUMNS": "80", "LINES": "24", } if platform.windows: # pragma: no cover default_env_vars["COMSPEC"] = "cmd.exe" else: default_env_vars["SHELL"] = "sh" with temp_dir.as_cwd(default_env_vars): os.environ.pop(AppEnvVars.ENV_ACTIVE, None) os.environ.pop(AppEnvVars.FORCE_COLOR, None) yield temp_dir @dataclass class PipCompileFixture: """ Testing Fixture Data Container """ __test__ = False isolation: pathlib.Path toml_doc: tomlkit.TOMLDocument pyproject: pathlib.Path project: Project platform: Platform isolated_data_dir: pathlib.Path application: Application = field(init=False)
""" Shared fixtures for tests. """ @pytest.fixture def mock_check_command() -> Generator[patch, None, None]: """ Disable the `plugin_check_command` for testing """ with patch("hatch_pip_compile.plugin.PipCompileEnvironment.plugin_check_command") as mock: mock.return_value = CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b"") yield mock @pytest.fixture def platform() -> Platform: """ Platform """ return Platform() @pytest.fixture def isolation(platform: Platform) -> Generator[Path, None, None]: """ Isolated hatch environment for testing. """ with temp_directory() as temp_dir: data_dir = pathlib.Path(__file__).parent / "data" shutil.copytree(data_dir, temp_dir, dirs_exist_ok=True) data_dir = temp_dir / "data" data_dir.mkdir() cache_dir = temp_dir / "cache" cache_dir.mkdir() default_env_vars = { AppEnvVars.NO_COLOR: "1", ConfigEnvVars.DATA: str(data_dir), ConfigEnvVars.CACHE: str(cache_dir), PublishEnvVars.REPO: "dev", "HATCH_SELF_TESTING": "true", "PYAPP_COMMAND_NAME": os.urandom(4).hex(), "GIT_AUTHOR_NAME": "Foo Bar", "GIT_AUTHOR_EMAIL": "[email protected]", "COLUMNS": "80", "LINES": "24", } if platform.windows: # pragma: no cover default_env_vars["COMSPEC"] = "cmd.exe" else: default_env_vars["SHELL"] = "sh" with temp_dir.as_cwd(default_env_vars): os.environ.pop(AppEnvVars.ENV_ACTIVE, None) os.environ.pop(AppEnvVars.FORCE_COLOR, None) yield temp_dir @dataclass class PipCompileFixture: """ Testing Fixture Data Container """ __test__ = False isolation: pathlib.Path toml_doc: tomlkit.TOMLDocument pyproject: pathlib.Path project: Project platform: Platform isolated_data_dir: pathlib.Path application: Application = field(init=False)
default_environment: PipCompileEnvironment = field(init=False)
3
2023-11-10 00:34:00+00:00
8k
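A note on the environment-variable controls quoted in `pip_compile_cli` above: `PIP_COMPILE_UPGRADE` and `PIP_COMPILE_UPGRADE_PACKAGE` are translated into pip-compile flags, while `PIP_COMPILE_DISABLE` makes a required lockfile update raise `HatchPipCompileError` instead of running. Below is a standalone restatement of that flag-building logic as a sketch; the helper name is ours, not part of the plugin's API.

import os
from typing import List

# Hypothetical helper mirroring the upgrade-flag logic in pip_compile_cli above.
def upgrade_args_from_env() -> List[str]:
    args: List[str] = []
    if os.getenv("PIP_COMPILE_UPGRADE"):
        args.append("--upgrade")
    packages = os.getenv("PIP_COMPILE_UPGRADE_PACKAGE") or ""
    for package in packages.split(","):
        if package.strip():
            args.append(f"--upgrade-package={package.strip()}")
    return args

# e.g. PIP_COMPILE_UPGRADE_PACKAGE="requests, urllib3" yields
# ["--upgrade-package=requests", "--upgrade-package=urllib3"]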
zhang-tao-whu/DVIS_Plus
mask2former/modeling/pixel_decoder/fpn.py
[ { "identifier": "PositionEmbeddingSine", "path": "mask2former/modeling/transformer_decoder/position_encoding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n\n def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n super().__init__()\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n self.scale = scale\n\n def forward(self, x, mask=None):\n if mask is None:\n mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n eps = 1e-6\n y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n pos_x = torch.stack(\n (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4\n ).flatten(3)\n pos_y = torch.stack(\n (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4\n ).flatten(3)\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos\n \n def __repr__(self, _repr_indent=4):\n head = \"Positional encoding \" + self.__class__.__name__\n body = [\n \"num_pos_feats: {}\".format(self.num_pos_feats),\n \"temperature: {}\".format(self.temperature),\n \"normalize: {}\".format(self.normalize),\n \"scale: {}\".format(self.scale),\n ]\n # _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "TransformerEncoder", "path": "mask2former/modeling/transformer_decoder/transformer.py", "snippet": "class TransformerEncoder(nn.Module):\n def __init__(self, encoder_layer, num_layers, norm=None):\n super().__init__()\n self.layers = _get_clones(encoder_layer, num_layers)\n self.num_layers = num_layers\n self.norm = norm\n\n def forward(\n self,\n src,\n mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n ):\n output = src\n\n for layer in self.layers:\n output = layer(\n output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos\n )\n\n if self.norm is not None:\n output = self.norm(output)\n\n return output" }, { "identifier": "TransformerEncoderLayer", "path": "mask2former/modeling/transformer_decoder/transformer.py", "snippet": "class TransformerEncoderLayer(nn.Module):\n def __init__(\n self,\n d_model,\n nhead,\n dim_feedforward=2048,\n dropout=0.1,\n activation=\"relu\",\n normalize_before=False,\n ):\n super().__init__()\n self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)\n # Implementation of Feedforward model\n self.linear1 = nn.Linear(d_model, dim_feedforward)\n self.dropout = nn.Dropout(dropout)\n self.linear2 = nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = nn.LayerNorm(d_model)\n self.norm2 = nn.LayerNorm(d_model)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n 
self.activation = _get_activation_fn(activation)\n self.normalize_before = normalize_before\n\n def with_pos_embed(self, tensor, pos: Optional[Tensor]):\n return tensor if pos is None else tensor + pos\n\n def forward_post(\n self,\n src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n ):\n q = k = self.with_pos_embed(src, pos)\n src2 = self.self_attn(\n q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask\n )[0]\n src = src + self.dropout1(src2)\n src = self.norm1(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def forward_pre(\n self,\n src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n ):\n src2 = self.norm1(src)\n q = k = self.with_pos_embed(src2, pos)\n src2 = self.self_attn(\n q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask\n )[0]\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src\n\n def forward(\n self,\n src,\n src_mask: Optional[Tensor] = None,\n src_key_padding_mask: Optional[Tensor] = None,\n pos: Optional[Tensor] = None,\n ):\n if self.normalize_before:\n return self.forward_pre(src, src_mask, src_key_padding_mask, pos)\n return self.forward_post(src, src_mask, src_key_padding_mask, pos)" }, { "identifier": "_get_clones", "path": "mask2former/modeling/transformer_decoder/transformer.py", "snippet": "def _get_clones(module, N):\n return nn.ModuleList([copy.deepcopy(module) for i in range(N)])" }, { "identifier": "_get_activation_fn", "path": "mask2former/modeling/transformer_decoder/transformer.py", "snippet": "def _get_activation_fn(activation):\n \"\"\"Return an activation function given a string\"\"\"\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise RuntimeError(f\"activation should be relu/gelu, not {activation}.\")" } ]
import logging import numpy as np import fvcore.nn.weight_init as weight_init import torch from typing import Callable, Dict, List, Optional, Tuple, Union from torch import nn from torch.nn import functional as F from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_ from torch.cuda.amp import autocast from detectron2.config import configurable from detectron2.layers import Conv2d, DeformConv, ShapeSpec, get_norm from detectron2.modeling import SEM_SEG_HEADS_REGISTRY from ..transformer_decoder.position_encoding import PositionEmbeddingSine from ..transformer_decoder.transformer import TransformerEncoder, TransformerEncoderLayer, _get_clones, _get_activation_fn
3,838
ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM return ret def forward_features(self, features): multi_scale_features = [] num_cur_levels = 0 # Reverse feature maps into top-down order (from low to high resolution) for idx, f in enumerate(self.in_features[::-1]): x = features[f] lateral_conv = self.lateral_convs[idx] output_conv = self.output_convs[idx] if lateral_conv is None: y = output_conv(x) else: cur_fpn = lateral_conv(x) # Following FPN implementation, we use nearest upsampling here y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest") y = output_conv(y) if num_cur_levels < self.maskformer_num_feature_levels: multi_scale_features.append(y) num_cur_levels += 1 return self.mask_features(y), None, multi_scale_features def forward(self, features, targets=None): logger = logging.getLogger(__name__) logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.") return self.forward_features(features) class TransformerEncoderOnly(nn.Module): def __init__( self, d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, ): super().__init__() encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) encoder_norm = nn.LayerNorm(d_model) if normalize_before else None self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) self._reset_parameters() self.d_model = d_model self.nhead = nhead def _reset_parameters(self): for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def forward(self, src, mask, pos_embed): # flatten NxCxHxW to HWxNxC bs, c, h, w = src.shape src = src.flatten(2).permute(2, 0, 1) pos_embed = pos_embed.flatten(2).permute(2, 0, 1) if mask is not None: mask = mask.flatten(1) memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) return memory.permute(1, 2, 0).view(bs, c, h, w) # This is a modified FPN decoder with extra Transformer encoder that processes the lowest-resolution feature map. @SEM_SEG_HEADS_REGISTRY.register() class TransformerEncoderPixelDecoder(BasePixelDecoder): @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, transformer_dropout: float, transformer_nheads: int, transformer_dim_feedforward: int, transformer_enc_layers: int, transformer_pre_norm: bool, conv_dim: int, mask_dim: int, norm: Optional[Union[str, Callable]] = None, ): """ NOTE: this interface is experimental. Args: input_shape: shapes (channels and stride) of the input features transformer_dropout: dropout probability in transformer transformer_nheads: number of heads in transformer transformer_dim_feedforward: dimension of feedforward network transformer_enc_layers: number of transformer encoder layers transformer_pre_norm: whether to use pre-layernorm or not conv_dims: number of output channels for the intermediate conv layers. mask_dim: number of output channels for the final conv layer. 
norm (str or callable): normalization for all conv layers """ super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm) input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" feature_strides = [v.stride for k, v in input_shape] feature_channels = [v.channels for k, v in input_shape] in_channels = feature_channels[len(self.in_features) - 1] self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1) weight_init.c2_xavier_fill(self.input_proj) self.transformer = TransformerEncoderOnly( d_model=conv_dim, dropout=transformer_dropout, nhead=transformer_nheads, dim_feedforward=transformer_dim_feedforward, num_encoder_layers=transformer_enc_layers, normalize_before=transformer_pre_norm, ) N_steps = conv_dim // 2
# Copyright (c) Facebook, Inc. and its affiliates. def build_pixel_decoder(cfg, input_shape): """ Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`. """ name = cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME model = SEM_SEG_HEADS_REGISTRY.get(name)(cfg, input_shape) forward_features = getattr(model, "forward_features", None) if not callable(forward_features): raise ValueError( "Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. " f"Please implement forward_features for {name} to only return mask features." ) return model # This is a modified FPN decoder. @SEM_SEG_HEADS_REGISTRY.register() class BasePixelDecoder(nn.Module): @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, conv_dim: int, mask_dim: int, norm: Optional[Union[str, Callable]] = None, ): """ NOTE: this interface is experimental. Args: input_shape: shapes (channels and stride) of the input features conv_dims: number of output channels for the intermediate conv layers. mask_dim: number of output channels for the final conv layer. norm (str or callable): normalization for all conv layers """ super().__init__() input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" feature_channels = [v.channels for k, v in input_shape] lateral_convs = [] output_convs = [] use_bias = norm == "" for idx, in_channels in enumerate(feature_channels): if idx == len(self.in_features) - 1: output_norm = get_norm(norm, conv_dim) output_conv = Conv2d( in_channels, conv_dim, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, activation=F.relu, ) weight_init.c2_xavier_fill(output_conv) self.add_module("layer_{}".format(idx + 1), output_conv) lateral_convs.append(None) output_convs.append(output_conv) else: lateral_norm = get_norm(norm, conv_dim) output_norm = get_norm(norm, conv_dim) lateral_conv = Conv2d( in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm ) output_conv = Conv2d( conv_dim, conv_dim, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=output_norm, activation=F.relu, ) weight_init.c2_xavier_fill(lateral_conv) weight_init.c2_xavier_fill(output_conv) self.add_module("adapter_{}".format(idx + 1), lateral_conv) self.add_module("layer_{}".format(idx + 1), output_conv) lateral_convs.append(lateral_conv) output_convs.append(output_conv) # Place convs into top-down order (from low to high resolution) # to make the top-down computation in forward clearer. 
self.lateral_convs = lateral_convs[::-1] self.output_convs = output_convs[::-1] self.mask_dim = mask_dim self.mask_features = Conv2d( conv_dim, mask_dim, kernel_size=3, stride=1, padding=1, ) weight_init.c2_xavier_fill(self.mask_features) self.maskformer_num_feature_levels = 3 # always use 3 scales @classmethod def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]): ret = {} ret["input_shape"] = { k: v for k, v in input_shape.items() if k in cfg.MODEL.SEM_SEG_HEAD.IN_FEATURES } ret["conv_dim"] = cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM ret["norm"] = cfg.MODEL.SEM_SEG_HEAD.NORM return ret def forward_features(self, features): multi_scale_features = [] num_cur_levels = 0 # Reverse feature maps into top-down order (from low to high resolution) for idx, f in enumerate(self.in_features[::-1]): x = features[f] lateral_conv = self.lateral_convs[idx] output_conv = self.output_convs[idx] if lateral_conv is None: y = output_conv(x) else: cur_fpn = lateral_conv(x) # Following FPN implementation, we use nearest upsampling here y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest") y = output_conv(y) if num_cur_levels < self.maskformer_num_feature_levels: multi_scale_features.append(y) num_cur_levels += 1 return self.mask_features(y), None, multi_scale_features def forward(self, features, targets=None): logger = logging.getLogger(__name__) logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.") return self.forward_features(features) class TransformerEncoderOnly(nn.Module): def __init__( self, d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, activation="relu", normalize_before=False, ): super().__init__() encoder_layer = TransformerEncoderLayer( d_model, nhead, dim_feedforward, dropout, activation, normalize_before ) encoder_norm = nn.LayerNorm(d_model) if normalize_before else None self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) self._reset_parameters() self.d_model = d_model self.nhead = nhead def _reset_parameters(self): for p in self.parameters(): if p.dim() > 1: nn.init.xavier_uniform_(p) def forward(self, src, mask, pos_embed): # flatten NxCxHxW to HWxNxC bs, c, h, w = src.shape src = src.flatten(2).permute(2, 0, 1) pos_embed = pos_embed.flatten(2).permute(2, 0, 1) if mask is not None: mask = mask.flatten(1) memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) return memory.permute(1, 2, 0).view(bs, c, h, w) # This is a modified FPN decoder with extra Transformer encoder that processes the lowest-resolution feature map. @SEM_SEG_HEADS_REGISTRY.register() class TransformerEncoderPixelDecoder(BasePixelDecoder): @configurable def __init__( self, input_shape: Dict[str, ShapeSpec], *, transformer_dropout: float, transformer_nheads: int, transformer_dim_feedforward: int, transformer_enc_layers: int, transformer_pre_norm: bool, conv_dim: int, mask_dim: int, norm: Optional[Union[str, Callable]] = None, ): """ NOTE: this interface is experimental. Args: input_shape: shapes (channels and stride) of the input features transformer_dropout: dropout probability in transformer transformer_nheads: number of heads in transformer transformer_dim_feedforward: dimension of feedforward network transformer_enc_layers: number of transformer encoder layers transformer_pre_norm: whether to use pre-layernorm or not conv_dims: number of output channels for the intermediate conv layers. mask_dim: number of output channels for the final conv layer. 
norm (str or callable): normalization for all conv layers """ super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm) input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride) self.in_features = [k for k, v in input_shape] # starting from "res2" to "res5" feature_strides = [v.stride for k, v in input_shape] feature_channels = [v.channels for k, v in input_shape] in_channels = feature_channels[len(self.in_features) - 1] self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1) weight_init.c2_xavier_fill(self.input_proj) self.transformer = TransformerEncoderOnly( d_model=conv_dim, dropout=transformer_dropout, nhead=transformer_nheads, dim_feedforward=transformer_dim_feedforward, num_encoder_layers=transformer_enc_layers, normalize_before=transformer_pre_norm, ) N_steps = conv_dim // 2
self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
0
2023-11-14 10:55:11+00:00
8k
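As a usage sketch for the `PositionEmbeddingSine` module quoted in this record's context (the import path is taken from the record; batch and spatial sizes are illustrative): with `mask=None` the module builds an all-False padding mask, and the returned embedding concatenates the y and x sine/cosine channels, so it has `2 * num_pos_feats` channels. That is why `TransformerEncoderPixelDecoder` above sets `N_steps = conv_dim // 2`.

import torch
from mask2former.modeling.transformer_decoder.position_encoding import PositionEmbeddingSine

pe = PositionEmbeddingSine(num_pos_feats=128, normalize=True)  # as with conv_dim = 256
x = torch.randn(2, 256, 32, 32)        # NxCxHxW feature map
pos = pe(x)                            # mask=None -> nothing is treated as padding
assert pos.shape == (2, 256, 32, 32)   # channels == 2 * num_pos_feats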
52phm/pylmkit
pylmkit/core/base.py
[ { "identifier": "read_yaml", "path": "pylmkit/utils/data_utils.py", "snippet": "def read_yaml(filepath):\n try:\n with open(filepath, encoding=\"utf-8\") as fp:\n result = yaml.load(fp, Loader=SafeLoader)\n except Exception as e:\n raise Exception(e)\n return result" }, { "identifier": "read_json", "path": "pylmkit/utils/data_utils.py", "snippet": "def read_json(filepath, mode='r', encoding='utf-8'):\n with open(filepath, mode, encoding=encoding) as fp:\n data = json.load(fp)\n return data" }, { "identifier": "write_yaml", "path": "pylmkit/utils/data_utils.py", "snippet": "def write_yaml(data, filepath, mode=\"w\", encoding='utf-8'):\n try:\n with open(filepath, mode=mode, encoding=encoding) as f:\n yaml.dump(data=data, stream=f, allow_unicode=True)\n except Exception as e:\n raise Exception(e)" }, { "identifier": "write_json", "path": "pylmkit/utils/data_utils.py", "snippet": "def write_json(data, filepath, mode='w', encoding='utf-8', ensure_ascii=False):\n with open(filepath, mode, encoding=encoding) as fp:\n json.dump(data, # 字典数据\n fp=fp, # open 文件\n ensure_ascii=ensure_ascii, # 确保中文无乱码\n )" }, { "identifier": "message_as_string", "path": "pylmkit/utils/data_utils.py", "snippet": "def message_as_string(memory_messages):\n messages_string = [f\"\\n{message['role']}: {message['content']}\" for message in memory_messages]\n return \"\".join(messages_string)" }, { "identifier": "document_as_dict", "path": "pylmkit/utils/data_utils.py", "snippet": "def document_as_dict(documents):\n document_dict = [{\"page_content\": doc.page_content, \"metadata\": doc.metadata,\n \"type\": doc.type} for doc in documents]\n return document_dict" }, { "identifier": "dict_as_document", "path": "pylmkit/utils/data_utils.py", "snippet": "def dict_as_document(doc_dict):\n document_dict = [Document(page_content=doc['page_content'],\n metadata=doc.get('metadata', {}),\n type=doc.get('type', 'Document')) for doc in doc_dict]\n return document_dict" }, { "identifier": "BaseLoader", "path": "pylmkit/perception/directory.py", "snippet": "FILE_LOADER_TYPE = Union[\n Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]\n]\ndef _is_visible(p: Path) -> bool:\n def __init__(\n self,\n path: str,\n glob: str = \"**/[!.]*\",\n suffixes: Optional[Sequence[str]] = None, # Modifying\n silent_errors: bool = False,\n load_hidden: bool = False,\n loader_cls=None,\n loader_kwargs: Union[dict, None] = None,\n recursive: bool = False,\n show_progress: bool = False,\n use_multithreading: bool = False,\n max_concurrency: int = 4,\n *,\n sample_size: int = 0,\n randomize_sample: bool = False,\n sample_seed: Union[int, None] = None,\n ):\n def load_file(\n self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]\n ) -> None:\n def load(self) -> List[Document]:\nclass DirectoryLoader(BaseLoader):" }, { "identifier": "text_as_document", "path": "pylmkit/utils/data_utils.py", "snippet": "def text_as_document(texts, metadatas=None, types=\"Document\"):\n documents = []\n if metadatas:\n if isinstance(types, str):\n for i, text in enumerate(texts):\n documents.append(Document(page_content=text, metadata=metadatas[i], type=types))\n else: # types is `list` mode\n for i, text in enumerate(texts):\n documents.append(Document(page_content=text, metadata=metadatas[i], type=types[i]))\n else:\n if isinstance(types, str):\n for i, text in enumerate(texts):\n documents.append(Document(page_content=text, type=types))\n else: # types is `list` mode\n for i, text in enumerate(texts):\n 
documents.append(Document(page_content=text, type=types[i]))\n return documents" }, { "identifier": "RecursiveCharacterTextSplitter", "path": "pylmkit/perception/directory.py", "snippet": "FILE_LOADER_TYPE = Union[\n Type[UnstructuredFileLoader], Type[TextLoader], Type[BSHTMLLoader]\n]\ndef _is_visible(p: Path) -> bool:\n def __init__(\n self,\n path: str,\n glob: str = \"**/[!.]*\",\n suffixes: Optional[Sequence[str]] = None, # Modifying\n silent_errors: bool = False,\n load_hidden: bool = False,\n loader_cls=None,\n loader_kwargs: Union[dict, None] = None,\n recursive: bool = False,\n show_progress: bool = False,\n use_multithreading: bool = False,\n max_concurrency: int = 4,\n *,\n sample_size: int = 0,\n randomize_sample: bool = False,\n sample_seed: Union[int, None] = None,\n ):\n def load_file(\n self, item: Path, path: Path, docs: List[Document], pbar: Optional[Any]\n ) -> None:\n def load(self) -> List[Document]:\nclass DirectoryLoader(BaseLoader):" }, { "identifier": "init_css", "path": "pylmkit/core/html.py", "snippet": "" }, { "identifier": "_zh", "path": "pylmkit/core/html.py", "snippet": "" } ]
from abc import ABC from pathlib import Path from tqdm import tqdm from pydantic import Field, BaseModel from pylmkit.utils.data_utils import read_yaml, read_json, write_yaml, write_json from pylmkit.utils.data_utils import message_as_string, document_as_dict, dict_as_document from typing import Any, List, Optional, Type, Union, Sequence, Literal from pylmkit.perception.directory import BaseLoader from pylmkit.utils.data_utils import text_as_document from pylmkit.perception.directory import RecursiveCharacterTextSplitter from functools import partial from pylmkit.core.html import init_css, init_footer, init_logo from pylmkit.core.html import _zh, _en import time import pandas as pd import streamlit as st
3,904
self.page_icon = self.lang.get('_page_icon', None)
        if self.footer_describe is None:
            self.footer_describe = self.lang.get('_footer_describe', '')
        if self.sidebar_title is None:
            self.sidebar_title = self.lang.get('_sidebar_title', '')
        if self.sidebar_describe is None:
            self.sidebar_describe = self.lang.get('_sidebar_describe', '')
        if self.logo1 is None:
            self.logo1 = self.lang.get('_logo1', '')
        if self.logo2 is None:
            self.logo2 = self.lang.get('_logo2', '')
        if self.greetings is None:
            self.greetings = self.lang.get('_greetings', '')
        if self.placeholder is None:
            self.placeholder = self.lang.get('_placeholder', '')
        if self.refer_name is None:
            self.refer_name = self.lang.get('_refer_name', 'refer')
        self.base_page()
        if "messages" not in st.session_state:
            st.session_state["messages"] = [{"role": "assistant", "content": self.greetings}]
        self.input_kwargs = {}
        st.session_state["output_kwargs"] = {}
        for msg in st.session_state.messages:
            st.chat_message(msg["role"]).write(msg["content"])
            # refer setting
            refer = msg.get("refer", False)
            if refer:
                with st.expander(label=self.refer_name, expanded=False):
                    st.markdown(refer, unsafe_allow_html=True)

    def _input(self, content, role="user", avatar="😄"):
        st.chat_message(role, avatar=avatar).write(content, unsafe_allow_html=True)
        msg = {"role": role, "content": content}
        st.session_state.messages.append(msg)

    def _output(self, content, refer=None, role="assistant"):
        # st.chat_message(role).write(content, unsafe_allow_html=True)
        with st.chat_message(role):
            content_placeholder = st.empty()
            full_content = ""
            # typewriter effect: reveal the response chunk by chunk
            for chunk in content:
                full_content += chunk
                time.sleep(0.01)
                content_placeholder.markdown(full_content + "▌")
            content_placeholder.markdown(full_content)
            if refer:  # refer setting
                with st.expander(label=self.refer_name, expanded=False):
                    st.markdown(refer, unsafe_allow_html=True)
        msg = {"role": role, "content": content, "refer": refer}
        st.session_state.messages.append(msg)

    def output_parse(self, output_param, output_result):
        refer = None
        if len(output_param) == 0:
            response = None
        elif len(output_param) == 1:
            response = output_result
            st.session_state["output_kwargs"][output_param[0]['name']] = response
        else:
            response = output_result[0]
            for i, arg in enumerate(output_param):
                st.session_state["output_kwargs"][arg['name']] = output_result[i]
                if arg['type'] == 'chat':
                    response = output_result[i]
                if arg['type'] == 'refer':
                    refer = output_result[i]
        return response, refer

    def run(self, obj, input_param: list, output_param: list):
        chat_variable = ""
        obj = self.wrapper(obj)
        for arg in input_param:
            if arg['type'] != 'chat':
                self.input_kwargs[arg['name']] = generate_input_widget(mode='sidebar', **arg)
            else:
                chat_variable = arg['name']
        if chat_variable:
            if prompt := st.chat_input(placeholder=self.placeholder):
                self.input_kwargs[chat_variable] = prompt
                self._input(content=prompt)
                with st.spinner('PyLMKit: Generating, please wait...'):
                    result = obj(**self.input_kwargs)
                    response, refer = self.output_parse(output_param, result)
                    self._output(content=response, refer=refer)
        else:
            with st.spinner('PyLMKit: Generating, please wait...'):
result = obj(**self.input_kwargs) response, refer = self.output_parse(output_param, result) # self._output(content=response, refer=refer) with st.expander(label="output", expanded=True): st.json(st.session_state["output_kwargs"], expanded=True) def wrapper(self, fun): return partial(fun) def param(self, label, type, value, mode='sidebar'): input_kwargs = { "label": label, "type": type, "value": value } key = f"{label}-{type}-{str(value)}" if key not in st.session_state.keys(): st.session_state[key] = "" renew_value = generate_input_widget( mode=mode, **input_kwargs ) return renew_value def base_page(self): st.set_page_config( page_title=self.title, layout=self.layout, page_icon=self.page_icon, ) st.markdown(init_css, unsafe_allow_html=True) if self.footer_describe:
class BaseMemory(object):
    human_prefix: str = "Human"
    ai_prefix: str = "AI"
    system_prefix: str = "System"

    def __init__(self, init_memory=None, streamlit_web=False):
        self.memory_messages = []
        self.streamlit_web = streamlit_web
        if self.streamlit_web:  # Streamlit reruns the script on interaction, so cache messages in session state
            if "memory" not in st.session_state:
                st.session_state["memory"] = self.memory_messages
        if isinstance(init_memory, list):
            self.memory_messages = init_memory
            if self.streamlit_web:
                st.session_state['memory'] = self.memory_messages
        if self.streamlit_web:  # pick up any messages cached by a previous rerun
            self.memory_messages = st.session_state['memory']

    def add(self, role, content, refer=''):
        """Add a message. Accepted roles: human / ai / system (plus common aliases)."""
        if role in ['user', 'User', 'USER', 'human', 'Human', 'HUMAN']:
            role = self.human_prefix
        elif role in ['ai', 'Ai', 'AI', 'assistant']:
            role = self.ai_prefix
        elif role in ['sys', 'system', 'System', 'SYS', 'SYSTEM']:
            role = self.system_prefix
        else:
            raise Exception(f"The role `{role}` does not exist")
        self.memory_messages.append(
            {"role": role, "content": content, "refer": refer, "date": time.strftime('%Y-%m-%d %H:%M:%S')})
        if self.streamlit_web:
            st.session_state['memory'] = self.memory_messages

    def to_csv(self, filepath, index=False, **kwargs):
        data = self.memory_messages
        pd.DataFrame(data).to_csv(filepath, index=index, **kwargs)

    def clear(self):
        self.memory_messages = []
        if self.streamlit_web:
            st.session_state['memory'] = self.memory_messages

    def _get(self, mode='message'):
        if mode == 'message':
            return self.memory_messages
        elif mode == 'string':
            return message_as_string(self.memory_messages)
        else:
            raise Exception(f"There is no such `{mode}` mode. Supported modes: message, string")


class BaseKnowledgeBase(object):
    def __init__(self, init_documents=None):
        self.documents = []
        self.splitter_documents = []
        if isinstance(init_documents, list):
            self.documents = init_documents

    @classmethod
    def load(cls, filepath, is_return=True, return_mode="doc", extend=True):
        if filepath.endswith('.json'):
            data = read_json(filepath)
        elif filepath.endswith('.yaml') or filepath.endswith('.yml'):
            data = read_yaml(filepath)  # data=[{},{}]
        else:
            raise Exception("The file type is not supported")
        data_dict_as_document = dict_as_document(data)
        result = cls()._base(documents=data_dict_as_document, return_mode=return_mode,
                             is_return=is_return, extend=extend)
        if is_return:
            return result

    @classmethod
    def add(cls, texts, metadatas=None, is_return=True, return_mode="doc", extend=True, types="Document"):
        data_dict_as_document = text_as_document(texts=texts, metadatas=metadatas, types=types)
        result = cls()._base(documents=data_dict_as_document, return_mode=return_mode,
                             is_return=is_return, extend=extend)
        if is_return:
            return result

    def split(self, splitter=None, chunk_size=500, chunk_overlap=100, return_mode='doc', **kwargs):
        if splitter is None:
            splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
                                                      chunk_overlap=chunk_overlap, **kwargs)
        self.splitter_documents = splitter.split_documents(self.documents)
        if return_mode == 'doc':
            return self.splitter_documents
        else:
            return document_as_dict(self.splitter_documents)

    def to_csv_loader(self, filepath, index=False, **kwargs):
        data = document_as_dict(self.documents)
        pd.DataFrame(data).to_csv(filepath, index=index, **kwargs)

    def to_csv_splitter(self,
                        filepath,
                        splitter=None,
                        chunk_size=500,
                        chunk_overlap=100,
                        index=False,
                        splitter_kwargs={},
                        csv_kwargs={}
                        ):
        if not self.splitter_documents:
            self.splitter_documents = self.split(splitter=splitter, chunk_size=chunk_size,
                                                 chunk_overlap=chunk_overlap, **splitter_kwargs)
        data = document_as_dict(self.splitter_documents)
        pd.DataFrame(data).to_csv(filepath, index=index, **csv_kwargs)

    def clear(self, mode='doc'):
        if mode == 'doc':
            self.documents = []
        else:
            self.splitter_documents = []

    def _base(self, documents, is_return=True, return_mode='doc', extend=True):
        if extend:
            self.documents.extend(documents)  # dict -> Document
            if is_return:
                if return_mode == 'doc':
                    return self.documents
                else:
                    return document_as_dict(self.documents)
        else:  # self.documents = documents  # when extend is False, return the new documents without storing them
            if is_return:
                if return_mode == 'doc':
                    return documents
                else:
                    return document_as_dict(documents)


# def load_multi_memory(path: str, suffixes=None, show_progress: bool = True):
#     data = []
#     if suffixes is None:
#         suffixes = [".yaml", '.json']
#     if show_progress:
#         for suffixe in tqdm(suffixes):
#             for filepath in tqdm(list(Path(path).rglob(f"*{suffixe}"))):
#                 try:
#                     data += load_memory(filepath)
#                 except Exception as e:
#                     raise e
#     else:
#         for suffixe in suffixes:
#             for filepath in list(Path(path).rglob(f"*{suffixe}")):
#                 try:
#                     data += load_memory(filepath)
#                 except Exception as e:
#                     raise e
#     return data


def input_widget(input1, input2, type, value):
    if type == "int":
        return st.number_input(format='%d', step=1, **input1)
    if type == "float":
        return st.number_input(format='%f', **input1)
    elif type in ['list', 'List', 'select']:
        return st.selectbox(options=value, **input2)
    elif type == "bool":
        if value in [True, 'True', 'true']:
            options = [True, False]
        else:
            options = [False, True]
        return st.radio(options=options, horizontal=True, **input2)
    elif type == "file":
        uploaded_file = st.file_uploader(**input2)
        if uploaded_file is not None:
            res = str(Path().cwd() / uploaded_file.name)
            with open(res, "wb") as f:
                f.write(uploaded_file.getbuffer())
        else:
            res = None
        return res
    elif type in ['multiselect']:
        return st.multiselect(options=value, **input2)
    else:
        return st.text_input(**input1)


def generate_input_widget(mode="main", **kwargs):  # Render an input widget in the frontend
    """mode: "main" (default) or "sidebar"."""
    label = kwargs.get('label', "")
    value = kwargs.get('value', None)
    name = kwargs.get('name', None)
    _input1 = {"label": label, "value": value, "key": f"{name}-{label}"}
    _input2 = {"label": label, "key": f"{name}-{label}"}
    _type = kwargs.get('type', None)  # int float bool string chat file
    if mode == 'main':
        return input_widget(
            input1=_input1,
            input2=_input2,
            type=_type,
            value=value
        )
    else:
        with st.sidebar:
            return input_widget(
                input1=_input1,
                input2=_input2,
                type=_type,
                value=value
            )


class BaseWebUI(object):
    def __init__(self,
                 title=None,
                 page_icon=None,
                 layout="centered",
                 language='en',
                 sidebar_title=None,
                 sidebar_describe=None,
                 footer_describe=None,
                 logo1=None,
                 logo2=None,
                 greetings=None,
                 placeholder=None,
                 refer_name=None,
                 ):
        self.title = title
        self.layout = layout
        self.page_icon = page_icon
        self.footer_describe = footer_describe
        self.sidebar_title = sidebar_title
        self.sidebar_describe = sidebar_describe
        self.logo1 = logo1
        self.logo2 = logo2
        self.greetings = greetings
        self.placeholder = placeholder
        self.refer_name = refer_name
        if language in ['zh', '中国', 'china']:
            self.lang = _zh
        else:
            self.lang = _en
        if self.title is None:
            self.title = self.lang.get('_title', '')
        if self.page_icon is None:
            self.page_icon = self.lang.get('_page_icon', None)
        if self.footer_describe is None:
            self.footer_describe = self.lang.get('_footer_describe', '')
        if self.sidebar_title is None:
            self.sidebar_title = self.lang.get('_sidebar_title', '')
        if self.sidebar_describe is None:
            self.sidebar_describe = self.lang.get('_sidebar_describe', '')
        if self.logo1 is None:
            self.logo1 = self.lang.get('_logo1', '')
        if self.logo2 is None:
            self.logo2 = self.lang.get('_logo2', '')
        if self.greetings is None:
            self.greetings = self.lang.get('_greetings', '')
        if self.placeholder is None:
            self.placeholder = self.lang.get('_placeholder', '')
        if self.refer_name is None:
            self.refer_name = self.lang.get('_refer_name', 'refer')
        self.base_page()
        if "messages" not in st.session_state:
            st.session_state["messages"] = [{"role": "assistant", "content": self.greetings}]
        self.input_kwargs = {}
        st.session_state["output_kwargs"] = {}
        for msg in st.session_state.messages:
            st.chat_message(msg["role"]).write(msg["content"])
            # refer setting
            refer = msg.get("refer", False)
            if refer:
                with st.expander(label=self.refer_name, expanded=False):
                    st.markdown(refer, unsafe_allow_html=True)

    def _input(self, content, role="user", avatar="😄"):
        st.chat_message(role, avatar=avatar).write(content, unsafe_allow_html=True)
        msg = {"role": role, "content": content}
        st.session_state.messages.append(msg)

    def _output(self, content, refer=None, role="assistant"):
        # st.chat_message(role).write(content, unsafe_allow_html=True)
        with st.chat_message(role):
            content_placeholder = st.empty()
            full_content = ""
            # typewriter effect: reveal the response chunk by chunk
            for chunk in content:
                full_content += chunk
                time.sleep(0.01)
                content_placeholder.markdown(full_content + "▌")
            content_placeholder.markdown(full_content)
            if refer:  # refer setting
                with st.expander(label=self.refer_name, expanded=False):
                    st.markdown(refer, unsafe_allow_html=True)
        msg = {"role": role, "content": content, "refer": refer}
        st.session_state.messages.append(msg)

    def output_parse(self, output_param, output_result):
        refer = None
        if len(output_param) == 0:
            response = None
        elif len(output_param) == 1:
            response = output_result
            st.session_state["output_kwargs"][output_param[0]['name']] = response
        else:
            response = output_result[0]
            for i, arg in enumerate(output_param):
                st.session_state["output_kwargs"][arg['name']] = output_result[i]
                if arg['type'] == 'chat':
                    response = output_result[i]
                if arg['type'] == 'refer':
                    refer = output_result[i]
        return response, refer

    def run(self, obj, input_param: list, output_param: list):
        chat_variable = ""
        obj = self.wrapper(obj)
        for arg in input_param:
            if arg['type'] != 'chat':
                self.input_kwargs[arg['name']] = generate_input_widget(mode='sidebar', **arg)
            else:
                chat_variable = arg['name']
        if chat_variable:
            if prompt := st.chat_input(placeholder=self.placeholder):
                self.input_kwargs[chat_variable] = prompt
                self._input(content=prompt)
                with st.spinner('PyLMKit: Generating, please wait...'):
                    result = obj(**self.input_kwargs)
                    response, refer = self.output_parse(output_param, result)
                    self._output(content=response, refer=refer)
        else:
            with st.spinner('PyLMKit: Generating, please wait...'):
result = obj(**self.input_kwargs) response, refer = self.output_parse(output_param, result) # self._output(content=response, refer=refer) with st.expander(label="output", expanded=True): st.json(st.session_state["output_kwargs"], expanded=True) def wrapper(self, fun): return partial(fun) def param(self, label, type, value, mode='sidebar'): input_kwargs = { "label": label, "type": type, "value": value } key = f"{label}-{type}-{str(value)}" if key not in st.session_state.keys(): st.session_state[key] = "" renew_value = generate_input_widget( mode=mode, **input_kwargs ) return renew_value def base_page(self): st.set_page_config( page_title=self.title, layout=self.layout, page_icon=self.page_icon, ) st.markdown(init_css, unsafe_allow_html=True) if self.footer_describe:
st.sidebar.markdown(init_footer.format(self.footer_describe), unsafe_allow_html=True)
10
2023-11-18 10:31:58+00:00
8k
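The `BaseWebUI.run()` method in the row above wires any callable to a Streamlit chat page: non-chat entries in `input_param` become sidebar widgets, the `chat` entry feeds the callable, and each tuple element of the result is routed by its declared output type (`chat` becomes the streamed reply, `refer` goes into the expander). A minimal usage sketch under those conventions; `echo_bot` and the parameter specs are hypothetical, and the script is assumed to be launched with `streamlit run app.py`:

# Hypothetical role: returns (chat_answer, refer_html) to match output_param below.
def echo_bot(query):
    return f"You said: {query}", "<p>(no sources)</p>"

ui = BaseWebUI(language="en")
ui.run(
    obj=echo_bot,
    input_param=[{"name": "query", "label": "query", "type": "chat"}],
    output_param=[
        {"name": "answer", "label": "answer", "type": "chat"},   # streamed to the chat pane
        {"name": "refer", "label": "refer", "type": "refer"},    # shown in the expander
    ],
)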
ej0cl6/TextEE
TextEE/models/UniST/EDtrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, config):\n for instance in instances:\n instance[\"extra_info\"] = None\n return instances\n \n def load_model(self, checkpoint=None):\n pass\n \n def train(self, train_data, dev_data, **kwargs):\n pass\n \n def predict(self, data, **kwargs):\n pass" }, { "identifier": "UniSTModel", "path": "TextEE/models/UniST/EDmodel.py", "snippet": "class UniSTModel(nn.Module):\n def __init__(self, config, tokenizer, type_set):\n super().__init__()\n self.config = config\n self.tokenizer = tokenizer\n self.type_set = type_set\n self.label_list = [x for x in sorted(self.type_set[\"trigger\"])]\n self.label_strings = [self.preprocess_label(x) for x in self.label_list]\n \n # base encoder\n if self.config.pretrained_model_name.startswith('bert-'):\n self.tokenizer.bos_token = self.tokenizer.cls_token\n self.tokenizer.eos_token = self.tokenizer.sep_token\n self.base_config = BertConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = BertModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n elif self.config.pretrained_model_name.startswith('roberta-'):\n self.base_config = RobertaConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = RobertaModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n elif self.config.pretrained_model_name.startswith('xlm-'):\n self.base_config = XLMRobertaConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = XLMRobertaModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n else:\n raise ValueError(f\"pretrained_model_name is not supported.\")\n \n self.base_model.resize_token_embeddings(len(self.tokenizer))\n \n def preprocess_label(self, label):\n label = label.lower()\n label = label.replace(\"_\", \" \")\n label = label.replace(\"-\", \" \")\n label = label.replace(\":\", \" \")\n return label\n \n def process_data(self, batch):\n \n sent_texts = []\n pos_texts = []\n neg_texts = []\n for tokens, triggers in zip(batch.batch_tokens, batch.batch_triggers):\n sample_idxs = np.arange(len(triggers))\n np.random.shuffle(sample_idxs)\n triggers_ = [triggers[i] for i in sample_idxs[:self.config.max_sample_trigger]]\n for trigger in triggers_:\n text = tokens[:trigger[0]] + [\"<t>\"] + tokens[trigger[0]:trigger[1]]+ [\"</t>\"] + tokens[trigger[1]:]\n text = \" \".join(text)\n sent_texts.append(text)\n \n pos_texts.append(self.preprocess_label(trigger[2]))\n \n neg_text = trigger[2]\n while neg_text == trigger[2]:\n neg_text = np.random.choice(list(self.type_set[\"trigger\"]))\n \n neg_texts.append(self.preprocess_label(neg_text))\n \n if len(sent_texts) == 0:\n return None, None, None, None, None, None\n \n sent_res = self.tokenizer(sent_texts, padding=True, return_tensors=\"pt\")\n sent_input_ids = sent_res[\"input_ids\"].cuda()\n sent_attention_mask = sent_res[\"attention_mask\"].cuda()\n \n pos_res = self.tokenizer(pos_texts, padding=True, return_tensors=\"pt\")\n pos_input_ids = pos_res[\"input_ids\"].cuda()\n pos_attention_mask = 
pos_res[\"attention_mask\"].cuda()\n\n neg_res = self.tokenizer(neg_texts, padding=True, return_tensors=\"pt\")\n neg_input_ids = neg_res[\"input_ids\"].cuda()\n neg_attention_mask = neg_res[\"attention_mask\"].cuda()\n \n return sent_input_ids, sent_attention_mask, pos_input_ids, pos_attention_mask, neg_input_ids, neg_attention_mask\n \n def embed(self, input_ids, attention_mask=None): \n outputs = self.base_model(input_ids, attention_mask=attention_mask, return_dict=True)\n embeddings = outputs[\"pooler_output\"]\n return embeddings\n \n def dist_fn(self, sent_embeddings, label_embeddings):\n return 1.0 - F.cosine_similarity(sent_embeddings, label_embeddings)\n \n def forward(self, batch):\n # process data\n sent_input_ids, sent_attention_mask, pos_input_ids, pos_attention_mask, neg_input_ids, neg_attention_mask = self.process_data(batch)\n if sent_input_ids is None:\n return None\n \n sent_embeddings = self.embed(sent_input_ids, sent_attention_mask)\n pos_embeddings = self.embed(pos_input_ids, pos_attention_mask)\n neg_embeddings = self.embed(neg_input_ids, neg_attention_mask)\n \n loss_fn = nn.TripletMarginWithDistanceLoss(distance_function=self.dist_fn, margin=self.config.margin)\n loss = loss_fn(sent_embeddings, pos_embeddings, neg_embeddings)\n\n return loss\n \n def predict(self, batch, batch_pred_spans):\n self.eval()\n with torch.no_grad():\n sent_texts = []\n for pred_spans, tokens in zip(batch_pred_spans, batch.batch_tokens):\n for pred_span in pred_spans:\n sent_text = tokens[:pred_span[0]] + [\"<t>\"] + tokens[pred_span[0]:pred_span[1]]+ [\"</t>\"] + tokens[pred_span[1]:]\n sent_text = \" \".join(sent_text)\n sent_texts.append(sent_text)\n n_sent_text = len(sent_texts)\n if n_sent_text == 0:\n return [[] for _ in range(len(batch_pred_spans))]\n \n sent_res = self.tokenizer(sent_texts, padding=True, return_tensors=\"pt\")\n sent_input_ids = sent_res[\"input_ids\"].cuda()\n sent_attention_mask = sent_res[\"attention_mask\"].cuda()\n sent_embeddings = self.embed(sent_input_ids, sent_attention_mask)\n \n label_res = self.tokenizer(self.label_strings, padding=True, return_tensors=\"pt\")\n label_input_ids = label_res[\"input_ids\"].cuda()\n label_attention_mask = label_res[\"attention_mask\"].cuda()\n label_embeddings = self.embed(label_input_ids, label_attention_mask)\n \n dists = np.zeros((n_sent_text, len(self.label_list)))\n \n for i in range(len(sent_embeddings)):\n sent_embedding = sent_embeddings[i].expand(label_embeddings.shape)\n dist = self.dist_fn(sent_embedding, label_embeddings).detach().cpu().numpy()\n dists[i] = dist\n \n preds = dists.argmin(axis=1)\n batch_pred_triggers = []\n idx = 0\n for pred_spans in batch_pred_spans:\n pred_triggers = []\n for pred_span in pred_spans:\n pred_trigger = (pred_span[0], pred_span[1], self.label_list[preds[idx]])\n pred_triggers.append(pred_trigger)\n idx += 1\n batch_pred_triggers.append(pred_triggers)\n \n self.train()\n \n return batch_pred_triggers" }, { "identifier": "SpanModel", "path": "TextEE/models/UniST/EDmodel.py", "snippet": "class SpanModel(nn.Module):\n def __init__(self, config, tokenizer):\n super().__init__()\n self.config = config\n self.tokenizer = tokenizer\n self.generate_tagging_vocab()\n \n # base encoder\n if self.config.pretrained_model_name.startswith('bert-'):\n self.tokenizer.bos_token = self.tokenizer.cls_token\n self.tokenizer.eos_token = self.tokenizer.sep_token\n self.base_config = BertConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = 
BertModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n elif self.config.pretrained_model_name.startswith('roberta-'):\n self.base_config = RobertaConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = RobertaModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n elif self.config.pretrained_model_name.startswith('xlm-'):\n self.base_config = XLMRobertaConfig.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir)\n self.base_model = XLMRobertaModel.from_pretrained(self.config.pretrained_model_name, \n cache_dir=self.config.cache_dir, \n output_hidden_states=True)\n else:\n raise ValueError(f\"pretrained_model_name is not supported.\")\n \n self.base_model.resize_token_embeddings(len(self.tokenizer))\n self.base_model_dim = self.base_config.hidden_size\n self.base_model_dropout = nn.Dropout(p=self.config.base_model_dropout)\n \n # local classifiers\n self.dropout = nn.Dropout(p=self.config.linear_dropout)\n feature_dim = self.base_model_dim\n\n self.span_label_ffn = Linears([feature_dim, self.config.linear_hidden_num, len(self.label_stoi)],\n dropout_prob=self.config.linear_dropout, \n bias=self.config.linear_bias, \n activation=self.config.linear_activation)\n if self.config.use_crf:\n self.span_crf = CRF(self.label_stoi, bioes=False)\n \n def generate_tagging_vocab(self):\n prefix = ['B', 'I']\n span_label_stoi = {'O': 0}\n for t in [\"Span\"]:\n for p in prefix:\n span_label_stoi['{}-{}'.format(p, t)] = len(span_label_stoi)\n\n self.label_stoi = span_label_stoi\n self.type_stoi = {t: i for i, t in enumerate([\"Span\"])}\n \n def get_span_seqlabels(self, spans, token_num, specify_span=None):\n labels = ['O'] * token_num\n count = 0\n for span in spans:\n start, end = span[0], span[1]\n if end > token_num:\n continue\n span_type = span[2]\n\n if specify_span is not None:\n if span_type != specify_span:\n continue\n\n if any([labels[i] != 'O' for i in range(start, end)]):\n count += 1\n continue\n\n labels[start] = 'B-{}'.format(span_type)\n for i in range(start + 1, end):\n labels[i] = 'I-{}'.format(span_type)\n \n return labels\n \n def token_lens_to_offsets(self, token_lens):\n \"\"\"Map token lengths to first word piece indices, used by the sentence\n encoder.\n :param token_lens (list): token lengths (word piece numbers)\n :return (list): first word piece indices (offsets)\n \"\"\"\n max_token_num = max([len(x) for x in token_lens])\n offsets = []\n for seq_token_lens in token_lens:\n seq_offsets = [0]\n for l in seq_token_lens[:-1]:\n seq_offsets.append(seq_offsets[-1] + l)\n offsets.append(seq_offsets + [-1] * (max_token_num - len(seq_offsets)))\n return offsets\n \n def token_lens_to_idxs(self, token_lens):\n \"\"\"Map token lengths to a word piece index matrix (for torch.gather) and a\n mask tensor.\n For example (only show a sequence instead of a batch):\n token lengths: [1,1,1,3,1]\n =>\n indices: [[0,0,0], [1,0,0], [2,0,0], [3,4,5], [6,0,0]]\n masks: [[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0],\n [0.33, 0.33, 0.33], [1.0, 0.0, 0.0]]\n Next, we use torch.gather() to select vectors of word pieces for each token,\n and average them as follows (incomplete code):\n outputs = torch.gather(bert_outputs, 1, indices) * masks\n outputs = bert_outputs.view(batch_size, seq_len, -1, self.bert_dim)\n outputs = bert_outputs.sum(2)\n :param token_lens (list): 
token lengths.\n :return: a index matrix and a mask tensor.\n \"\"\"\n max_token_num = max([len(x) for x in token_lens])\n max_token_len = max([max(x) for x in token_lens])\n idxs, masks = [], []\n for seq_token_lens in token_lens:\n seq_idxs, seq_masks = [], []\n offset = 0\n for token_len in seq_token_lens:\n seq_idxs.extend([i + offset for i in range(token_len)]\n + [-1] * (max_token_len - token_len))\n seq_masks.extend([1.0 / token_len] * token_len\n + [0.0] * (max_token_len - token_len))\n offset += token_len\n seq_idxs.extend([-1] * max_token_len * (max_token_num - len(seq_token_lens)))\n seq_masks.extend([0.0] * max_token_len * (max_token_num - len(seq_token_lens)))\n idxs.append(seq_idxs)\n masks.append(seq_masks)\n return idxs, masks, max_token_num, max_token_len\n \n def tag_paths_to_spans(self, paths, token_nums, vocab):\n \"\"\"\n Convert predicted tag paths to a list of spans (entity mentions or event\n triggers).\n :param paths: predicted tag paths.\n :return (list): a list (batch) of lists (sequence) of spans.\n \"\"\"\n batch_mentions = []\n itos = {i: s for s, i in vocab.items()}\n for i, path in enumerate(paths):\n mentions = []\n cur_mention = None\n path = path.tolist()[:token_nums[i].item()]\n for j, tag in enumerate(path):\n tag = itos[tag]\n if tag == 'O':\n prefix = tag = 'O'\n else:\n prefix, tag = tag.split('-', 1)\n if prefix == 'B':\n if cur_mention:\n mentions.append(cur_mention)\n cur_mention = [j, j + 1, tag]\n elif prefix == 'I':\n if cur_mention is None:\n # treat it as B-*\n cur_mention = [j, j + 1, tag]\n elif cur_mention[-1] == tag:\n cur_mention[1] = j + 1\n else:\n # treat it as B-*\n mentions.append(cur_mention)\n cur_mention = [j, j + 1, tag]\n else:\n if cur_mention:\n mentions.append(cur_mention)\n cur_mention = None\n if cur_mention:\n mentions.append(cur_mention)\n batch_mentions.append(mentions)\n \n return batch_mentions\n \n def process_data(self, batch):\n enc_idxs = []\n enc_attn = []\n span_seqidxs = []\n token_lens = []\n token_nums = []\n max_token_num = max(batch.batch_token_num)\n \n for tokens, pieces, spans, token_len, token_num in zip(batch.batch_tokens, batch.batch_pieces, batch.batch_spans, \n batch.batch_token_lens, batch.batch_token_num):\n \n piece_id = self.tokenizer.convert_tokens_to_ids(pieces)\n enc_idx = [self.tokenizer.convert_tokens_to_ids(self.tokenizer.bos_token)] + piece_id + [self.tokenizer.convert_tokens_to_ids(self.tokenizer.eos_token)]\n \n enc_idxs.append(enc_idx)\n enc_attn.append([1]*len(enc_idx)) \n \n span_seq = self.get_span_seqlabels(spans, len(tokens))\n token_lens.append(token_len)\n token_nums.append(token_num)\n if self.config.use_crf:\n span_seqidxs.append([self.label_stoi[s] for s in span_seq] + [0] * (max_token_num-len(tokens)))\n else:\n span_seqidxs.append([self.label_stoi[s] for s in span_seq] + [-100] * (max_token_num-len(tokens)))\n max_len = max([len(enc_idx) for enc_idx in enc_idxs])\n enc_idxs = torch.LongTensor([enc_idx + [self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)]*(max_len-len(enc_idx)) for enc_idx in enc_idxs])\n enc_attn = torch.LongTensor([enc_att + [0]*(max_len-len(enc_att)) for enc_att in enc_attn])\n enc_idxs = enc_idxs.cuda()\n enc_attn = enc_attn.cuda()\n span_seqidxs = torch.cuda.LongTensor(span_seqidxs)\n return enc_idxs, enc_attn, span_seqidxs, token_lens, torch.cuda.LongTensor(token_nums)\n \n def encode(self, piece_idxs, attention_masks, token_lens):\n \"\"\"Encode input sequences with BERT\n :param piece_idxs (LongTensor): word pieces indices\n :param 
attention_masks (FloatTensor): attention mask\n :param token_lens (list): token lengths\n \"\"\"\n batch_size, _ = piece_idxs.size()\n all_base_model_outputs = self.base_model(piece_idxs, attention_mask=attention_masks)\n base_model_outputs = all_base_model_outputs[0]\n if self.config.multi_piece_strategy == 'first':\n # select the first piece for multi-piece words\n offsets = token_lens_to_offsets(token_lens)\n offsets = piece_idxs.new(offsets) # batch x max_token_num\n # + 1 because the first vector is for [CLS]\n offsets = offsets.unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + 1\n base_model_outputs = torch.gather(base_model_outputs, 1, offsets)\n elif self.config.multi_piece_strategy == 'average':\n # average all pieces for multi-piece words\n idxs, masks, token_num, token_len = self.token_lens_to_idxs(token_lens)\n idxs = piece_idxs.new(idxs).unsqueeze(-1).expand(batch_size, -1, self.base_model_dim) + 1\n masks = base_model_outputs.new(masks).unsqueeze(-1)\n base_model_outputs = torch.gather(base_model_outputs, 1, idxs) * masks\n base_model_outputs = base_model_outputs.view(batch_size, token_num, token_len, self.base_model_dim)\n base_model_outputs = base_model_outputs.sum(2)\n else:\n raise ValueError(f'Unknown multi-piece token handling strategy: {self.config.multi_piece_strategy}')\n base_model_outputs = self.base_model_dropout(base_model_outputs)\n return base_model_outputs\n\n def span_id(self, base_model_outputs, token_nums, target=None, predict=False):\n loss = 0.0\n entities = None\n entity_label_scores = self.span_label_ffn(base_model_outputs)\n if self.config.use_crf:\n entity_label_scores_ = self.span_crf.pad_logits(entity_label_scores)\n if predict:\n _, entity_label_preds = self.span_crf.viterbi_decode(entity_label_scores_,\n token_nums)\n entities = self.tag_paths_to_spans(entity_label_preds, \n token_nums, \n self.label_stoi)\n else: \n entity_label_loglik = self.span_crf.loglik(entity_label_scores_, \n target, \n token_nums)\n loss -= entity_label_loglik.mean()\n else:\n if predict:\n entity_label_preds = torch.argmax(entity_label_scores, dim=-1)\n entities = tag_paths_to_spans(entity_label_preds, \n token_nums, \n self.label_stoi)\n else:\n loss = F.cross_entropy(entity_label_scores.view(-1, self.span_label_num), target.view(-1))\n\n return loss, entities\n\n def forward(self, batch):\n # process data\n enc_idxs, enc_attn, span_seqidxs, token_lens, token_nums = self.process_data(batch)\n \n # encoding\n base_model_outputs = self.encode(enc_idxs, enc_attn, token_lens)\n span_id_loss, _ = self.span_id(base_model_outputs, token_nums, span_seqidxs, predict=False)\n loss = span_id_loss\n\n return loss\n \n def predict(self, batch):\n self.eval()\n with torch.no_grad():\n # process data\n enc_idxs, enc_attn, _, token_lens, token_nums = self.process_data(batch)\n \n # encoding\n base_model_outputs = self.encode(enc_idxs, enc_attn, token_lens)\n _, spans = self.span_id(base_model_outputs, token_nums, predict=True)\n self.train()\n return spans" } ]
import os, sys, logging, tqdm, pprint
import torch
import numpy as np
import ipdb
from collections import namedtuple
from transformers import RobertaTokenizer, AutoTokenizer, get_linear_schedule_with_warmup
from torch.utils.data import DataLoader
from torch.optim import AdamW
from ..trainer import BasicTrainer
from .EDmodel import UniSTModel, SpanModel
from scorer import compute_ED_scores, print_scores
5,341
logger = logging.getLogger(__name__)

EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_pieces', 'batch_token_lens',
                  'batch_token_num', 'batch_text', 'batch_triggers', 'batch_spans']
EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields))

def ED_collate_fn(batch):
    return EDBatch(
        batch_doc_id=[instance["doc_id"] for instance in batch],
        batch_wnd_id=[instance["wnd_id"] for instance in batch],
        batch_tokens=[instance["tokens"] for instance in batch],
        batch_pieces=[instance["pieces"] for instance in batch],
        batch_token_lens=[instance["token_lens"] for instance in batch],
        batch_token_num=[instance["token_num"] for instance in batch],
        batch_text=[instance["text"] for instance in batch],
        batch_triggers=[instance["triggers"] for instance in batch],
        batch_spans=[instance["spans"] for instance in batch],
    )
logger = logging.getLogger(__name__)

EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_pieces', 'batch_token_lens',
                  'batch_token_num', 'batch_text', 'batch_triggers', 'batch_spans']
EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields))

def ED_collate_fn(batch):
    return EDBatch(
        batch_doc_id=[instance["doc_id"] for instance in batch],
        batch_wnd_id=[instance["wnd_id"] for instance in batch],
        batch_tokens=[instance["tokens"] for instance in batch],
        batch_pieces=[instance["pieces"] for instance in batch],
        batch_token_lens=[instance["token_lens"] for instance in batch],
        batch_token_num=[instance["token_num"] for instance in batch],
        batch_text=[instance["text"] for instance in batch],
        batch_triggers=[instance["triggers"] for instance in batch],
        batch_spans=[instance["spans"] for instance in batch],
    )
class UniSTEDTrainer(BasicTrainer):
0
2023-11-15 21:32:56+00:00
8k
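`ED_collate_fn` in the row above turns a list of per-instance dicts into a single `EDBatch` namedtuple with one list per field, which is exactly the shape a PyTorch `DataLoader` expects from a custom `collate_fn`. A minimal sketch of that wiring; the toy instance is hypothetical and far simpler than real TextEE preprocessing output:

from torch.utils.data import DataLoader

# One hypothetical preprocessed instance; real instances come from the TextEE
# data pipeline and carry genuine wordpiece/token alignments.
toy_instance = {
    "doc_id": "doc-0", "wnd_id": "doc-0-w0",
    "tokens": ["He", "retired", "."], "pieces": ["He", "retired", "."],
    "token_lens": [1, 1, 1], "token_num": 3, "text": "He retired .",
    "triggers": [(1, 2, "Personnel:End-Position")], "spans": [(1, 2, "Span")],
}

loader = DataLoader([toy_instance], batch_size=1, collate_fn=ED_collate_fn)
for batch in loader:
    # Each EDBatch field is a list with one entry per instance in the batch.
    print(batch.batch_doc_id)    # ['doc-0']
    print(batch.batch_triggers)  # [[(1, 2, 'Personnel:End-Position')]]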
isce-framework/snaphu-py
src/snaphu/_unwrap.py
[ { "identifier": "run_snaphu", "path": "src/snaphu/_snaphu.py", "snippet": "def run_snaphu(config_file: str | os.PathLike[str]) -> None:\n \"\"\"\n Run SNAPHU with the specified config file.\n\n Parameters\n ----------\n config_file : path-like\n The file path of a text file storing configuration parameters to pass to SNAPHU.\n \"\"\"\n if not Path(config_file).is_file():\n errmsg = f\"config file not found: {config_file}\"\n raise FileNotFoundError(errmsg)\n\n with get_snaphu_executable() as snaphu:\n args = [os.fspath(snaphu), \"-f\", os.fspath(config_file)]\n try:\n subprocess.run(args, stderr=subprocess.PIPE, check=True, text=True)\n except subprocess.CalledProcessError as e:\n errmsg = e.stderr.strip()\n raise RuntimeError(errmsg) from e" }, { "identifier": "BlockIterator", "path": "src/snaphu/_util.py", "snippet": "class BlockIterator(Iterable[tuple[slice, ...]]):\n \"\"\"\n An iterable over chunks of an N-dimensional array.\n\n `BlockIterator` represents a partitioning of a multidimensional array into\n regularly-sized non-overlapping blocks. Each block is represented by an index\n expression (i.e. a tuple of `slice` objects) that can be used to access the\n corresponding block of data from the partitioned array. The full set of blocks spans\n the entire array.\n\n Iterating over a `BlockIterator` object yields each block in unspecified order.\n \"\"\"\n\n shape: tuple[int, ...]\n \"\"\"tuple of int : The shape of the array to be partitioned into blocks.\"\"\"\n chunks: tuple[int, ...]\n \"\"\"\n tuple of int : The shape of a typical block. The last block along each axis may be\n smaller.\n \"\"\"\n\n def __init__(self, shape: int | Iterable[int], chunks: int | Iterable[int]):\n \"\"\"\n Construct a new `BlockIterator` object.\n\n Parameters\n ----------\n shape : int or iterable of int\n The shape of the array to be partitioned into blocks. Each dimension must be\n > 0.\n chunks : int or iterable of int\n The shape of a typical block. Must be the same length as `shape`. 
Each chunk\n dimension must be > 0.\n \"\"\"\n # Normalize `shape` and `chunks` into tuples of ints.\n shape = as_tuple_of_int(shape)\n chunks = as_tuple_of_int(chunks)\n\n if len(chunks) != len(shape):\n errmsg = (\n \"size mismatch: shape and chunks must have the same number of elements,\"\n f\" instead got len(shape) != len(chunks) ({len(shape)} !=\"\n f\" {len(chunks)})\"\n )\n raise ValueError(errmsg)\n\n if not all(n > 0 for n in shape):\n errmsg = f\"shape elements must all be > 0, instead got {shape}\"\n raise ValueError(errmsg)\n if any(n <= 0 for n in chunks):\n errmsg = f\"chunk elements must all be > 0, instead got {chunks}\"\n raise ValueError(errmsg)\n\n # XXX Workaround for `frozen=True`.\n object.__setattr__(self, \"shape\", shape)\n object.__setattr__(self, \"chunks\", chunks)\n\n def __iter__(self) -> Iterator[tuple[slice, ...]]:\n \"\"\"\n Iterate over blocks in unspecified order.\n\n Yields\n ------\n block : tuple of slice\n A tuple of slices that can be used to access the corresponding block of data\n from an array.\n \"\"\"\n # Number of blocks along each array axis.\n nblocks = ceil_divide(self.shape, self.chunks)\n\n # Iterate over blocks.\n for block_ind in itertools.product(*[range(n) for n in nblocks]):\n # Get the lower & upper index bounds for the current block.\n start = np.multiply(block_ind, self.chunks)\n stop = np.minimum(start + self.chunks, self.shape)\n\n # Yield a tuple of slice objects.\n yield tuple(itertools.starmap(slice, zip(start, stop)))" }, { "identifier": "scratch_directory", "path": "src/snaphu/_util.py", "snippet": "@contextmanager\ndef scratch_directory(\n dir_: str | os.PathLike[str] | None = None, *, delete: bool = True\n) -> Generator[Path, None, None]:\n \"\"\"\n Context manager that creates a (possibly temporary) file system directory.\n\n If `dir_` is a path-like object, a directory will be created at the specified\n file system path if it did not already exist. Otherwise, if `dir_` is None, a\n temporary directory will instead be created as though by ``tempfile.mkdtemp()``.\n\n The directory may be automatically removed from the file system upon exiting the\n context manager.\n\n Parameters\n ----------\n dir_ : path-like or None, optional\n Scratch directory path. If None, a temporary directory will be created. Defaults\n to None.\n delete : bool, optional\n If True, the directory and its contents are recursively removed from the\n file system upon exiting the context manager. Defaults to True.\n\n Yields\n ------\n pathlib.Path\n Scratch directory path. If `delete` was True, the directory will be removed from\n the file system upon exiting the context manager scope.\n \"\"\"\n if dir_ is None:\n scratchdir = Path(mkdtemp())\n else:\n scratchdir = Path(dir_)\n scratchdir.mkdir(parents=True, exist_ok=True)\n\n yield scratchdir\n\n if delete:\n shutil.rmtree(scratchdir)" }, { "identifier": "InputDataset", "path": "src/snaphu/io/_dataset.py", "snippet": "class InputDataset(Protocol):\n \"\"\"\n An array-like interface for reading input datasets.\n\n `InputDataset` defines the abstract interface that types must conform to in order\n to be valid inputs to the ``snaphu.unwrap()`` function. 
Such objects must export\n NumPy-like `dtype`, `shape`, and `ndim` attributes and must support NumPy-style\n slice-based indexing.\n\n See Also\n --------\n OutputDataset\n \"\"\"\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"numpy.dtype : Data-type of the array's elements.\"\"\"\n\n @property\n def shape(self) -> tuple[int, ...]:\n \"\"\"tuple of int : Tuple of array dimensions.\"\"\" # noqa: D403\n\n @property\n def ndim(self) -> int:\n \"\"\"int : Number of array dimensions.\"\"\" # noqa: D403\n\n def __getitem__(self, key: slice | tuple[slice, ...], /) -> ArrayLike:\n \"\"\"Read a block of data.\"\"\"" }, { "identifier": "OutputDataset", "path": "src/snaphu/io/_dataset.py", "snippet": "class OutputDataset(Protocol):\n \"\"\"\n An array-like interface for writing output datasets.\n\n `OutputDataset` defines the abstract interface that types must conform to in order\n to be valid outputs of the ``snaphu.unwrap()`` function. Such objects must export\n NumPy-like `dtype`, `shape`, and `ndim` attributes and must support NumPy-style\n slice-based indexing.\n\n See Also\n --------\n InputDataset\n \"\"\"\n\n @property\n def dtype(self) -> np.dtype:\n \"\"\"numpy.dtype : Data-type of the array's elements.\"\"\"\n\n @property\n def shape(self) -> tuple[int, ...]:\n \"\"\"tuple of int : Tuple of array dimensions.\"\"\" # noqa: D403\n\n @property\n def ndim(self) -> int:\n \"\"\"int : Number of array dimensions.\"\"\" # noqa: D403\n\n def __setitem__(self, key: slice | tuple[slice, ...], value: np.ndarray, /) -> None:\n \"\"\"Write a block of data.\"\"\"" } ]
import io
import os
import textwrap
import numpy as np
from dataclasses import dataclass
from pathlib import Path
from tempfile import mkstemp
from typing import cast, overload
from ._snaphu import run_snaphu
from ._util import BlockIterator, scratch_directory
from .io import InputDataset, OutputDataset
4,853
    belonging to any component are assigned a label of zero.

    Parameters
    ----------
    igram : snaphu.io.InputDataset
        The input interferogram. A 2-D complex-valued array. Not a Number (NaN)
        values in the array will be replaced with zeros.
    corr : snaphu.io.InputDataset
        The sample coherence magnitude. Must be a floating-point array with the same
        dimensions as the input interferogram. Valid coherence values are in the range
        [0, 1]. NaN values in the array will be replaced with zeros.
    nlooks : float
        The equivalent number of independent looks used to form the sample coherence.
        An estimate of the number of statistically independent samples averaged in the
        multilooked data, taking into account spatial correlation due to
        oversampling/filtering (see `Notes`_).
    cost : {'defo', 'smooth'}, optional
        Statistical cost mode. Defaults to 'smooth'.
    init : {'mst', 'mcf'}, optional
        Algorithm used for initialization of unwrapped phase gradients. Supported
        algorithms include Minimum Spanning Tree ('mst') and Minimum Cost Flow ('mcf').
        Defaults to 'mcf'.
    mask : snaphu.io.InputDataset or None, optional
        Binary mask of valid pixels. Zeros in this raster indicate interferogram
        pixels that should be masked out. If provided, it must have the same
        dimensions as the input interferogram and boolean or 8-bit integer datatype.
        Defaults to None.
    ntiles : (int, int), optional
        Number of tiles along the row/column directions. If `ntiles` is (1, 1), then
        the interferogram will be unwrapped as a single tile. Increasing the number of
        tiles may improve runtime and reduce peak memory utilization, but may also
        introduce tile boundary artifacts in the unwrapped result. Defaults to (1, 1).
    tile_overlap : int or (int, int), optional
        Overlap, in pixels, between neighboring tiles. Increasing overlap may help to
        avoid phase discontinuities between tiles. If `tile_overlap` is a scalar
        integer, the number of overlapping rows and columns will be the same.
        Defaults to 0.
    nproc : int, optional
        Maximum number of child processes to spawn for parallel tile unwrapping. If
        `nproc` is less than 1, use all available processors. Defaults to 1.
    scratchdir : path-like or None, optional
        Scratch directory where intermediate processing artifacts are written. If the
        specified directory does not exist, it will be created. If None, a temporary
        directory will be created as though by ``tempfile.TemporaryDirectory()``.
        Defaults to None.
    delete_scratch : bool, optional
        If True, the scratch directory will be automatically removed from the file
        system when the function exits. Otherwise, the scratch directory will be
        preserved. Defaults to True.
    unw : snaphu.io.OutputDataset or None, optional
        An optional output dataset to store the unwrapped phase, in radians. If
        provided, it must have the same dimensions as the input interferogram and
        floating-point datatype. If None, an output array will be allocated
        internally. Defaults to None.
    conncomp : snaphu.io.OutputDataset or None, optional
        An optional output dataset to store the connected component labels. If
        provided, it must have the same dimensions as the input interferogram and any
        integer datatype. If None, an output array will be allocated internally.
        Defaults to None.

    Returns
    -------
    unw : snaphu.io.OutputDataset
        The output unwrapped phase, in radians.
    conncomp : snaphu.io.OutputDataset
        The output connected component labels.

    Notes
    -----
    An estimate of the equivalent number of independent looks may be obtained by

    .. math:: n_e = k_r k_a \frac{d_r d_a}{\rho_r \rho_a}

    where :math:`k_r` and :math:`k_a` are the number of looks in range and azimuth,
    :math:`d_r` and :math:`d_a` are the (single-look) sample spacing in range and
    azimuth, and :math:`\rho_r` and :math:`\rho_a` are the range and azimuth
    resolution.

    References
    ----------
    .. [1] C. W. Chen and H. A. Zebker, "Two-dimensional phase unwrapping with use of
       statistical models for cost functions in nonlinear optimization," Journal of
       the Optical Society of America A, vol. 18, pp. 338-351 (2001).
    .. [2] C. W. Chen and H. A. Zebker, "Phase unwrapping for large SAR
       interferograms: Statistical segmentation and generalized network models," IEEE
       Transactions on Geoscience and Remote Sensing, vol. 40, pp. 1709-1719 (2002).
    """
    # If the `unw` and/or `conncomp` output datasets were not provided, allocate arrays
    # to store these outputs.
    if unw is None:
        unw = np.zeros(shape=igram.shape, dtype=np.float32)
    if conncomp is None:
        conncomp = np.zeros(shape=igram.shape, dtype=np.uint32)

    # Ensure that input & output datasets have valid dimensions and datatypes.
    check_shapes(igram, corr, unw, conncomp, mask)
    check_dtypes(igram, corr, unw, conncomp, mask)

    if nlooks < 1.0:
        errmsg = f"nlooks must be >= 1, instead got {nlooks}"
        raise ValueError(errmsg)

    if cost == "topo":
        errmsg = "'topo' cost mode is not currently supported"
        raise NotImplementedError(errmsg)
    cost_modes = {"defo", "smooth"}
    if cost not in cost_modes:
        errmsg = f"cost mode must be in {cost_modes}, instead got {cost!r}"
        raise ValueError(errmsg)

    init_methods = {"mst", "mcf"}
    if init not in init_methods:
        errmsg = f"init method must be in {init_methods}, instead got {init!r}"
        raise ValueError(errmsg)

    # Validate inputs related to tiling and coerce them to the expected types.
    ntiles, tile_overlap, nproc = normalize_and_validate_tiling_params(
        ntiles=ntiles, tile_overlap=tile_overlap, nproc=nproc
    )
from __future__ import annotations

__all__ = [
    "unwrap",
]


@dataclass(frozen=True)
class TilingParams:
    """
    SNAPHU configuration parameters affecting scene tiling and parallel processing.

    Parameters
    ----------
    ntilerow, ntilecol : int, optional
        Number of tiles along the row/column directions. If `ntilerow` and `ntilecol`
        are both 1 (the default), the interferogram will be unwrapped as a single
        tile.
    rowovrlp, colovrlp : int, optional
        Overlap, in number of rows/columns, between neighboring tiles. Defaults to 0.
    nproc : int, optional
        Maximum number of child processes to spawn for parallel tile unwrapping.
        Defaults to 1.
    """

    ntilerow: int = 1
    ntilecol: int = 1
    rowovrlp: int = 0
    colovrlp: int = 0
    nproc: int = 1

    def to_string(self) -> str:
        """
        Write SNAPHU tiling parameters to a string.

        Creates a multi-line string in SNAPHU configuration file format.

        Returns
        -------
        str
            The output string.
        """
        return textwrap.dedent(f"""\
            NTILEROW {self.ntilerow}
            NTILECOL {self.ntilecol}
            ROWOVRLP {self.rowovrlp}
            COLOVRLP {self.colovrlp}
            NPROC {self.nproc}
        """)


@dataclass(frozen=True)
class SnaphuConfig:
    """
    SNAPHU configuration parameters.

    Parameters
    ----------
    infile : path-like
        The input interferogram file path.
    corrfile : path-like
        The input coherence file path.
    outfile : path-like
        The output unwrapped phase file path.
    conncompfile : path-like
        The output connected component labels file path.
    linelength : int
        The line length, in samples, of the input interferogram data array.
    ncorrlooks : float
        The equivalent number of independent looks used to form the coherence data.
    statcostmode : str
        The statistical cost mode.
    initmethod : str
        The algorithm used for initializing the network solver routine.
    bytemaskfile : path-like or None, optional
        An optional file path of a byte mask file. If None, no mask is applied.
        Defaults to None.
    tiling_params : TilingParams or None, optional
        Optional additional configuration parameters affecting scene tiling and
        parallel processing. Defaults to None.
    """

    infile: str | os.PathLike[str]
    corrfile: str | os.PathLike[str]
    outfile: str | os.PathLike[str]
    conncompfile: str | os.PathLike[str]
    linelength: int
    ncorrlooks: float
    statcostmode: str
    initmethod: str
    bytemaskfile: str | os.PathLike[str] | None = None
    tiling_params: TilingParams | None = None

    def to_string(self) -> str:
        """
        Write SNAPHU configuration parameters to a string.

        Creates a multi-line string in SNAPHU configuration file format.

        Returns
        -------
        str
            The output string.
        """
        config = textwrap.dedent(f"""\
            INFILE {os.fspath(self.infile)}
            INFILEFORMAT COMPLEX_DATA
            CORRFILE {os.fspath(self.corrfile)}
            CORRFILEFORMAT FLOAT_DATA
            OUTFILE {os.fspath(self.outfile)}
            OUTFILEFORMAT FLOAT_DATA
            CONNCOMPFILE {os.fspath(self.conncompfile)}
            CONNCOMPOUTTYPE UINT
            LINELENGTH {self.linelength}
            NCORRLOOKS {self.ncorrlooks}
            STATCOSTMODE {self.statcostmode.upper()}
            INITMETHOD {self.initmethod.upper()}
        """)
        if self.bytemaskfile is not None:
            config += f"BYTEMASKFILE {os.fspath(self.bytemaskfile)}\n"
        if self.tiling_params is not None:
            config += self.tiling_params.to_string()
        return config

    def _to_file_textio(self, file_: io.TextIOBase, /) -> None:
        # Write config params to file.
        s = self.to_string()
        count = file_.write(s)

        # Check that the full text was successfully written to the file.
        if count != len(s):
            errmsg = "failed to write config params to file"
            raise RuntimeError(errmsg)

    def _to_file_pathlike(self, file_: str | os.PathLike[str], /) -> None:
        # Create the file's parent directory(ies) if they didn't already exist.
        p = Path(file_)
        p.parent.mkdir(parents=True, exist_ok=True)

        # Write config params to file.
        s = self.to_string()
        p.write_text(s)

    def to_file(self, file_: str | os.PathLike[str] | io.TextIOBase, /) -> None:
        """
        Write SNAPHU configuration parameters to a file.

        The resulting file is suitable for passing to the SNAPHU executable as a
        configuration file.

        Parameters
        ----------
        file_ : path-like or file-like
            The output file. May be an open text file or a file path. If the file and
            any of its parent directories do not exist, they will be created. If the
            path to an existing file is specified, the file will be overwritten.
        """
        if isinstance(file_, io.TextIOBase):
            self._to_file_textio(file_)
        elif isinstance(file_, (str, os.PathLike)):
            self._to_file_pathlike(file_)
        else:
            errmsg = (
                "to_file argument must be a path-like or file-like object, instead got"
                f" type={type(file_)}"
            )
            raise TypeError(errmsg)


def check_shapes(
    igram: InputDataset,
    corr: InputDataset,
    unw: OutputDataset,
    conncomp: OutputDataset,
    mask: InputDataset | None = None,
) -> None:
    """
    Check that the arguments are 2-D arrays with matching shapes.

    Parameters
    ----------
    igram : snaphu.io.InputDataset
        The input interferogram. Must be a 2-D array.
    corr : snaphu.io.InputDataset
        The input coherence. Must be a 2-D array with the same shape as `igram`.
    unw : snaphu.io.OutputDataset
        The output unwrapped phase. Must be a 2-D array with the same shape as
        `igram`.
    conncomp : snaphu.io.OutputDataset
        The output connected component labels. Must be a 2-D array with the same shape
        as `igram`.
    mask : snaphu.io.InputDataset or None, optional
        An optional binary mask of valid samples. If not None, it must be a 2-D array
        with the same shape as `igram`. Defaults to None.

    Raises
    ------
    ValueError
        If any array is not 2-D or has a different shape than the others.
    """
    if igram.ndim != 2:
        errmsg = f"igram must be 2-D, instead got ndim={igram.ndim}"
        raise ValueError(errmsg)

    def raise_shape_mismatch_error(
        name: str,
        arr: InputDataset | OutputDataset,
    ) -> None:
        errmsg = (
            f"shape mismatch: {name} and igram must have the same shape, instead got"
            f" {name}.shape={arr.shape} and {igram.shape=}"
        )
        raise ValueError(errmsg)

    if corr.shape != igram.shape:
        raise_shape_mismatch_error("corr", corr)
    if unw.shape != igram.shape:
        raise_shape_mismatch_error("unw", unw)
    if conncomp.shape != igram.shape:
        raise_shape_mismatch_error("conncomp", conncomp)
    if (mask is not None) and (mask.shape != igram.shape):
        raise_shape_mismatch_error("mask", mask)


def check_dtypes(
    igram: InputDataset,
    corr: InputDataset,
    unw: OutputDataset,
    conncomp: OutputDataset,
    mask: InputDataset | None = None,
) -> None:
    """
    Check that the arguments have valid datatypes.

    Parameters
    ----------
    igram : snaphu.io.InputDataset
        The input interferogram. Must be a complex-valued array.
    corr : snaphu.io.InputDataset
        The input coherence. Must be a real-valued array.
    unw : snaphu.io.OutputDataset
        The output unwrapped phase. Must be a real-valued array.
    conncomp : snaphu.io.OutputDataset
        The output connected component labels. Must be an integer-valued array.
    mask : snaphu.io.InputDataset or None, optional
        An optional binary mask of valid samples. If not None, it must be a boolean or
        8-bit integer array.

    Raises
    ------
    TypeError
        If any array has an unexpected datatype.
    """
    if not np.issubdtype(igram.dtype, np.complexfloating):
        errmsg = (
            f"igram must be a complex-valued array, instead got dtype={igram.dtype}"
        )
        raise TypeError(errmsg)

    if not np.issubdtype(corr.dtype, np.floating):
        errmsg = f"corr must be a real-valued array, instead got dtype={corr.dtype}"
        raise TypeError(errmsg)

    if not np.issubdtype(unw.dtype, np.floating):
        errmsg = f"unw must be a real-valued array, instead got dtype={unw.dtype}"
        raise TypeError(errmsg)

    if not np.issubdtype(conncomp.dtype, np.integer):
        errmsg = (
            "conncomp must be an integer-valued array, instead got"
            f" dtype={conncomp.dtype}"
        )
        raise TypeError(errmsg)

    if (
        (mask is not None)
        and (mask.dtype != np.bool_)
        and (mask.dtype != np.uint8)
        and (mask.dtype != np.int8)
    ):
        errmsg = (
            "mask must be a boolean or 8-bit integer array (or None), instead got"
            f" dtype={mask.dtype}"
        )
        raise TypeError(errmsg)


def normalize_and_validate_tiling_params(
    ntiles: tuple[int, int],
    tile_overlap: int | tuple[int, int],
    nproc: int,
) -> tuple[tuple[int, int], tuple[int, int], int]:
    """
    Normalize and validate inputs related to tiling and multiprocessing.

    Parameters
    ----------
    ntiles : (int, int)
        Number of tiles along the row/column directions.
    tile_overlap : int or (int, int)
        Overlap, in pixels, between neighboring tiles.
    nproc : int
        Maximum number of child processes to spawn for parallel tile unwrapping.

    Returns
    -------
    ntiles : (int, int)
        `ntiles` normalized to a pair of positive integers.
    tile_overlap : (int, int)
        `tile_overlap` normalized to a pair of nonnegative integers.
    nproc : int
        `nproc` as a positive integer.
    """
    # Normalize `ntiles` to a tuple and ensure its contents are two positive-valued
    # integers.
    ntiles = tuple(ntiles)  # type: ignore[assignment]
    if len(ntiles) != 2:
        errmsg = f"ntiles must be a pair of ints, instead got {ntiles=}"
        raise ValueError(errmsg)
    if not all(n >= 1 for n in ntiles):
        errmsg = f"ntiles may not contain negative or zero values, got {ntiles=}"
        raise ValueError(errmsg)

    # If `tile_overlap` is iterable, ensure it's a tuple. Otherwise, assume it's a
    # single integer.
    try:
        tile_overlap = tuple(tile_overlap)  # type: ignore[arg-type,assignment]
    except TypeError:
        tile_overlap = (tile_overlap, tile_overlap)  # type: ignore[assignment]

    # Convince static type checkers that `tile_overlap` is now a pair of ints.
    tile_overlap = cast(tuple[int, int], tile_overlap)

    # Ensure the contents of `tile_overlap` are two nonnegative integers.
    if len(tile_overlap) != 2:
        errmsg = (
            f"tile_overlap must be an int or pair of ints, instead got {tile_overlap=}"
        )
        raise ValueError(errmsg)
    if not all(n >= 0 for n in tile_overlap):
        errmsg = f"tile_overlap may not contain negative values, got {tile_overlap=}"
        raise ValueError(errmsg)

    # If `nproc` is less than 1, use all available processors. Fall back to serial
    # execution if the number of available processors cannot be determined.
    if nproc < 1:
        nproc = os.cpu_count() or 1

    return ntiles, tile_overlap, nproc


def copy_blockwise(
    src: InputDataset,
    dst: OutputDataset,
    chunks: tuple[int, int] = (1024, 1024),
    *,
    nan_to_zero: bool = False,
) -> None:
    """
    Copy the contents of `src` to `dst` block-by-block.

    Parameters
    ----------
    src : snaphu.io.InputDataset
        Source dataset.
    dst : snaphu.io.OutputDataset
        Destination dataset.
    chunks : (int, int), optional
        Block dimensions. Defaults to (1024, 1024).
    nan_to_zero : bool, optional
        If True, replace Not a Number (NaN) values in the array with zeros in the
        output. Defaults to False.
    """
    shape = src.shape
    if dst.shape != shape:
        errmsg = (
            "shape mismatch: src and dst must have the same shape, instead got"
            f" {src.shape=} and {dst.shape=}"
        )
        raise ValueError(errmsg)

    for block in BlockIterator(shape, chunks):
        if nan_to_zero:
            nan_mask = np.isnan(src[block])
            dst[block] = np.where(nan_mask, 0.0, src[block])
        else:
            dst[block] = src[block]


@overload
def unwrap(
    igram: InputDataset,
    corr: InputDataset,
    nlooks: float,
    cost: str = "smooth",
    init: str = "mcf",
    *,
    mask: InputDataset | None = None,
    ntiles: tuple[int, int] = (1, 1),
    tile_overlap: int | tuple[int, int] = 0,
    nproc: int = 1,
    scratchdir: str | os.PathLike[str] | None = None,
    delete_scratch: bool = True,
    unw: OutputDataset,
    conncomp: OutputDataset,
) -> tuple[OutputDataset, OutputDataset]: ...  # pragma: no cover


# If `unw` and `conncomp` aren't specified, return the outputs as two NumPy arrays.
@overload
def unwrap(
    igram: InputDataset,
    corr: InputDataset,
    nlooks: float,
    cost: str = "smooth",
    init: str = "mcf",
    *,
    mask: InputDataset | None = None,
    ntiles: tuple[int, int] = (1, 1),
    tile_overlap: int | tuple[int, int] = 0,
    nproc: int = 1,
    scratchdir: str | os.PathLike[str] | None = None,
    delete_scratch: bool = True,
) -> tuple[np.ndarray, np.ndarray]: ...  # pragma: no cover


def unwrap(  # type: ignore[no-untyped-def]
    igram,
    corr,
    nlooks,
    cost="smooth",
    init="mcf",
    *,
    mask=None,
    ntiles=(1, 1),
    tile_overlap=0,
    nproc=1,
    scratchdir=None,
    delete_scratch=True,
    unw=None,
    conncomp=None,
):
    r"""
    Unwrap an interferogram using SNAPHU.

    Performs 2-D phase unwrapping using the Statistical-Cost, Network-Flow Algorithm
    for Phase Unwrapping (SNAPHU)\ [1]_. The algorithm produces a Maximum a Posteriori
    (MAP) estimate of the unwrapped phase field by (approximately) solving a nonlinear
    network flow optimization problem.

    The outputs include the unwrapped phase and a corresponding array of connected
    component labels. Each connected component is a region of pixels in the solution
    that is believed to have been unwrapped in an internally self-consistent manner\
    [2]_. Each distinct region is assigned a unique positive integer label. Pixels not
    belonging to any component are assigned a label of zero.

    Parameters
    ----------
    igram : snaphu.io.InputDataset
        The input interferogram. A 2-D complex-valued array. Not a Number (NaN)
        values in the array will be replaced with zeros.
    corr : snaphu.io.InputDataset
        The sample coherence magnitude. Must be a floating-point array with the same
        dimensions as the input interferogram. Valid coherence values are in the range
        [0, 1]. NaN values in the array will be replaced with zeros.
    nlooks : float
        The equivalent number of independent looks used to form the sample coherence.
        An estimate of the number of statistically independent samples averaged in the
        multilooked data, taking into account spatial correlation due to
        oversampling/filtering (see `Notes`_).
    cost : {'defo', 'smooth'}, optional
        Statistical cost mode. Defaults to 'smooth'.
    init : {'mst', 'mcf'}, optional
        Algorithm used for initialization of unwrapped phase gradients. Supported
        algorithms include Minimum Spanning Tree ('mst') and Minimum Cost Flow ('mcf').
        Defaults to 'mcf'.
    mask : snaphu.io.InputDataset or None, optional
        Binary mask of valid pixels. Zeros in this raster indicate interferogram
        pixels that should be masked out. If provided, it must have the same
        dimensions as the input interferogram and boolean or 8-bit integer datatype.
        Defaults to None.
    ntiles : (int, int), optional
        Number of tiles along the row/column directions. If `ntiles` is (1, 1), then
        the interferogram will be unwrapped as a single tile. Increasing the number of
        tiles may improve runtime and reduce peak memory utilization, but may also
        introduce tile boundary artifacts in the unwrapped result. Defaults to (1, 1).
    tile_overlap : int or (int, int), optional
        Overlap, in pixels, between neighboring tiles. Increasing overlap may help to
        avoid phase discontinuities between tiles. If `tile_overlap` is a scalar
        integer, the number of overlapping rows and columns will be the same.
        Defaults to 0.
    nproc : int, optional
        Maximum number of child processes to spawn for parallel tile unwrapping. If
        `nproc` is less than 1, use all available processors. Defaults to 1.
    scratchdir : path-like or None, optional
        Scratch directory where intermediate processing artifacts are written. If the
        specified directory does not exist, it will be created. If None, a temporary
        directory will be created as though by ``tempfile.TemporaryDirectory()``.
        Defaults to None.
    delete_scratch : bool, optional
        If True, the scratch directory will be automatically removed from the file
        system when the function exits. Otherwise, the scratch directory will be
        preserved. Defaults to True.
    unw : snaphu.io.OutputDataset or None, optional
        An optional output dataset to store the unwrapped phase, in radians. If
        provided, it must have the same dimensions as the input interferogram and
        floating-point datatype. If None, an output array will be allocated
        internally. Defaults to None.
    conncomp : snaphu.io.OutputDataset or None, optional
        An optional output dataset to store the connected component labels. If
        provided, it must have the same dimensions as the input interferogram and any
        integer datatype. If None, an output array will be allocated internally.
        Defaults to None.

    Returns
    -------
    unw : snaphu.io.OutputDataset
        The output unwrapped phase, in radians.
    conncomp : snaphu.io.OutputDataset
        The output connected component labels.

    Notes
    -----
    An estimate of the equivalent number of independent looks may be obtained by

    .. math:: n_e = k_r k_a \frac{d_r d_a}{\rho_r \rho_a}

    where :math:`k_r` and :math:`k_a` are the number of looks in range and azimuth,
    :math:`d_r` and :math:`d_a` are the (single-look) sample spacing in range and
    azimuth, and :math:`\rho_r` and :math:`\rho_a` are the range and azimuth
    resolution.

    References
    ----------
    .. [1] C. W. Chen and H. A. Zebker, "Two-dimensional phase unwrapping with use of
       statistical models for cost functions in nonlinear optimization," Journal of
       the Optical Society of America A, vol. 18, pp. 338-351 (2001).
    .. [2] C. W. Chen and H. A. Zebker, "Phase unwrapping for large SAR
       interferograms: Statistical segmentation and generalized network models," IEEE
       Transactions on Geoscience and Remote Sensing, vol. 40, pp. 1709-1719 (2002).
    """
    # If the `unw` and/or `conncomp` output datasets were not provided, allocate arrays
    # to store these outputs.
    if unw is None:
        unw = np.zeros(shape=igram.shape, dtype=np.float32)
    if conncomp is None:
        conncomp = np.zeros(shape=igram.shape, dtype=np.uint32)

    # Ensure that input & output datasets have valid dimensions and datatypes.
    check_shapes(igram, corr, unw, conncomp, mask)
    check_dtypes(igram, corr, unw, conncomp, mask)

    if nlooks < 1.0:
        errmsg = f"nlooks must be >= 1, instead got {nlooks}"
        raise ValueError(errmsg)

    if cost == "topo":
        errmsg = "'topo' cost mode is not currently supported"
        raise NotImplementedError(errmsg)
    cost_modes = {"defo", "smooth"}
    if cost not in cost_modes:
        errmsg = f"cost mode must be in {cost_modes}, instead got {cost!r}"
        raise ValueError(errmsg)

    init_methods = {"mst", "mcf"}
    if init not in init_methods:
        errmsg = f"init method must be in {init_methods}, instead got {init!r}"
        raise ValueError(errmsg)

    # Validate inputs related to tiling and coerce them to the expected types.
    ntiles, tile_overlap, nproc = normalize_and_validate_tiling_params(
        ntiles=ntiles, tile_overlap=tile_overlap, nproc=nproc
    )
with scratch_directory(scratchdir, delete=delete_scratch) as dir_:
2
2023-11-16 21:48:58+00:00
8k
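The Notes section of the `unwrap()` docstring above gives the equivalent-number-of-looks formula. A small worked example under assumed, purely illustrative sensor numbers, followed by a call to the public `snaphu.unwrap()` API (which the row's `__all__` exports) on synthetic data; running it requires the SNAPHU executable that the package bundles:

import numpy as np
import snaphu

# Worked example of n_e = k_r * k_a * (d_r * d_a) / (rho_r * rho_a).
# The spacing/resolution values below are illustrative, not from a real mission.
k_r, k_a = 4, 1              # looks taken in range / azimuth
d_r, d_a = 2.3, 14.1         # single-look sample spacing (m)
rho_r, rho_a = 3.0, 21.0     # range / azimuth resolution (m)
nlooks = k_r * k_a * (d_r * d_a) / (rho_r * rho_a)   # ~2.06, satisfies nlooks >= 1

# A tiny synthetic interferogram: random phase, constant coherence.
rng = np.random.default_rng(0)
igram = np.exp(1j * rng.uniform(0.0, 2.0 * np.pi, size=(128, 128))).astype(np.complex64)
corr = np.full((128, 128), 0.8, dtype=np.float32)

# Outputs are allocated internally when `unw`/`conncomp` are omitted.
unw, conncomp = snaphu.unwrap(igram, corr, nlooks=nlooks, cost="smooth", init="mcf")
print(unw.dtype, conncomp.dtype)   # float32 uint32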
fofr/cog-sdxl-multi-controlnet-lora
predict.py
[ { "identifier": "WeightsDownloader", "path": "weights_downloader.py", "snippet": "class WeightsDownloader:\n @staticmethod\n def download_if_not_exists(url, dest):\n if not os.path.exists(dest):\n WeightsDownloader.download(url, dest)\n\n @staticmethod\n def download(url, dest):\n start = time.time()\n print(\"downloading url: \", url)\n print(\"downloading to: \", dest)\n subprocess.check_call([\"pget\", \"-x\", url, dest], close_fds=False)\n print(\"downloading took: \", time.time() - start)" }, { "identifier": "WeightsManager", "path": "weights_manager.py", "snippet": "class WeightsManager:\n def __init__(self, predictor):\n self.predictor = predictor\n self.weights_cache = WeightsDownloadCache()\n\n def load_trained_weights(self, weights, pipe):\n from no_init import no_init_or_tensor\n\n # weights can be a URLPath, which behaves in unexpected ways\n weights = str(weights)\n if self.predictor.tuned_weights == weights:\n print(\"skipping loading .. weights already loaded\")\n return\n\n self.predictor.tuned_weights = weights\n\n local_weights_cache = self.weights_cache.ensure(weights)\n\n # load UNET\n print(\"Loading fine-tuned model\")\n self.predictor.is_lora = False\n\n maybe_unet_path = os.path.join(local_weights_cache, \"unet.safetensors\")\n if not os.path.exists(maybe_unet_path):\n print(\"Does not have Unet. assume we are using LoRA\")\n self.predictor.is_lora = True\n\n if not self.predictor.is_lora:\n print(\"Loading Unet\")\n\n new_unet_params = load_file(\n os.path.join(local_weights_cache, \"unet.safetensors\")\n )\n # this should return _IncompatibleKeys(missing_keys=[...], unexpected_keys=[])\n pipe.unet.load_state_dict(new_unet_params, strict=False)\n\n else:\n print(\"Loading Unet LoRA\")\n\n unet = pipe.unet\n\n tensors = load_file(os.path.join(local_weights_cache, \"lora.safetensors\"))\n\n unet_lora_attn_procs = {}\n name_rank_map = {}\n for tk, tv in tensors.items():\n # up is N, d\n if tk.endswith(\"up.weight\"):\n proc_name = \".\".join(tk.split(\".\")[:-3])\n r = tv.shape[1]\n name_rank_map[proc_name] = r\n\n for name, attn_processor in unet.attn_processors.items():\n cross_attention_dim = (\n None\n if name.endswith(\"attn1.processor\")\n else unet.config.cross_attention_dim\n )\n if name.startswith(\"mid_block\"):\n hidden_size = unet.config.block_out_channels[-1]\n elif name.startswith(\"up_blocks\"):\n block_id = int(name[len(\"up_blocks.\")])\n hidden_size = list(reversed(unet.config.block_out_channels))[\n block_id\n ]\n elif name.startswith(\"down_blocks\"):\n block_id = int(name[len(\"down_blocks.\")])\n hidden_size = unet.config.block_out_channels[block_id]\n with no_init_or_tensor():\n module = LoRAAttnProcessor2_0(\n hidden_size=hidden_size,\n cross_attention_dim=cross_attention_dim,\n rank=name_rank_map[name],\n )\n unet_lora_attn_procs[name] = module.to(\"cuda\", non_blocking=True)\n\n unet.set_attn_processor(unet_lora_attn_procs)\n unet.load_state_dict(tensors, strict=False)\n\n # load text\n handler = TokenEmbeddingsHandler(\n [pipe.text_encoder, pipe.text_encoder_2], [pipe.tokenizer, pipe.tokenizer_2]\n )\n handler.load_embeddings(os.path.join(local_weights_cache, \"embeddings.pti\"))\n\n # load params\n with open(os.path.join(local_weights_cache, \"special_params.json\"), \"r\") as f:\n params = json.load(f)\n self.predictor.token_map = params\n\n self.predictor.tuned_model = True" }, { "identifier": "ControlNet", "path": "controlnet.py", "snippet": "class ControlNet:\n CONTROLNET_MODELS = [\n \"none\",\n \"edge_canny\",\n \"illusion\",\n 
\"depth_leres\",\n \"depth_midas\",\n \"soft_edge_pidi\",\n \"soft_edge_hed\",\n \"lineart\",\n \"lineart_anime\",\n \"openpose\",\n # Preprocessors without an XL model yet\n # \"straight_edge_mlsd\",\n # \"face_detector\",\n # \"content_shuffle\",\n # \"normal_bae\",\n # \"segementation_sam\",\n ]\n\n def __init__(self, predictor):\n WeightsDownloader.download_if_not_exists(CONTROLNET_URL, CONTROLNET_MODEL_CACHE)\n self.predictor = predictor\n self.controlnet_preprocessor = None\n self.models = {}\n\n def initialize_controlnet(self, model_name):\n print(\"Initializing\", model_name)\n return ControlNetModel.from_pretrained(\n model_name, cache_dir=CONTROLNET_MODEL_CACHE, torch_dtype=torch.float16\n )\n\n def get_model(self, controlnet_name):\n if controlnet_name not in self.models:\n if controlnet_name.startswith(\"edge_\"):\n self.models[controlnet_name] = self.initialize_controlnet(\"diffusers/controlnet-canny-sdxl-1.0\")\n elif controlnet_name.startswith(\"depth_\"):\n self.models[controlnet_name] = self.initialize_controlnet(\"diffusers/controlnet-depth-sdxl-1.0-small\")\n elif controlnet_name.startswith(\"soft_edge\") or controlnet_name.startswith(\"lineart\"):\n self.models[controlnet_name] = self.initialize_controlnet(\"SargeZT/controlnet-sd-xl-1.0-softedge-dexined\")\n elif controlnet_name == \"openpose\":\n self.models[controlnet_name] = self.initialize_controlnet(\"thibaud/controlnet-openpose-sdxl-1.0\")\n elif controlnet_name == \"illusion\":\n self.models[controlnet_name] = self.initialize_controlnet(\"monster-labs/control_v1p_sdxl_qrcode_monster\")\n return self.models.get(controlnet_name)\n\n def get_models(self, controlnet_names):\n models = [\n self.get_model(controlnet_name) for controlnet_name in controlnet_names\n ]\n return list(filter(None, models))\n\n def preprocess(self, image, controlnet_name):\n # Illusion model needs no preprocessing\n if controlnet_name == \"illusion\" or controlnet_name == \"none\":\n return image\n\n if self.controlnet_preprocessor is None:\n self.controlnet_preprocessor = ControlNetPreprocessor(self.predictor)\n\n return self.controlnet_preprocessor.process_image(image, controlnet_name)\n\n @staticmethod\n def get_controlnet_names():\n return ControlNet.CONTROLNET_MODELS" }, { "identifier": "SizingStrategy", "path": "sizing_strategy.py", "snippet": "class SizingStrategy:\n def __init__(self):\n pass\n\n def get_dimensions(self, image):\n original_width, original_height = image.size\n print(\n f\"Original dimensions: Width: {original_width}, Height: {original_height}\"\n )\n resized_width, resized_height = self.get_resized_dimensions(\n original_width, original_height\n )\n print(\n f\"Dimensions to resize to: Width: {resized_width}, Height: {resized_height}\"\n )\n return resized_width, resized_height\n\n def get_allowed_dimensions(self, base=LOWEST_DIMENSION, max_dim=MAX_DIMENSION):\n \"\"\"\n Function to generate allowed dimensions optimized around a base up to a max\n \"\"\"\n allowed_dimensions = []\n for i in range(base, max_dim + 1, 64):\n for j in range(base, max_dim + 1, 64):\n allowed_dimensions.append((i, j))\n return allowed_dimensions\n\n def get_resized_dimensions(self, width, height):\n allowed_dimensions = self.get_allowed_dimensions()\n aspect_ratio = width / height\n print(f\"Aspect Ratio: {aspect_ratio:.2f}\")\n # Find the closest allowed dimensions that maintain the aspect ratio\n # and are closest to the optimum dimension\n closest_dimensions = min(\n allowed_dimensions,\n key=lambda dim: abs(dim[0] / dim[1] - 
aspect_ratio)\n + abs(dim[0] - OPTIMUM_DIMENSION),\n )\n return closest_dimensions\n\n def resize_images(self, images, width, height):\n return [\n img.resize((width, height)) if img is not None else None for img in images\n ]\n\n def open_image(self, image_path):\n return Image.open(str(image_path)) if image_path is not None else None\n\n def apply(\n self,\n sizing_strategy,\n width,\n height,\n image=None,\n mask=None,\n control_1_image=None,\n control_2_image=None,\n control_3_image=None,\n ):\n image_keys = [\n \"input_image\",\n \"mask_image\",\n \"controlnet_1_image\",\n \"controlnet_2_image\",\n \"controlnet_3_image\",\n ]\n image_values = [image, mask, control_1_image, control_2_image, control_3_image]\n image_dict = {\n key: self.open_image(value).convert(\"RGB\") if value is not None else None\n for key, value in zip(image_keys, image_values)\n }\n\n if sizing_strategy in image_dict:\n print(f\"Resizing based on {sizing_strategy}\")\n width, height = self.get_dimensions(image_dict[sizing_strategy])\n else:\n print(\"Using given dimensions\")\n\n resized_images = self.resize_images(\n list(image_dict.values()),\n width,\n height,\n )\n\n return width, height, resized_images" } ]
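A minimal standalone sketch of the grid-snapping rule in the SizingStrategy snippet above. The constants LOWEST_DIMENSION, MAX_DIMENSION and OPTIMUM_DIMENSION are referenced but not defined in the excerpt, so the values below are illustrative assumptions only.

LOWEST_DIMENSION = 512    # assumed value, not defined in the excerpt
MAX_DIMENSION = 1536      # assumed value
OPTIMUM_DIMENSION = 1024  # assumed value

def get_allowed_dimensions(base=LOWEST_DIMENSION, max_dim=MAX_DIMENSION):
    # Every (width, height) pair on a 64-pixel grid between base and max_dim.
    return [(w, h)
            for w in range(base, max_dim + 1, 64)
            for h in range(base, max_dim + 1, 64)]

def get_resized_dimensions(width, height):
    aspect_ratio = width / height
    # Same key as the class: the pixel-distance term dominates, so this picks
    # the grid width nearest OPTIMUM_DIMENSION, with the aspect-ratio term
    # breaking ties among heights.
    return min(
        get_allowed_dimensions(),
        key=lambda dim: abs(dim[0] / dim[1] - aspect_ratio)
        + abs(dim[0] - OPTIMUM_DIMENSION),
    )

print(get_resized_dimensions(1920, 1080))  # -> (1024, 576) with these constants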
import os import time import numpy as np import torch from typing import List, Optional from cog import BasePredictor, Input, Path from diffusers import ( DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, PNDMScheduler, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLControlNetPipeline, StableDiffusionXLControlNetInpaintPipeline, StableDiffusionXLControlNetImg2ImgPipeline, ) from diffusers.pipelines.stable_diffusion.safety_checker import ( StableDiffusionSafetyChecker, ) from transformers import CLIPImageProcessor from weights_downloader import WeightsDownloader from weights_manager import WeightsManager from controlnet import ControlNet from sizing_strategy import SizingStrategy
3,716
SAFETY_URL = "https://weights.replicate.delivery/default/sdxl/safety-1.0.tar" class KarrasDPM: def from_config(config): return DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True) SCHEDULERS = { "DDIM": DDIMScheduler, "DPMSolverMultistep": DPMSolverMultistepScheduler, "HeunDiscrete": HeunDiscreteScheduler, "KarrasDPM": KarrasDPM, "K_EULER_ANCESTRAL": EulerAncestralDiscreteScheduler, "K_EULER": EulerDiscreteScheduler, "PNDM": PNDMScheduler, } class Predictor(BasePredictor): def load_trained_weights(self, weights, pipe): self.weights_manager.load_trained_weights(weights, pipe) def build_controlnet_pipeline(self, pipeline_class, controlnet_models): pipe = pipeline_class.from_pretrained( SDXL_MODEL_CACHE, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", vae=self.txt2img_pipe.vae, text_encoder=self.txt2img_pipe.text_encoder, text_encoder_2=self.txt2img_pipe.text_encoder_2, tokenizer=self.txt2img_pipe.tokenizer, tokenizer_2=self.txt2img_pipe.tokenizer_2, unet=self.txt2img_pipe.unet, scheduler=self.txt2img_pipe.scheduler, controlnet=self.controlnet.get_models(controlnet_models), ) pipe.to("cuda") return pipe def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" start = time.time() self.sizing_strategy = SizingStrategy() self.weights_manager = WeightsManager(self) self.tuned_model = False self.tuned_weights = None if str(weights) == "weights": weights = None print("Loading safety checker...") WeightsDownloader.download_if_not_exists(SAFETY_URL, SAFETY_CACHE) self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( SAFETY_CACHE, torch_dtype=torch.float16 ).to("cuda") self.feature_extractor = CLIPImageProcessor.from_pretrained(FEATURE_EXTRACTOR) WeightsDownloader.download_if_not_exists(SDXL_URL, SDXL_MODEL_CACHE) print("Loading sdxl txt2img pipeline...") self.txt2img_pipe = DiffusionPipeline.from_pretrained( SDXL_MODEL_CACHE, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ) self.is_lora = False if weights or os.path.exists("./trained-model"): self.load_trained_weights(weights, self.txt2img_pipe) self.txt2img_pipe.to("cuda") print("Loading SDXL img2img pipeline...") self.img2img_pipe = StableDiffusionXLImg2ImgPipeline( vae=self.txt2img_pipe.vae, text_encoder=self.txt2img_pipe.text_encoder, text_encoder_2=self.txt2img_pipe.text_encoder_2, tokenizer=self.txt2img_pipe.tokenizer, tokenizer_2=self.txt2img_pipe.tokenizer_2, unet=self.txt2img_pipe.unet, scheduler=self.txt2img_pipe.scheduler, ) self.img2img_pipe.to("cuda") print("Loading SDXL inpaint pipeline...") self.inpaint_pipe = StableDiffusionXLInpaintPipeline( vae=self.txt2img_pipe.vae, text_encoder=self.txt2img_pipe.text_encoder, text_encoder_2=self.txt2img_pipe.text_encoder_2, tokenizer=self.txt2img_pipe.tokenizer, tokenizer_2=self.txt2img_pipe.tokenizer_2, unet=self.txt2img_pipe.unet, scheduler=self.txt2img_pipe.scheduler, ) self.inpaint_pipe.to("cuda") print("Loading SDXL refiner pipeline...") # FIXME(ja): should the vae/text_encoder_2 be loaded from SDXL always? # - in the case of fine-tuned SDXL should we still? # FIXME(ja): if the answer to above is use VAE/Text_Encoder_2 from fine-tune # what does this imply about lora + refiner? 
does the refiner need to know about WeightsDownloader.download_if_not_exists(REFINER_URL, REFINER_MODEL_CACHE) print("Loading refiner pipeline...") self.refiner = DiffusionPipeline.from_pretrained( REFINER_MODEL_CACHE, text_encoder_2=self.txt2img_pipe.text_encoder_2, vae=self.txt2img_pipe.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ) self.refiner.to("cuda")
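A hedged sketch of how a SCHEDULERS table like the one above is typically applied at predict time; the excerpt does not show this step, so select_scheduler is a hypothetical helper. Note that plain KarrasDPM.from_config(config) works even without @staticmethod because the function is looked up on the class itself, so config binds to its only parameter.

def select_scheduler(pipe, scheduler_name, schedulers=SCHEDULERS):
    # Rebuilding from the current scheduler's config preserves the model's
    # timestep/beta settings while swapping the sampling algorithm.
    scheduler_class = schedulers[scheduler_name]
    pipe.scheduler = scheduler_class.from_config(pipe.scheduler.config)
    return pipe

# e.g.: self.txt2img_pipe = select_scheduler(self.txt2img_pipe, "KarrasDPM")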
SDXL_MODEL_CACHE = "./sdxl-cache" REFINER_MODEL_CACHE = "./refiner-cache" SAFETY_CACHE = "./safety-cache" FEATURE_EXTRACTOR = "./feature-extractor" SDXL_URL = "https://weights.replicate.delivery/default/sdxl/sdxl-vae-upcast-fix.tar" REFINER_URL = ( "https://weights.replicate.delivery/default/sdxl/refiner-no-vae-no-encoder-1.0.tar" ) SAFETY_URL = "https://weights.replicate.delivery/default/sdxl/safety-1.0.tar" class KarrasDPM: def from_config(config): return DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True) SCHEDULERS = { "DDIM": DDIMScheduler, "DPMSolverMultistep": DPMSolverMultistepScheduler, "HeunDiscrete": HeunDiscreteScheduler, "KarrasDPM": KarrasDPM, "K_EULER_ANCESTRAL": EulerAncestralDiscreteScheduler, "K_EULER": EulerDiscreteScheduler, "PNDM": PNDMScheduler, } class Predictor(BasePredictor): def load_trained_weights(self, weights, pipe): self.weights_manager.load_trained_weights(weights, pipe) def build_controlnet_pipeline(self, pipeline_class, controlnet_models): pipe = pipeline_class.from_pretrained( SDXL_MODEL_CACHE, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", vae=self.txt2img_pipe.vae, text_encoder=self.txt2img_pipe.text_encoder, text_encoder_2=self.txt2img_pipe.text_encoder_2, tokenizer=self.txt2img_pipe.tokenizer, tokenizer_2=self.txt2img_pipe.tokenizer_2, unet=self.txt2img_pipe.unet, scheduler=self.txt2img_pipe.scheduler, controlnet=self.controlnet.get_models(controlnet_models), ) pipe.to("cuda") return pipe def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" start = time.time() self.sizing_strategy = SizingStrategy() self.weights_manager = WeightsManager(self) self.tuned_model = False self.tuned_weights = None if str(weights) == "weights": weights = None print("Loading safety checker...") WeightsDownloader.download_if_not_exists(SAFETY_URL, SAFETY_CACHE) self.safety_checker = StableDiffusionSafetyChecker.from_pretrained( SAFETY_CACHE, torch_dtype=torch.float16 ).to("cuda") self.feature_extractor = CLIPImageProcessor.from_pretrained(FEATURE_EXTRACTOR) WeightsDownloader.download_if_not_exists(SDXL_URL, SDXL_MODEL_CACHE) print("Loading sdxl txt2img pipeline...") self.txt2img_pipe = DiffusionPipeline.from_pretrained( SDXL_MODEL_CACHE, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ) self.is_lora = False if weights or os.path.exists("./trained-model"): self.load_trained_weights(weights, self.txt2img_pipe) self.txt2img_pipe.to("cuda") print("Loading SDXL img2img pipeline...") self.img2img_pipe = StableDiffusionXLImg2ImgPipeline( vae=self.txt2img_pipe.vae, text_encoder=self.txt2img_pipe.text_encoder, text_encoder_2=self.txt2img_pipe.text_encoder_2, tokenizer=self.txt2img_pipe.tokenizer, tokenizer_2=self.txt2img_pipe.tokenizer_2, unet=self.txt2img_pipe.unet, scheduler=self.txt2img_pipe.scheduler, ) self.img2img_pipe.to("cuda") print("Loading SDXL inpaint pipeline...") self.inpaint_pipe = StableDiffusionXLInpaintPipeline( vae=self.txt2img_pipe.vae, text_encoder=self.txt2img_pipe.text_encoder, text_encoder_2=self.txt2img_pipe.text_encoder_2, tokenizer=self.txt2img_pipe.tokenizer, tokenizer_2=self.txt2img_pipe.tokenizer_2, unet=self.txt2img_pipe.unet, scheduler=self.txt2img_pipe.scheduler, ) self.inpaint_pipe.to("cuda") print("Loading SDXL refiner pipeline...") # FIXME(ja): should the vae/text_encoder_2 be loaded from SDXL always? # - in the case of fine-tuned SDXL should we still? 
# FIXME(ja): if the answer to above is use VAE/Text_Encoder_2 from fine-tune # what does this imply about lora + refiner? does the refiner need to know about WeightsDownloader.download_if_not_exists(REFINER_URL, REFINER_MODEL_CACHE) print("Loading refiner pipeline...") self.refiner = DiffusionPipeline.from_pretrained( REFINER_MODEL_CACHE, text_encoder_2=self.txt2img_pipe.text_encoder_2, vae=self.txt2img_pipe.vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16", ) self.refiner.to("cuda")
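A self-contained sketch of the LoRA rank discovery performed in WeightsManager.load_trained_weights (quoted in this row's context): for every ".up.weight" tensor the processor name is the key minus its last three dotted components, and the rank is the tensor's second dimension. The two-entry state dict below stands in for safetensors' load_file output.

import torch

tensors = {
    "mid_block.attentions.0.processor.to_q_lora.up.weight": torch.zeros(320, 4),
    "mid_block.attentions.0.processor.to_q_lora.down.weight": torch.zeros(4, 320),
}

name_rank_map = {}
for tk, tv in tensors.items():
    if tk.endswith("up.weight"):           # up is (N, d); d is the LoRA rank
        proc_name = ".".join(tk.split(".")[:-3])
        name_rank_map[proc_name] = tv.shape[1]

print(name_rank_map)  # {'mid_block.attentions.0.processor': 4}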
self.controlnet = ControlNet(self)
2
2023-11-13 13:04:41+00:00
8k
ahayler/s4c
datasets/kitti_360/kitti_360_dataset.py
[ { "identifier": "KITTI360Bbox3D", "path": "datasets/kitti_360/annotation.py", "snippet": "class KITTI360Bbox3D(KITTI360Object):\n # Constructor\n def __init__(self):\n KITTI360Object.__init__(self)\n # the polygon as list of points\n self.vertices = []\n self.faces = []\n self.lines = [[0,5],[1,4],[2,7],[3,6],\n [0,1],[1,3],[3,2],[2,0],\n [4,5],[5,7],[7,6],[6,4]]\n\n # the ID of the corresponding object\n self.semanticId = -1\n self.instanceId = -1\n self.annotationId = -1\n\n # the window that contains the bbox\n self.start_frame = -1\n self.end_frame = -1\n\n # timestamp of the bbox (-1 if statis)\n self.timestamp = -1\n\n # projected vertices\n self.vertices_proj = None\n self.meshes = []\n\n # name\n self.name = '' \n\n def __str__(self): \n return self.name\n\n def generateMeshes(self):\n self.meshes = []\n if self.vertices_proj:\n for fidx in range(self.faces.shape[0]):\n self.meshes.append( [ Point(self.vertices_proj[0][int(x)], self.vertices_proj[1][int(x)]) for x in self.faces[fidx]] )\n \n def parseOpencvMatrix(self, node):\n rows = int(node.find('rows').text)\n cols = int(node.find('cols').text)\n data = node.find('data').text.split(' ')\n \n mat = []\n for d in data:\n d = d.replace('\\n', '')\n if len(d)<1:\n continue\n mat.append(float(d))\n mat = np.reshape(mat, [rows, cols])\n return mat\n\n def parseVertices(self, child):\n transform = self.parseOpencvMatrix(child.find('transform'))\n R = transform[:3,:3]\n T = transform[:3,3]\n vertices = self.parseOpencvMatrix(child.find('vertices'))\n faces = self.parseOpencvMatrix(child.find('faces'))\n\n vertices = np.matmul(R, vertices.transpose()).transpose() + T\n self.vertices = vertices\n self.faces = faces\n self.R = R\n self.T = T\n\n def parseBbox(self, child):\n semanticIdKITTI = int(child.find('semanticId').text)\n self.semanticId = kittiId2label[semanticIdKITTI].id\n self.instanceId = int(child.find('instanceId').text)\n self.name = kittiId2label[semanticIdKITTI].name\n\n self.start_frame = int(child.find('start_frame').text)\n self.end_frame = int(child.find('end_frame').text)\n\n self.timestamp = int(child.find('timestamp').text)\n\n self.annotationId = int(child.find('index').text) + 1\n\n global annotation2global\n annotation2global[self.annotationId] = local2global(self.semanticId, self.instanceId)\n self.parseVertices(child)\n\n def parseStuff(self, child):\n classmap = {'driveway': 'parking', 'ground': 'terrain', 'unknownGround': 'ground', \n 'railtrack': 'rail track', 'bigPole': 'pole', 'unknownObject': 'unknown object',\n 'smallPole': 'smallpole', 'trafficSign': 'traffic sign', 'trashbin': 'trash bin',\n 'guardrail': 'guard rail', 'trafficLight': 'traffic light', 'pedestrian': 'person',\n 'vendingmachine': 'vending machine', 'unknownConstruction': 'unknown construction',\n 'unknownVehicle': 'unknown vehicle'}\n label = child.find('label').text \n if label in classmap.keys():\n label = classmap[label]\n\n self.start_frame = int(child.find('start_frame').text)\n self.end_frame = int(child.find('end_frame').text)\n\n self.timestamp = int(child.find('timestamp').text)\n\n self.semanticId = name2label[label].id\n self.name = label\n self.parseVertices(child)" }, { "identifier": "get_color_aug_fn", "path": "utils/augmentation.py", "snippet": "def get_color_aug_fn(params):\n fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = params\n def color_aug_fn(img):\n for fn_id in fn_idx:\n if fn_id == 0 and brightness_factor is not None:\n img = F.adjust_brightness(img, brightness_factor)\n elif fn_id == 
1 and contrast_factor is not None:\n img = F.adjust_contrast(img, contrast_factor)\n elif fn_id == 2 and saturation_factor is not None:\n img = F.adjust_saturation(img, saturation_factor)\n elif fn_id == 3 and hue_factor is not None:\n img = F.adjust_hue(img, hue_factor)\n\n return img\n\n return color_aug_fn" }, { "identifier": "labels", "path": "datasets/kitti_360/labels.py", "snippet": "def assureSingleInstanceName( name ):" } ]
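A usage sketch for the get_color_aug_fn snippet above. It assumes a torchvision version whose ColorJitter.get_params accepts (min, max) tuples and returns the (fn_idx, brightness, contrast, saturation, hue) tuple the closure expects; sampling once and reusing the closure applies the identical jitter to every frame of a clip.

import torch
from torchvision.transforms import ColorJitter
from utils.augmentation import get_color_aug_fn  # path as quoted in this row

params = ColorJitter.get_params(
    brightness=(0.8, 1.2), contrast=(0.8, 1.2),
    saturation=(0.8, 1.2), hue=(-0.1, 0.1),
)
color_aug_fn = get_color_aug_fn(params)
frames = [torch.rand(3, 192, 640) for _ in range(4)]
frames = [color_aug_fn(f) for f in frames]  # same augmentation for all frames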
import os import time import xml.etree.ElementTree as ET import cv2 import numpy as np import torch import torch.nn.functional as F import yaml import omegaconf from collections import Counter, defaultdict from pathlib import Path from typing import Optional from scipy.spatial.transform import Rotation from torch.utils.data import Dataset from torchvision.transforms import ColorJitter from datasets.kitti_360.annotation import KITTI360Bbox3D from utils.augmentation import get_color_aug_fn from datasets.kitti_360.labels import labels
5,122
T_02_to_pose[:3, :] = np.reshape(cam_to_pose_data["image_02"], (3, 4)) T_03_to_pose[:3, :] = np.reshape(cam_to_pose_data["image_03"], (3, 4)) T_00_to_velo[:3, :] = np.reshape(cam_to_velo_data, (3, 4)) # Compute cam to pose transforms for rectified perspective cameras T_rect_00_to_pose = T_00_to_pose @ np.linalg.inv(R_rect_00) T_rect_01_to_pose = T_01_to_pose @ np.linalg.inv(R_rect_01) # Compute cam to pose transform for fisheye cameras T_02_to_pose = T_02_to_pose @ R_02 T_03_to_pose = T_03_to_pose @ R_03 # Compute velo to cameras and velo to pose transforms T_velo_to_rect_00 = R_rect_00 @ np.linalg.inv(T_00_to_velo) T_velo_to_pose = T_rect_00_to_pose @ T_velo_to_rect_00 T_velo_to_rect_01 = np.linalg.inv(T_rect_01_to_pose) @ T_velo_to_pose # Calibration matrix is the same for both perspective cameras K = P_rect_00[:3, :3] # Normalize calibration f_x = K[0, 0] / im_size_rect[1] f_y = K[1, 1] / im_size_rect[0] c_x = K[0, 2] / im_size_rect[1] c_y = K[1, 2] / im_size_rect[0] # Change to image coordinates [-1, 1] K[0, 0] = f_x * 2. K[1, 1] = f_y * 2. K[0, 2] = c_x * 2. - 1 K[1, 2] = c_y * 2. - 1 # Convert fisheye calibration to [-1, 1] image dimensions fisheye_02_data["projection_parameters"]["gamma1"] = (fisheye_02_data["projection_parameters"]["gamma1"] / im_size_fish[1]) * 2. fisheye_02_data["projection_parameters"]["gamma2"] = (fisheye_02_data["projection_parameters"]["gamma2"] / im_size_fish[0]) * 2. fisheye_02_data["projection_parameters"]["u0"] = (fisheye_02_data["projection_parameters"]["u0"] / im_size_fish[1]) * 2. - 1. fisheye_02_data["projection_parameters"]["v0"] = (fisheye_02_data["projection_parameters"]["v0"] / im_size_fish[0]) * 2. - 1. fisheye_03_data["projection_parameters"]["gamma1"] = (fisheye_03_data["projection_parameters"]["gamma1"] / im_size_fish[1]) * 2. fisheye_03_data["projection_parameters"]["gamma2"] = (fisheye_03_data["projection_parameters"]["gamma2"] / im_size_fish[0]) * 2. fisheye_03_data["projection_parameters"]["u0"] = (fisheye_03_data["projection_parameters"]["u0"] / im_size_fish[1]) * 2. - 1. fisheye_03_data["projection_parameters"]["v0"] = (fisheye_03_data["projection_parameters"]["v0"] / im_size_fish[0]) * 2. - 1. 
        # Use same camera calibration as perspective cameras for resampling
        # K_fisheye = np.eye(3, dtype=np.float32)
        # K_fisheye[0, 0] = 2
        # K_fisheye[1, 1] = 2

        K_fisheye = K

        calibs = {
            "K_perspective": K,
            "K_fisheye": K_fisheye,
            "T_cam_to_pose": {
                "00": T_rect_00_to_pose,
                "01": T_rect_01_to_pose,
                "02": T_02_to_pose,
                "03": T_03_to_pose,
            },
            "T_velo_to_cam": {
                "00": T_velo_to_rect_00,
                "01": T_velo_to_rect_01,
            },
            "T_velo_to_pose": T_velo_to_pose,
            "fisheye": {
                "calib_02": fisheye_02_data,
                "calib_03": fisheye_03_data,
                "R_02": R_02[:3, :3],
                "R_03": R_03[:3, :3]
            },
            "im_size": im_size_rect
        }

        return calibs

    @staticmethod
    def _get_resamplers(calibs, K_target, target_image_size):
        resampler_02 = FisheyeToPinholeSampler(K_target, target_image_size, calibs["fisheye"]["calib_02"], calibs["fisheye"]["R_02"])
        resampler_03 = FisheyeToPinholeSampler(K_target, target_image_size, calibs["fisheye"]["calib_03"], calibs["fisheye"]["R_03"])
        return resampler_02, resampler_03

    @staticmethod
    def _load_poses(pose_path, sequences):
        ids = {}
        poses = {}

        for seq in sequences:
            pose_file = Path(pose_path) / seq / f"poses.txt"

            try:
                pose_data = np.loadtxt(pose_file)
            except FileNotFoundError:
                print(f'Ground truth poses are not available for sequence {seq}.')
                continue

            ids_seq = pose_data[:, 0].astype(int)
            poses_seq = pose_data[:, 1:].astype(np.float32).reshape((-1, 3, 4))
            poses_seq = np.concatenate((poses_seq, np.zeros_like(poses_seq[:, :1, :])), axis=1)
            poses_seq[:, 3, 3] = 1

            ids[seq] = ids_seq
            poses[seq] = poses_seq

        return ids, poses

    @staticmethod
    def _load_3d_bboxes(bbox_path, sequences):
        bboxes = {}

        for seq in sequences:
            with open(Path(bbox_path) / f"{seq}.xml", "rb") as f:
                tree = ET.parse(f)
            root = tree.getroot()

            objects = defaultdict(list)

            num_bbox = 0

            for child in root:
                if child.find('transform') is None:
                    continue
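A worked example of the intrinsics normalization in _load_calibs above: pixel-space focal lengths and principal point are rescaled so image coordinates span [-1, 1], matching grid_sample conventions. The calibration numbers and image size are illustrative assumptions, not values read from the dataset.

import numpy as np

K = np.array([[552.554, 0.0, 682.049],
              [0.0, 552.554, 238.769],
              [0.0, 0.0, 1.0]], dtype=np.float32)  # assumed pixel-space K
H, W = 376, 1408                                   # assumed rectified (rows, cols)

K_norm = K.copy()
K_norm[0, 0] = (K[0, 0] / W) * 2.0        # f_x as a fraction of width, times 2
K_norm[1, 1] = (K[1, 1] / H) * 2.0
K_norm[0, 2] = (K[0, 2] / W) * 2.0 - 1.0  # principal point mapped into [-1, 1]
K_norm[1, 2] = (K[1, 2] / H) * 2.0 - 1.0
print(K_norm.round(4))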
name2label = {label.name: label for label in labels} id2ProposedId = {label.id: label.trainId for label in labels} PropsedId2TrainId = dict(enumerate(list(set(id2ProposedId.values())))) PropsedId2TrainId = {v : k for k, v in PropsedId2TrainId.items()} id2TrainId = {k : PropsedId2TrainId[v] for k, v in id2ProposedId.items()} class FisheyeToPinholeSampler: def __init__(self, K_target, target_image_size, calibs, rotation=None): self._compute_transform(K_target, target_image_size, calibs, rotation) def _compute_transform(self, K_target, target_image_size, calibs, rotation=None): x = torch.linspace(-1, 1, target_image_size[1]).view(1, -1).expand(target_image_size) y = torch.linspace(-1, 1, target_image_size[0]).view(-1, 1).expand(target_image_size) z = torch.ones_like(x) xyz = torch.stack((x, y, z), dim=-1).view(-1, 3) # Unproject xyz = (torch.inverse(torch.tensor(K_target)) @ xyz.T).T if rotation is not None: xyz = (torch.tensor(rotation) @ xyz.T).T # Backproject into fisheye xyz = xyz / torch.norm(xyz, dim=-1, keepdim=True) x = xyz[:, 0] y = xyz[:, 1] z = xyz[:, 2] xi_src = calibs["mirror_parameters"]["xi"] x = x / (z + xi_src) y = y / (z + xi_src) k1 = calibs["distortion_parameters"]["k1"] k2 = calibs["distortion_parameters"]["k2"] r = x*x + y*y factor = (1 + k1 * r + k2 * r * r) x = x * factor y = y * factor gamma0 = calibs["projection_parameters"]["gamma1"] gamma1 = calibs["projection_parameters"]["gamma2"] u0 = calibs["projection_parameters"]["u0"] v0 = calibs["projection_parameters"]["v0"] x = x * gamma0 + u0 y = y * gamma1 + v0 xy = torch.stack((x, y), dim=-1).view(1, *target_image_size, 2) self.sample_pts = xy def resample(self, img): img = img.unsqueeze(0) resampled_img = F.grid_sample(img, self.sample_pts, align_corners=True).squeeze(0) return resampled_img class Kitti360Dataset(Dataset): def __init__(self, data_path: str, pose_path: str, split_path: Optional[str], target_image_size=(192, 640), return_stereo=False, return_depth=False, return_fisheye=True, return_3d_bboxes=False, return_segmentation=False, segmentation_mode=None, data_segmentation_path=None, frame_count=2, keyframe_offset=0, dilation=1, fisheye_rotation=0, fisheye_offset=0, eigen_depth=True, color_aug=False, is_preprocessed=False, load_kitti_360_segmentation_gt=False, constrain_to_datapoints=False, additional_random_front_offset=False ): self.data_path = data_path self.data_segmentation_path = data_segmentation_path self.pose_path = pose_path self.split_path = split_path self.target_image_size = target_image_size self.return_stereo = return_stereo self.return_fisheye = return_fisheye self.return_depth = return_depth self.return_3d_bboxes = return_3d_bboxes self.return_segmentation = return_segmentation self.segmentation_mode = segmentation_mode self.frame_count = frame_count self.dilation = dilation self.fisheye_rotation = fisheye_rotation self.fisheye_offset = fisheye_offset self.keyframe_offset = keyframe_offset self.eigen_depth = eigen_depth self.color_aug = color_aug self.is_preprocessed = is_preprocessed self.load_kitti_360_segmentation_gt = load_kitti_360_segmentation_gt self.constrain_to_datapoints = constrain_to_datapoints if isinstance(self.fisheye_rotation, float) or isinstance(self.fisheye_rotation, int): self.fisheye_rotation = (0, self.fisheye_rotation) self.fisheye_rotation = tuple(self.fisheye_rotation) # Support random fisheye offset if type(self.fisheye_offset) == int: self.random_fisheye_offset = False self.fisheye_offset = (self.fisheye_offset, ) elif type(self.fisheye_offset) in [tuple, list, 
omegaconf.listconfig.ListConfig]: self.random_fisheye_offset = True self.fisheye_offset = tuple(sorted(self.fisheye_offset)) else: raise ValueError(f"Invalid datatype for fisheye offset: {type(self.fisheye_offset)}") if additional_random_front_offset and not self.random_fisheye_offset: raise ValueError("Random Fisheye Offset needs to be active for additional random front offset!") else: self.additional_random_front_offset = additional_random_front_offset self._sequences = self._get_sequences(self.data_path) self._calibs = self._load_calibs(self.data_path, self.fisheye_rotation) self._resampler_02, self._resampler_03 = self._get_resamplers(self._calibs, self._calibs["K_fisheye"], self.target_image_size) self._img_ids, self._poses = self._load_poses(self.pose_path, self._sequences) self._left_offset = ((self.frame_count - 1) // 2 + self.keyframe_offset) * self.dilation self._perspective_folder = "data_rect" if not self.is_preprocessed else f"data_{self.target_image_size[0]}x{self.target_image_size[1]}" self._segmentation_perspective_folder = "data_192x640" self._segmentation_fisheye_folder = "data_192x640_0x-15" self._fisheye_folder = "data_rgb" if not self.is_preprocessed else f"data_{self.target_image_size[0]}x{self.target_image_size[1]}_{self.fisheye_rotation[0]}x{self.fisheye_rotation[1]}" if self.split_path is not None: self._datapoints = self._load_split(self.split_path, self._img_ids) elif self.return_segmentation: self._datapoints = self._semantics_split(self._sequences, self.data_path, self._img_ids) else: self._datapoints = self._full_split(self._sequences, self._img_ids, self.check_file_integrity) if self.return_3d_bboxes: self._3d_bboxes = self._load_3d_bboxes(Path(data_path) / "data_3d_bboxes" / "train_full", self._sequences) if self.segmentation_mode == 'KITTI-360' or self.load_kitti_360_segmentation_gt: # Segmentations are only provided for the left camera self._datapoints = [dp for dp in self._datapoints if not dp[2]] # make sure we can load all segmentation masks self._datapoints = [dp for dp in self._datapoints if self.check_segmentation(dp)] if self.constrain_to_datapoints: print("Using maximum datapoint as last image of sequence.") seq_max_id = {seq: max([0] + [d[1] for d in self._datapoints if d[0] == seq]) for seq in self._sequences} for seq in self._sequences: self._poses[seq] = self._poses[seq][:seq_max_id[seq]+1] self._img_ids[seq] = self._img_ids[seq][:seq_max_id[seq]+1] self._skip = 0 self.length = len(self._datapoints) def check_segmentation(self, dp): """Checks for a datapoint dp if we can load all the segmentation masks for all image_ids.""" sequence, id, is_right = dp seq_len = self._img_ids[sequence].shape[0] ids = [id] + [max(min(i, seq_len - 1), 0) for i in range(id - self._left_offset, id - self._left_offset + self.frame_count * self.dilation, self.dilation) if i != id] img_ids = [self.get_img_id_from_id(sequence, id) for id in ids] for img_id in img_ids: _p = os.path.join(self.data_path, "data_2d_semantics", "train", sequence, "image_00", "semantic", f"{img_id:010d}.png") if not os.path.isfile(_p): return False return True def check_file_integrity(self, seq, id): dp = Path(self.data_path) image_00 = dp / "data_2d_raw" / seq / "image_00" / self._perspective_folder image_01 = dp / "data_2d_raw" / seq / "image_01" / self._perspective_folder image_02 = dp / "data_2d_raw" / seq / "image_02" / self._fisheye_folder image_03 = dp / "data_2d_raw" / seq / "image_03" / self._fisheye_folder fisheye_offset = self.fisheye_offset[-1] seq_len = self._img_ids[seq].shape[0] 
ids = [id] + [max(min(i, seq_len - 1), 0) for i in range(id - self._left_offset, id - self._left_offset + self.frame_count * self.dilation, self.dilation) if i != id] ids_fish = [max(min(id + fisheye_offset, seq_len - 1), 0)] + [max(min(i, seq_len - 1), 0) for i in range(id + fisheye_offset - self._left_offset, id + fisheye_offset - self._left_offset + self.frame_count * self.dilation, self.dilation) if i != id + fisheye_offset] img_ids = [self.get_img_id_from_id(seq, id) for id in ids] img_ids_fish = [self.get_img_id_from_id(seq, id) for id in ids_fish] for img_id in img_ids: if not ((image_00 / f"{img_id:010d}.png").exists() and (image_01 / f"{img_id:010d}.png").exists()): return False if self.return_fisheye: for img_id in img_ids_fish: if not ((image_02 / f"{img_id:010d}.png").exists() and (image_03 / f"{img_id:010d}.png").exists()): return False return True @staticmethod def _get_sequences(data_path): all_sequences = [] seqs_path = Path(data_path) / "data_2d_raw" for seq in seqs_path.iterdir(): if not seq.is_dir(): continue all_sequences.append(seq.name) return all_sequences @staticmethod def _full_split(sequences, img_ids, check_integrity): datapoints = [] for seq in sorted(sequences): ids = [id for id in range(len(img_ids[seq])) if check_integrity(seq, id)] datapoints_seq = [(seq, id, False) for id in ids] + [(seq, id, True) for id in ids] datapoints.extend(datapoints_seq) return datapoints @staticmethod def _semantics_split(sequences, data_path, img_ids): datapoints = [] for seq in sorted(sequences): datapoints_seq = [(seq, id, False) for id in range(len(img_ids[seq]))] datapoints_seq = [dp for dp in datapoints_seq if os.path.exists(os.path.join(data_path, "data_2d_semantics", "train", seq, "image_00", "semantic_rgb", f"{img_ids[seq][dp[1]]:010d}.png"))] datapoints.extend(datapoints_seq) return datapoints @staticmethod def _load_split(split_path, img_ids): img_id2id = {seq: {id: i for i, id in enumerate(ids)} for seq, ids in img_ids.items()} with open(split_path, "r") as f: lines = f.readlines() def split_line(l): segments = l.split(" ") seq = segments[0] id = img_id2id[seq][int(segments[1])] return seq, id, segments[2][0] == "r" return list(map(split_line, lines)) @staticmethod def _load_calibs(data_path, fisheye_rotation=0): data_path = Path(data_path) calib_folder = data_path / "calibration" cam_to_pose_file = calib_folder / "calib_cam_to_pose.txt" cam_to_velo_file = calib_folder / "calib_cam_to_velo.txt" intrinsics_file = calib_folder / "perspective.txt" fisheye_02_file = calib_folder / "image_02.yaml" fisheye_03_file = calib_folder / "image_03.yaml" cam_to_pose_data = {} with open(cam_to_pose_file, 'r') as f: for line in f.readlines(): key, value = line.split(':', 1) try: cam_to_pose_data[key] = np.array([float(x) for x in value.split()], dtype=np.float32) except ValueError: pass cam_to_velo_data = None with open(cam_to_velo_file, 'r') as f: line = f.readline() try: cam_to_velo_data = np.array([float(x) for x in line.split()], dtype=np.float32) except ValueError: pass intrinsics_data = {} with open(intrinsics_file, 'r') as f: for line in f.readlines(): key, value = line.split(':', 1) try: intrinsics_data[key] = np.array([float(x) for x in value.split()], dtype=np.float32) except ValueError: pass with open(fisheye_02_file, 'r') as f: f.readline() # Skips first line that defines the YAML version fisheye_02_data = yaml.safe_load(f) with open(fisheye_03_file, 'r') as f: f.readline() # Skips first line that defines the YAML version fisheye_03_data = yaml.safe_load(f) im_size_rect = 
(int(intrinsics_data["S_rect_00"][1]), int(intrinsics_data["S_rect_00"][0])) im_size_fish = (fisheye_02_data["image_height"], fisheye_02_data["image_width"]) # Projection matrices # We use these projection matrices also when resampling the fisheye cameras. # This makes downstream processing easier, but it could be done differently. P_rect_00 = np.reshape(intrinsics_data['P_rect_00'], (3, 4)) P_rect_01 = np.reshape(intrinsics_data['P_rect_01'], (3, 4)) # Rotation matrices from raw to rectified -> Needs to be inverted later R_rect_00 = np.eye(4, dtype=np.float32) R_rect_01 = np.eye(4, dtype=np.float32) R_rect_00[:3, :3] = np.reshape(intrinsics_data['R_rect_00'], (3, 3)) R_rect_01[:3, :3] = np.reshape(intrinsics_data['R_rect_01'], (3, 3)) # Rotation matrices from resampled fisheye to raw fisheye fisheye_rotation = np.array(fisheye_rotation).reshape((1, 2)) R_02 = np.eye(4, dtype=np.float32) R_03 = np.eye(4, dtype=np.float32) R_02[:3, :3] = Rotation.from_euler("xy", fisheye_rotation[:, [1, 0]], degrees=True).as_matrix().astype(np.float32) R_03[:3, :3] = Rotation.from_euler("xy", fisheye_rotation[:, [1, 0]] * np.array([[1, -1]]), degrees=True).as_matrix().astype(np.float32) # Load cam to pose transforms T_00_to_pose = np.eye(4, dtype=np.float32) T_01_to_pose = np.eye(4, dtype=np.float32) T_02_to_pose = np.eye(4, dtype=np.float32) T_03_to_pose = np.eye(4, dtype=np.float32) T_00_to_velo = np.eye(4, dtype=np.float32) T_00_to_pose[:3, :] = np.reshape(cam_to_pose_data["image_00"], (3, 4)) T_01_to_pose[:3, :] = np.reshape(cam_to_pose_data["image_01"], (3, 4)) T_02_to_pose[:3, :] = np.reshape(cam_to_pose_data["image_02"], (3, 4)) T_03_to_pose[:3, :] = np.reshape(cam_to_pose_data["image_03"], (3, 4)) T_00_to_velo[:3, :] = np.reshape(cam_to_velo_data, (3, 4)) # Compute cam to pose transforms for rectified perspective cameras T_rect_00_to_pose = T_00_to_pose @ np.linalg.inv(R_rect_00) T_rect_01_to_pose = T_01_to_pose @ np.linalg.inv(R_rect_01) # Compute cam to pose transform for fisheye cameras T_02_to_pose = T_02_to_pose @ R_02 T_03_to_pose = T_03_to_pose @ R_03 # Compute velo to cameras and velo to pose transforms T_velo_to_rect_00 = R_rect_00 @ np.linalg.inv(T_00_to_velo) T_velo_to_pose = T_rect_00_to_pose @ T_velo_to_rect_00 T_velo_to_rect_01 = np.linalg.inv(T_rect_01_to_pose) @ T_velo_to_pose # Calibration matrix is the same for both perspective cameras K = P_rect_00[:3, :3] # Normalize calibration f_x = K[0, 0] / im_size_rect[1] f_y = K[1, 1] / im_size_rect[0] c_x = K[0, 2] / im_size_rect[1] c_y = K[1, 2] / im_size_rect[0] # Change to image coordinates [-1, 1] K[0, 0] = f_x * 2. K[1, 1] = f_y * 2. K[0, 2] = c_x * 2. - 1 K[1, 2] = c_y * 2. - 1 # Convert fisheye calibration to [-1, 1] image dimensions fisheye_02_data["projection_parameters"]["gamma1"] = (fisheye_02_data["projection_parameters"]["gamma1"] / im_size_fish[1]) * 2. fisheye_02_data["projection_parameters"]["gamma2"] = (fisheye_02_data["projection_parameters"]["gamma2"] / im_size_fish[0]) * 2. fisheye_02_data["projection_parameters"]["u0"] = (fisheye_02_data["projection_parameters"]["u0"] / im_size_fish[1]) * 2. - 1. fisheye_02_data["projection_parameters"]["v0"] = (fisheye_02_data["projection_parameters"]["v0"] / im_size_fish[0]) * 2. - 1. fisheye_03_data["projection_parameters"]["gamma1"] = (fisheye_03_data["projection_parameters"]["gamma1"] / im_size_fish[1]) * 2. fisheye_03_data["projection_parameters"]["gamma2"] = (fisheye_03_data["projection_parameters"]["gamma2"] / im_size_fish[0]) * 2. 
fisheye_03_data["projection_parameters"]["u0"] = (fisheye_03_data["projection_parameters"]["u0"] / im_size_fish[1]) * 2. - 1. fisheye_03_data["projection_parameters"]["v0"] = (fisheye_03_data["projection_parameters"]["v0"] / im_size_fish[0]) * 2. - 1. # Use same camera calibration as perspective cameras for resampling # K_fisheye = np.eye(3, dtype=np.float32) # K_fisheye[0, 0] = 2 # K_fisheye[1, 1] = 2 K_fisheye = K calibs = { "K_perspective": K, "K_fisheye": K_fisheye, "T_cam_to_pose": { "00": T_rect_00_to_pose, "01": T_rect_01_to_pose, "02": T_02_to_pose, "03": T_03_to_pose, }, "T_velo_to_cam": { "00": T_velo_to_rect_00, "01": T_velo_to_rect_01, }, "T_velo_to_pose": T_velo_to_pose, "fisheye": { "calib_02": fisheye_02_data, "calib_03": fisheye_03_data, "R_02": R_02[:3, :3], "R_03": R_03[:3, :3] }, "im_size": im_size_rect } return calibs @staticmethod def _get_resamplers(calibs, K_target, target_image_size): resampler_02 = FisheyeToPinholeSampler(K_target, target_image_size, calibs["fisheye"]["calib_02"], calibs["fisheye"]["R_02"]) resampler_03 = FisheyeToPinholeSampler(K_target, target_image_size, calibs["fisheye"]["calib_03"], calibs["fisheye"]["R_03"]) return resampler_02, resampler_03 @staticmethod def _load_poses(pose_path, sequences): ids = {} poses = {} for seq in sequences: pose_file = Path(pose_path) / seq / f"poses.txt" try: pose_data = np.loadtxt(pose_file) except FileNotFoundError: print(f'Ground truth poses are not avaialble for sequence {seq}.') ids_seq = pose_data[:, 0].astype(int) poses_seq = pose_data[:, 1:].astype(np.float32).reshape((-1, 3, 4)) poses_seq = np.concatenate((poses_seq, np.zeros_like(poses_seq[:, :1, :])), axis=1) poses_seq[:, 3, 3] = 1 ids[seq] = ids_seq poses[seq] = poses_seq return ids, poses @staticmethod def _load_3d_bboxes(bbox_path, sequences): bboxes = {} for seq in sequences: with open(Path(bbox_path) / f"{seq}.xml", "rb") as f: tree = ET.parse(f) root = tree.getroot() objects = defaultdict(list) num_bbox = 0 for child in root: if child.find('transform') is None: continue
obj = KITTI360Bbox3D()
0
2023-11-12 21:53:27+00:00
8k
pedramhaqiqi/LANPONG
lanpong/server/server.py
[ { "identifier": "Game", "path": "lanpong/game/game.py", "snippet": "class Game:\n \"\"\"\n Game object for pong\n \"\"\"\n\n DEFAULT_ROWS = 24\n DEFAULT_COLS = 70\n STATS_HEIGHT = 3\n GAME_LENGTH = 3\n SCORE_DISPLAY_TIME = 2\n\n def __init__(\n self,\n rows=DEFAULT_ROWS,\n cols=DEFAULT_COLS,\n stats_height=STATS_HEIGHT,\n game_length=GAME_LENGTH,\n ):\n self.nrows = rows\n self.ncols = cols\n self.score = [0, 0]\n self.score_timestamp = 0\n self.most_recent_score = -1\n\n self.is_game_started_event = threading.Event()\n\n self.ball = Ball(\n self.nrows // 2,\n self.ncols // 2,\n 1,\n 1,\n )\n\n self.paddle1 = Paddle(self.nrows // 2, 1, 3)\n self.paddle2 = Paddle(self.nrows // 2, self.ncols - 2, 3)\n\n self.screen = Game.get_blank_screen(stats_height=stats_height)\n network_header = \"Network Statistics:\"\n start = (cols - len(network_header)) // 2\n self.screen[-self.STATS_HEIGHT, start : start + len(network_header)] = list(\n network_header\n )\n # Draw the paddles\n self.draw_paddle(self.paddle1)\n self.draw_paddle(self.paddle2)\n # Draw the ball\n self.screen[self.ball.get_row()][self.ball.get_col()] = Ball.SYMBOL\n\n self.player1 = self.player2 = None\n\n self.loser = 0\n\n def _reset_paddles(self):\n \"\"\"Resets the paddles to their original positions\"\"\"\n self.screen[1 : self.nrows - 1, 1] = self.screen[1 : self.nrows - 1, -2] = b\" \"\n self.paddle1.row = self.nrows // 2\n self.paddle2.row = self.nrows // 2\n self.paddle1.direction = self.paddle2.direction = 0\n\n def _reset_ball(self):\n \"\"\"Resets the ball to its original position\"\"\"\n self.ball.row = self.nrows // 2\n self.ball.col = self.ncols // 2\n choice = random.choice([-1, 1])\n self.ball.row_velocity *= choice\n self.ball.col_velocity *= choice\n\n def reset_board(self):\n \"\"\"Resets the board to its original state\"\"\"\n self._reset_paddles()\n self._reset_ball()\n\n def draw_paddle(self, paddle):\n \"\"\"Draws a paddle on the screen\"\"\"\n self.screen[paddle.row : paddle.row + paddle.length, paddle.col] = b\"|\"\n\n def initialize_player(self, username):\n \"\"\"Initializes a player. Returns non-zero player id, 0 if game is full.\"\"\"\n if self.player1 is None:\n self.player1 = Player(self.paddle1, username)\n self.player1.id = 1\n return 1\n elif self.player2 is None:\n self.player2 = Player(self.paddle2, username)\n self.player2.id = 2\n return 2\n else:\n return 0\n\n def set_player_ready(self, player_id, is_ready):\n \"\"\"Sets the player status to either 'ready' or 'not ready'\"\"\"\n player = self.player1 if player_id == 1 else self.player2\n player.is_ready = is_ready\n if self.player1.is_ready and (\n self.player2 is not None and self.player2.is_ready\n ):\n self.is_game_started_event.set()\n\n def update_score(self, player_id):\n \"\"\"Updates the score of the player\"\"\"\n if player_id != 0:\n self.score[player_id - 1] += 1\n self.check_for_winner()\n self.reset_board()\n\n def check_for_winner(self):\n \"\"\"Checks if there is a winner and updates the screen\"\"\"\n if self.score[0] >= self.GAME_LENGTH:\n self.loser = 2\n elif self.score[1] >= self.GAME_LENGTH:\n self.loser = 1\n\n def update_game(self):\n \"\"\"\n Updates the game state.\n\n This function handles the main logic for updating the game state, including ball movement,\n collisions, score tracking, and screen updates.\n\n Returns:\n None. 
Modifies the internal state of the Game object.\n \"\"\"\n # Check if the game is in the score display phase after a goal\n if time.time() - self.score_timestamp < self.SCORE_DISPLAY_TIME:\n return\n\n # Reset the most recent score, indicating no recent score update\n self.most_recent_score = -1\n\n # Check if the game is over\n if self.loser != 0:\n # Game is over, don't update anything further\n return\n\n # Erase the ball from its previous position on the screen\n self.screen[self.ball.get_row()][self.ball.get_col()] = b\" \"\n\n # Update the ball's position based on its velocity\n self.ball.update_position()\n\n # Check for collisions with the walls and update the score if a goal is scored\n score = self.ball.handle_wall_collision(self.nrows, self.ncols)\n if score != 0:\n # Record the timestamp of the goal for score display\n self.score_timestamp = time.time()\n # Update the most recent score and overall score\n self.most_recent_score = score\n self.update_score(score)\n\n # Check for collisions with paddles and adjust ball velocity accordingly\n self.ball.handle_paddle_collision(self.player1.paddle, self.player2.paddle)\n\n # Ensure the ball stays within the game boundaries\n self.ball.keep_within_bounds(self.nrows, self.ncols)\n\n # Update the ball position on the screen\n self.screen[self.ball.get_row()][self.ball.get_col()] = Ball.SYMBOL\n\n def get_message_screen(self, message):\n screen = Game.get_blank_screen(stats_height=0)\n rows, cols = screen.shape\n assert len(message) < cols - 2\n\n start = (cols - len(message)) // 2\n screen[rows // 2, start : start + len(message)] = list(message)\n return Game.screen_to_tui(screen)\n\n def update_paddle(self, player_number: int, key):\n \"\"\"\n Updates the paddle positions based on user input.\n\n This function handles the paddle movement in response to user input.\n It checks the validity of the input key and ensures that the paddle stays within the game boundaries.\n\n Args:\n player_number (int): The player number (1 or 2) whose paddle to update.\n key (bytes): The user input key representing the desired paddle movement.\n\n Returns:\n None. 
Modifies the internal state of the Game object.\n \"\"\"\n # Check if the game is in the score display phase after a goal\n if time.time() - self.score_timestamp < self.SCORE_DISPLAY_TIME:\n return\n\n # Check if the game is over\n if self.loser != 0:\n # Game is over, don't update anything further\n return\n\n # Select the player and corresponding paddle based on the player number\n player = self.player1 if player_number == 1 else self.player2\n paddle = player.paddle\n\n # Clear the old paddle position on the screen\n self.screen[paddle.row : paddle.row + paddle.length, paddle.col] = b\" \"\n\n # Check if the key is valid and update the paddle direction\n if key == b\"w\":\n paddle.direction = -1\n elif key == b\"s\":\n paddle.direction = 1\n elif key == b\" \":\n paddle.direction = 0\n\n # Update the paddle position based on the direction and ensure it stays within bounds\n if paddle.direction == -1 and paddle.row > 1:\n paddle.row -= 1\n elif paddle.direction == 1 and paddle.row < self.nrows - paddle.length - 1:\n paddle.row += 1\n\n # Draw the new paddle position on the screen\n self.draw_paddle(paddle)\n\n def update_network_stats(self, stats, offset=1):\n \"\"\"Updates the network statistics area\"\"\"\n # self.screen[-self.STATS_HEIGHT + 1, 1:-2] = b\" \"\n if offset == 1:\n self.screen[-self.STATS_HEIGHT + 1, 1 : 1 + len(stats)] = list(stats)\n else:\n self.screen[-self.STATS_HEIGHT + 1, -1 - len(stats) : -1] = list(stats)\n\n def is_full(self):\n \"\"\"Returns True if the game is full, False otherwise\"\"\"\n return self.player1 is not None and self.player2 is not None\n\n def __str__(self):\n if time.time() - self.score_timestamp < self.SCORE_DISPLAY_TIME:\n return self.get_message_screen(\n f\"{self.player1.username if self.most_recent_score == self.player1.id else self.player2.username} scores! 
Score: {self.score[0]}-{self.score[1]}\"\n )\n\n return Game.screen_to_tui(self.screen)\n\n @staticmethod\n def get_blank_screen(\n rows=DEFAULT_ROWS, cols=DEFAULT_COLS, stats_height=STATS_HEIGHT\n ):\n \"\"\"Return a blank screen with no paddles or ball, just the border\"\"\"\n rows = rows + stats_height\n screen = np.full((rows, cols), b\" \", dtype=\"S1\")\n screen[0, :] = screen[-1, :] = screen[-stats_height - 1, :] = b\"-\"\n screen[:, 0] = screen[:, -1] = b\"+\"\n return screen\n\n @staticmethod\n def screen_to_tui(screen):\n \"\"\"\n Convert a screen to a TUI representation\n :param screen: The screen to convert\n :return: The TUI representation of the screen\n \"\"\"\n # Code looks ugly but point is to minimizing use of str \"+\" operator.\n return b\"\".join(\n chain.from_iterable(chain(row, [b\"\\r\", b\"\\n\"]) for row in screen)\n ).decode()" }, { "identifier": "SSHServer", "path": "lanpong/server/ssh.py", "snippet": "class SSHServer(paramiko.ServerInterface):\n def __init__(self, server):\n \"\"\"\n Initialize the SSH server.\n\n Parameters:\n - server: Instance of the server containing a database, user information, lock, and connections.\n \"\"\"\n self.db = server.db\n self.user = None\n self.lock = server.lock\n self.connections = server.connections\n\n def check_channel_request(self, kind, chanid):\n \"\"\"\n Callback for checking if a channel request is allowed.\n\n Parameters:\n - kind: Type of channel request.\n - chanid: Channel ID.\n\n Returns:\n - paramiko.OPEN_SUCCEEDED if the request is allowed, else paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED.\n \"\"\"\n if kind == \"session\":\n return paramiko.OPEN_SUCCEEDED\n return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED\n\n def check_channel_pty_request(\n self, channel, term, width, height, pixelwidth, pixelheight, modes\n ):\n \"\"\"\n Callback for checking if a PTY request is allowed.\n\n Parameters:\n - channel: The channel requesting the PTY.\n - term: Terminal type.\n - width: Width of the terminal.\n - height: Height of the terminal.\n - pixelwidth: Width of the terminal in pixels.\n - pixelheight: Height of the terminal in pixels.\n - modes: Terminal modes.\n\n Returns:\n - True if the PTY request is allowed.\n \"\"\"\n # Investigate whether useful for draw\n return True\n\n def check_channel_shell_request(self, channel):\n \"\"\"\n Callback for checking if a shell request is allowed.\n\n Parameters:\n - channel: The channel requesting the shell.\n\n Returns:\n - True if the shell request is allowed.\n \"\"\"\n return True\n\n def check_auth_password(self, username, password):\n \"\"\"\n Callback for checking password-based authentication.\n\n Parameters:\n - username: Username attempting authentication.\n - password: Password provided for authentication.\n\n Returns:\n - paramiko.AUTH_SUCCESSFUL if authentication is successful, else paramiko.AUTH_FAILED.\n \"\"\"\n try:\n self.user = self.db.login(username, password)\n if self.user:\n return paramiko.AUTH_SUCCESSFUL\n return paramiko.AUTH_FAILED\n except:\n return paramiko.AUTH_FAILED\n\n def check_auth_publickey(self, username, key):\n \"\"\"\n Callback for checking public key-based authentication.\n\n Parameters:\n - username: Username attempting authentication.\n - key: Public key provided for authentication.\n\n Returns:\n - paramiko.AUTH_SUCCESSFUL if authentication is successful, else paramiko.AUTH_FAILED.\n \"\"\"\n try:\n user = self.db.get_user(username)\n key_gen_func = {\"ed25519\": paramiko.ed25519key.Ed25519Key}\n\n pbk = 
user[\"public_key\"].split(\" \", 3)\n user_key = key_gen_func[user[\"key_type\"]](data=base64.b64decode(pbk[1]))\n if key == user_key:\n self.user = user\n return paramiko.AUTH_SUCCESSFUL\n return paramiko.AUTH_FAILED\n except:\n return paramiko.AUTH_FAILED\n\n def get_allowed_auths(self, username):\n \"\"\"\n Callback for getting allowed authentication methods for a user.\n\n Parameters:\n - username: Username for which allowed authentication methods are requested.\n\n Returns:\n - Comma-separated string of allowed authentication methods.\n \"\"\"\n with self.lock:\n if (not username == \"new\") and (username in self.connections):\n return \"none\"\n\n user = self.db.get_user(username)\n allowed = [\"password\"]\n if user is None:\n return \"none\"\n elif user.get(\"public_key\") is not None:\n allowed.append(\"publickey\")\n return \",\".join(allowed)\n\n def get_banner(self):\n \"\"\"\n Callback for getting a banner to send to clients during connection.\n\n Returns:\n - Tuple containing the banner text (\"LAN PONG\\r\\n\") and language code (\"en-US\").\n \"\"\"\n return (\"LAN PONG\\r\\n\", \"en-US\")" }, { "identifier": "Ping", "path": "lanpong/server/ping.py", "snippet": "class Ping:\n # Maximum size for the ping result cache\n MAX_CACHE_SIZE = 100\n\n def __init__(self, ip) -> None:\n \"\"\"\n Initialize a Ping object with an IP address and an empty cache.\n\n Parameters:\n - ip: IP address to ping.\n \"\"\"\n self._cache = [] # List to store ping results\n self.ip = ip # IP address to ping\n\n def get(self):\n \"\"\"\n Get the average ping time for the specified IP address.\n\n Returns:\n - Average ping time rounded to 3 decimal places.\n \"\"\"\n self.get_ping(self.ip)\n average = sum(self._cache) / len(self._cache)\n return round(average, 3)\n\n def get_ping(self, ip_address):\n \"\"\"\n Perform a single ping to the specified IP address and update the cache.\n\n Parameters:\n - ip_address: IP address to ping.\n \"\"\"\n command = [\"ping\", \"-c\", \"1\", ip_address]\n process = subprocess.Popen(\n command, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n stdout, stderr = process.communicate()\n\n if process.returncode == 0:\n # Regex to extract the time from the output\n match = re.search(r\"time=(\\d+\\.?\\d*)\\s*ms\", stdout.decode())\n if match:\n if len(self._cache) > self.MAX_CACHE_SIZE:\n # If the cache exceeds the maximum size, remove the oldest entry\n self._cache.pop(0)\n # Add the ping time to the cache\n self._cache.append(float(match.group(1)))\n else:\n # Print an error message if the ping was not successful\n print(f\"Error pinging {ip_address}: {stderr}\")\n return None" }, { "identifier": "DB", "path": "lanpong/server/db.py", "snippet": "class DB:\n def __init__(self, filename=\"users.json\"):\n \"\"\"\n Initialize the DB object.\n\n Args:\n filename (str): The name of the JSON file used for storage.\n \"\"\"\n self.filename = filename\n self.lock = threading.Lock()\n self.path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), self.filename\n )\n self.users = self.load_db()\n\n def load_db(self):\n \"\"\"\n Load the user data from the JSON file.\n\n Returns:\n list: List of user objects.\n \"\"\"\n if not Path(self.path).is_file():\n return []\n with open(self.path, \"r\") as file:\n return json.load(file)\n\n def save_db(self):\n \"\"\"\n Save the user data to the JSON file.\n \"\"\"\n with open(self.path, \"w\") as file:\n json.dump(self.users, file, indent=2)\n\n def is_username_valid(self, username):\n \"\"\"\n Check if the given username is 
unique in the user list.\n\n Args:\n username (str): The username to check.\n\n Returns:\n bool: True if the username is unique, False otherwise.\n \"\"\"\n if username == \"\" or re.search(r\"\\s\", username):\n return False\n\n for user in self.users:\n if user[\"username\"] == username:\n return False\n return True\n\n def create_user(self, username, password, score=0):\n \"\"\"\n Create a new user with sanitization and unique username checking.\n\n Args:\n username (str): The new username.\n password (str): The password for the new user.\n score (int): The initial score for the new user (default is 0).\n\n Raises:\n ValueError: If the username is not unique or if either username or password is empty.\n \"\"\"\n if not username or not password:\n raise ValueError(\"Username and password are required.\")\n\n with self.lock:\n if not self.is_username_valid(username):\n raise ValueError(\"Username already exists.\")\n\n new_user = {\n \"id\": len(self.users) + 1,\n \"username\": username,\n \"password\": password,\n \"score\": score,\n }\n\n self.users.append(new_user)\n self.save_db()\n\n def update_user(self, user_id, new_data):\n \"\"\"\n Update user information based on the user's ID.\n\n Args:\n user_id (int): The ID of the user to update.\n new_data (dict): A dictionary containing the new data to update for the user.\n\n Raises:\n ValueError: If the user with the specified ID is not found.\n \"\"\"\n with self.lock:\n for user in self.users:\n if user[\"id\"] == user_id:\n for key, value in new_data.items():\n user[key] = value\n self.save_db()\n\n def login(self, username, password):\n \"\"\"\n Attempt to authenticate a user with the provided username and password.\n\n Args:\n username (str): The username to authenticate.\n password (str): The password to authenticate.\n\n Returns:\n dict: User information if authentication is successful, None otherwise.\n \"\"\"\n with self.lock:\n for user in self.users:\n if user[\"username\"] == username and user[\"password\"] == password:\n return user\n return None\n\n def get_user(self, username):\n \"\"\"\n Retrieve a user by their username.\n\n Args:\n username (str): The username to retrieve.\n\n Returns:\n dict or None: The user information if a user with the given username exists, None otherwise.\n \"\"\"\n with self.lock:\n for user in self.users:\n if user[\"username\"] == username:\n return user\n return None\n\n def get_top_users(self, num):\n \"\"\"\n Get the top users based on their score.\n\n Args:\n num (int): The number of users to return.\n\n Returns:\n list: A list of the top users.\n \"\"\"\n with self.lock:\n return sorted(self.users, key=lambda x: x[\"score\"], reverse=True)[:num]" } ]
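A standalone sketch of the parsing step inside Ping.get_ping above; the sample line is illustrative, and real ping output differs across platforms.

import re

sample = "64 bytes from 192.168.0.10: icmp_seq=1 ttl=64 time=0.482 ms"
match = re.search(r"time=(\d+\.?\d*)\s*ms", sample)
if match:
    print(float(match.group(1)))  # -> 0.482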
import re import socket import threading import time import paramiko import numpy as np from itertools import count from ..game.game import Game from lanpong.server.ssh import SSHServer from lanpong.server.ping import Ping from lanpong.server.db import DB
6,521
] ): # Center each line. start = (cols - len(line)) // 2 screen[current_row + i, start : len(line) + start] = list(line) return Game.screen_to_tui(screen) def wait_for_char(channel, channel_file, valid_chars): """ Waits for a character from the client that is in the valid_chars set. """ while not channel.closed: char = channel_file.read(1).decode() if char in valid_chars: return char class Server: def __init__(self, key_file_name="test_key") -> None: self.lock = threading.Lock() self.db = DB() self.server_key = paramiko.RSAKey.from_private_key_file(filename=key_file_name) # Set of usernames of connected clients. # Used to prevent multiple connections from the same user. self.connections = set() self.waiting_screen = get_message_screen( f"You are player 1. Waiting for player 2..." ) self.games = [] self.games_lock = threading.Lock() def start_server(self, host="0.0.0.0", port=2222): """Starts an SSH server on specified port and address Args: host (str): Server host addr. Defaults to '0.0.0.0'. port (int): Port. Defaults to 2222. """ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_sock: # Bind socket to port and start listening for connections. server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_sock.bind((host, port)) server_sock.listen(100) print(f"Listening for connection on {host}:{port}") # Accept multiple connections, thread-out while True: client_socket, client_addr = server_sock.accept() print(f"Incoming connection from {client_addr[0]}:{client_addr[1]}") client_thread = threading.Thread( target=self.handle_client, args=(client_socket,) ) client_thread.start() def handle_game(self, game: Game): """ Handles the non-paddle game updates (mainly the ball) """ game.is_game_started_event.wait() while game.loser == 0: game.update_game() time.sleep(0.05) def handle_ping(self, game: Game, ping: Ping, name, player_id): """ Handles the ping updates """ game.is_game_started_event.wait() while game.loser == 0: game.update_network_stats(f"{name}'s PING: {ping.get():.3F}ms", player_id) time.sleep(0.05) def echo_line(self, channel_file, channel): line = "" while True: char = channel_file.read(1).decode() # Handle backspace (ASCII 8 or '\b' or '\x7F') if char in {"\x08", "\b", "\x7F"}: if line: # Remove the last character from the line and move the cursor back line = line[:-1] elif char == "\r" or char == "\n": break else: line += char channel.sendall(char) return line def get_game_or_create(self, username): """ Returns a game that is not full, or creates a new one Returns: (Game, int): Game and player id """ with self.games_lock: # Get a game that is not full, or None if all games are full. game = next((g for g in self.games if not g.is_full()), None) if game is None: # No game available, create a new one. game = Game() self.games.append(game) # Create a thread for this game and start it. game_thread = threading.Thread(target=self.handle_game, args=(game,)) game_thread.start() player_id = game.initialize_player(username) return game, player_id def handle_client(self, client_socket): """ Handles a client connection. """ try: # Initialize the SSH server protocol for this connection. transport = paramiko.Transport(client_socket)
CLEAR_SCREEN = "\x1b[H\x1b[J"
HIDE_CURSOR = "\033[?25l"
SHOW_CURSOR = "\033[?25h"
LOGO_ASCII = """\
_ ___ _ _ ______ _____ _ _ _____
| | / _ \ | \ | || ___ \ _ | \ | | __ \\
| | / /_\ \| \| || |_/ / | | | \| | | \/
| | | _ || . ` || __/| | | | . ` | | __
| |____| | | || |\ || | \ \_/ / |\ | |_\ \\
\_____/\_| |_/\_| \_/\_| \___/\_| \_/\____/""".splitlines()


def get_message_screen(message):
    """
    Returns a screen with the message centered.
    """
    screen = Game.get_blank_screen(stats_height=0)
    rows, cols = screen.shape
    assert len(message) < cols - 2
    start = (cols - len(message)) // 2
    screen[rows // 2, start : start + len(message)] = list(message)
    return Game.screen_to_tui(screen)


def send_frame(channel, frame):
    """
    Sends a frame to the client.
    """
    return channel.sendall("".join([CLEAR_SCREEN, frame, HIDE_CURSOR]))


def get_lobby_screen(db, username=""):
    """
    Returns the lobby screen with the leaderboard and options.
    """
    screen = Game.get_blank_screen(stats_height=0)
    rows, cols = screen.shape
    assert len(LOGO_ASCII[0]) < cols - 2
    start = (cols - len(LOGO_ASCII[0])) // 2
    for i, line in enumerate(LOGO_ASCII):
        # Center each line of the logo.
        screen[1 + i, start : start + len(line)] = list(line)
    current_row = 1 + len(LOGO_ASCII) + 1
    for i, line in enumerate(
        [f"Welcome to LAN PONG, {username}!", "Leaderboard:"]
        + [
            f"{i + 1}. {user['username']} - {user['score']}"
            for i, user in enumerate(db.get_top_users(10))
        ]
        + [
            "",
            "Press key to proceed:",
            "[1] Matchmaking",
            "[2] Public key configuration",
        ]
    ):
        # Center each line.
        start = (cols - len(line)) // 2
        screen[current_row + i, start : len(line) + start] = list(line)
    return Game.screen_to_tui(screen)


def wait_for_char(channel, channel_file, valid_chars):
    """
    Waits for a character from the client that is in the valid_chars set.
    """
    while not channel.closed:
        char = channel_file.read(1).decode()
        if char in valid_chars:
            return char


class Server:
    def __init__(self, key_file_name="test_key") -> None:
        self.lock = threading.Lock()
        self.db = DB()
        self.server_key = paramiko.RSAKey.from_private_key_file(filename=key_file_name)
        # Set of usernames of connected clients.
        # Used to prevent multiple connections from the same user.
        self.connections = set()
        self.waiting_screen = get_message_screen(
            f"You are player 1. Waiting for player 2..."
        )
        self.games = []
        self.games_lock = threading.Lock()

    def start_server(self, host="0.0.0.0", port=2222):
        """Starts an SSH server on specified port and address

        Args:
            host (str): Server host addr. Defaults to '0.0.0.0'.
            port (int): Port. Defaults to 2222.
        """
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_sock:
            # Bind socket to port and start listening for connections.
            server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server_sock.bind((host, port))
            server_sock.listen(100)
            print(f"Listening for connection on {host}:{port}")

            # Accept multiple connections, thread-out
            while True:
                client_socket, client_addr = server_sock.accept()
                print(f"Incoming connection from {client_addr[0]}:{client_addr[1]}")
                client_thread = threading.Thread(
                    target=self.handle_client, args=(client_socket,)
                )
                client_thread.start()

    def handle_game(self, game: Game):
        """
        Handles the non-paddle game updates (mainly the ball)
        """
        game.is_game_started_event.wait()
        while game.loser == 0:
            game.update_game()
            time.sleep(0.05)

    def handle_ping(self, game: Game, ping: Ping, name, player_id):
        """
        Handles the ping updates
        """
        game.is_game_started_event.wait()
        while game.loser == 0:
            game.update_network_stats(f"{name}'s PING: {ping.get():.3F}ms", player_id)
            time.sleep(0.05)

    def echo_line(self, channel_file, channel):
        line = ""
        while True:
            char = channel_file.read(1).decode()
            # Handle backspace (ASCII 8 or '\b' or '\x7F')
            if char in {"\x08", "\b", "\x7F"}:
                if line:
                    # Remove the last character from the line and move the cursor back
                    line = line[:-1]
            elif char == "\r" or char == "\n":
                break
            else:
                line += char
                channel.sendall(char)
        return line

    def get_game_or_create(self, username):
        """
        Returns a game that is not full, or creates a new one

        Returns:
            (Game, int): Game and player id
        """
        with self.games_lock:
            # Get a game that is not full, or None if all games are full.
            game = next((g for g in self.games if not g.is_full()), None)
            if game is None:
                # No game available, create a new one.
                game = Game()
                self.games.append(game)
                # Create a thread for this game and start it.
                game_thread = threading.Thread(target=self.handle_game, args=(game,))
                game_thread.start()
            player_id = game.initialize_player(username)
        return game, player_id

    def handle_client(self, client_socket):
        """
        Handles a client connection.
        """
        try:
            # Initialize the SSH server protocol for this connection.
            transport = paramiko.Transport(client_socket)
ssh_server = SSHServer(self)
1
2023-11-13 04:32:20+00:00
8k
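The lanpong record above centers on Server.get_game_or_create, a lock-protected find-or-create pattern for matchmaking. Below is a minimal, self-contained sketch of that pattern; the Room class and the two-player cap are illustrative assumptions, not part of the lanpong codebase.

import threading

class Room:
    # Illustrative stand-in for lanpong's Game: holds up to two players.
    def __init__(self):
        self.players = []

    def is_full(self):
        return len(self.players) >= 2

rooms = []
rooms_lock = threading.Lock()

def get_room_or_create(username):
    # Same shape as Server.get_game_or_create: scan for a non-full room
    # under the lock, create one if none exists, then join it.
    with rooms_lock:
        room = next((r for r in rooms if not r.is_full()), None)
        if room is None:
            room = Room()
            rooms.append(room)
        room.players.append(username)
    return room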
maagic6/SDIMV
SDIMV.py
[ { "identifier": "imageProcess", "path": "image.py", "snippet": "class imageProcess:\n def __init__(self, fn):\n ft = filetype.guess(fn)\n self.data = {\"prompt\": \"\", \n \"negative_prompt\": \"\", \n \"steps\": \"\", \n \"sampler\": \"\", \n \"cfg_scale\": \"\", \n \"seed\": \"\", \n \"size\": \"\",\n \"model_hash\": \"\",\n \"model\": \"\",\n \"lora\": \"\"}\n if ft == None:\n self.compatible = False\n elif ft.extension in ['png']:\n self.image = Image.open(fn)\n self.compatible = False\n\n if 'parameters' in self.image.info: #web ui\n self.info = str(self.image.info['parameters'])\n self.metadataType = 'parameters'\n self.compatible = True\n self.image.close()\n del self.image\n elif 'Comment' in self.image.info: #novelai\n self.info = json.loads(self.image.info['Comment'])\n self.metadataType = 'comment'\n self.compatible = True\n self.image.close()\n self.image = None\n del self.image\n elif 'prompt' in self.image.info: #comfyui\n self.info = json.loads(self.image.info['prompt'])\n self.metadataType = 'prompt'\n self.compatible = True\n self.image.close()\n self.image = None\n del self.image\n elif ft.extension == 'jpg':\n self.image = Image.open(fn)\n self.compatible = False\n exif_data = self.image._getexif()\n\n if exif_data is not None:\n for tag, value in ExifTags.TAGS.items():\n if tag in exif_data:\n if ExifTags.TAGS[tag] == \"UserComment\":\n user_comment = exif_data[tag]\n user_comment_unicode = user_comment.decode(\"utf-8\") #decode\n user_comment_unicode_sanitized = user_comment_unicode.replace('UNICODE', '').replace('\\x00', '')\n self.info = user_comment_unicode_sanitized\n self.metadataType = 'parameters'\n self.compatible = True\n \n self.image.close()\n self.image = None\n del self.image\n elif ft.extension in ['mp4']:\n video = MP4(fn)\n self.data = {}\n try:\n if '\\xa9cmt' in video.tags:\n metadata = video.tags['\\xa9cmt']\n self.metadataType = \"video\"\n self.info = json.loads(metadata[0])\n self.compatible = True\n video = None\n del video\n else:\n self.compatible = False\n video = None\n del video\n except:\n self.compatible = False\n video = None\n del video\n else:\n self.compatible = False\n \n def findKeyName(self, data, keys):\n if isinstance(data, dict):\n for key, value in data.items():\n if key == keys:\n return value\n result = self.findKeyName(value, keys)\n if result is not None:\n return result\n elif isinstance(data, list):\n for item in data:\n result = self.findKeyName(item, keys)\n if result is not None:\n return result \n return None\n\n def getInfo(self): # messy\n if self.metadataType == 'parameters':\n matches = re.findall(r'([^:,]+): ([^,]+)', self.info.replace('\\n', ','))\n for match in matches:\n key = match[0].strip().lower().replace(' ', '_')\n value = match[1].strip()\n self.data[key] = value\n try:\n positive = str(re.split(r'Negative prompt: |Steps: ', self.info)[0])\n except:\n positive = \"\"\n self.data[\"prompt\"]=positive\n try:\n negative = str(re.split(r'Negative prompt: ', self.info, maxsplit=1)[1].split('Steps:')[0].strip())\n except:\n negative = \"\"\n self.data[\"negative_prompt\"] = negative\n loraTags = re.findall(r'<lora:[^>]+>', self.info)\n uniqueLoraTags = set(loraTags)\n loraString = ' '.join(uniqueLoraTags)\n self.data[\"lora\"] = loraString\n if \"model\" not in self.data:\n self.data[\"model\"] = \"\"\n return self.data\n if self.metadataType == 'comment': #novelai\n self.data[\"prompt\"] = str(self.info[\"prompt\"])\n self.data[\"negative_prompt\"] = str(self.info[\"uc\"])\n self.data[\"steps\"] = 
str(self.info[\"steps\"])\n self.data[\"sampler\"] = str(self.info[\"sampler\"])\n self.data[\"cfg_scale\"] = str(self.info[\"scale\"])\n self.data[\"seed\"] = str(self.info[\"seed\"])\n self.data[\"size\"] = str(self.info[\"height\"])+'x'+str(self.info[\"width\"])\n self.data[\"model\"] = ''\n self.data[\"model_hash\"] = ''\n loraTags = re.findall(r'<lora:[^>]+>', str(self.info))\n uniqueLoraTags = set(loraTags)\n loraString = ' '.join(uniqueLoraTags)\n self.data[\"lora\"] = loraString\n return self.data\n if self.metadataType == 'video':\n promptKey = self.findKeyName(data=self.info, keys=\"positive\")[0]\n self.data[\"prompt\"] = self.info.get('prompt', {}).get(f'{promptKey}', {}).get('inputs', {}).get('text')\n self.data[\"negative_prompt\"] = self.info.get('prompt', {}).get('320', {}).get('inputs', {}).get('text')\n self.data[\"steps\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('steps')\n self.data[\"sampler\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('sampler_name')\n self.data[\"cfg_scale\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('cfg')\n self.data[\"seed\"] = self.info.get('prompt', {}).get('528', {}).get('inputs', {}).get('seed')\n self.data[\"size\"] = str(self.info.get('prompt', {}).get('539', {}).get('inputs', {}).get('height'))+'x'+str(self.info.get('prompt', {}).get('539', {}).get('inputs', {}).get('width'))\n self.data[\"model\"] = self.info.get('prompt', {}).get('513', {}).get('inputs', {}).get('model_name')\n self.data[\"model_hash\"] = \"\"\n self.data[\"lora\"] = \"\"\n return self.data\n if self.metadataType == 'prompt':\n promptKey = self.findKeyName(data=self.info, keys=\"positive\")\n if type(promptKey) == list:\n promptKey = self.findKeyName(data=self.info, keys=\"positive\")[0]\n if \"pre_text\" in self.info.get(f'{promptKey}', {}).get('inputs', {}):\n self.data[\"prompt\"] = re.search(r'([^--]+)--neg', str(self.info[f'{promptKey}']['inputs']['pre_text'])).group(1)\n else:\n self.data[\"prompt\"] = self.info[f'{promptKey}']['inputs'].get('text', None)\n else:\n self.data[\"prompt\"] = promptKey\n negativePromptKey = self.findKeyName(data=self.info, keys=\"negative\")\n if type(negativePromptKey) == list:\n negativePromptKey = self.findKeyName(data=self.info, keys=\"negative\")[0]\n if \"pre_text\" in self.info.get(f'{promptKey}', {}).get('inputs', {}):\n self.data[\"negative_prompt\"] = re.search(r'--neg\\s*([^\\n]+)', str(self.info[f'{negativePromptKey}']['inputs']['pre_text'])).group(1).strip() #I hate ComfyUI\n else:\n self.data[\"negative_prompt\"] = self.info[f'{negativePromptKey}']['inputs'].get('text', None)\n #self.data[\"negative_prompt\"] = type(negativePromptKey)\n else:\n self.data[\"negative_prompt\"] = negativePromptKey\n self.data[\"steps\"] = self.findKeyName(data=self.info, keys=\"steps\")\n self.data[\"sampler\"] = self.findKeyName(data=self.info, keys=\"sampler_name\")\n self.data[\"cfg_scale\"] = self.findKeyName(data=self.info, keys=\"cfg\")\n self.data[\"seed\"] = self.findKeyName(data=self.info, keys=\"noise_seed\") or self.findKeyName(data=self.info, keys=\"seed\")\n self.data[\"size\"] = self.findKeyName(data=self.info, keys=\"resolution\")\n self.data[\"model\"] = self.findKeyName(data=self.info, keys=\"ckpt_name\")\n self.data[\"model_hash\"] = ''\n self.data[\"lora\"] = ''\n return self.data\n\n def save_metadata(self):\n #todo\n pass\n \n def getRaw(self):\n return self.info\n \n def positivePrompt(self):\n if self.compatible == False:\n return -1\n else:\n 
if self.metadataType == 'parameters':\n positive = \"\"\n return positive\n if self.metadataType == 'comment':\n positive = \"\"\n return positive\n\n '''def negativePrompt(self):\n if self.compatible == False:\n return -1 \n else:\n negative = \"\"\n return negative'''" }, { "identifier": "FileHandler", "path": "file_handler.py", "snippet": "class FileHandler:\n def __init__(self, main_window):\n self.main_window = main_window\n \n def downloadImage(self, url):\n try:\n response = requests.get(url.toString())\n if response.status_code == 200:\n # get the file extension from the content-type header\n url_filename = os.path.basename(unquote(url.toString()))\n invalid_characters = ['<', '>', ':', '\"', '/', '\\\\', '|', '?', '*']\n for char in invalid_characters:\n url_filename = url_filename.replace(char, '_')\n file_extension = response.headers.get('Content-Type').split('/')[-1]\n # create a unique filename in the current working directory\n filename = f\"{url_filename}.{file_extension}\"\n save_path = os.path.join(os.getcwd(), f\"{url_filename}.{file_extension}\")\n # save the image locally\n with open(filename, 'wb') as file:\n file.write(response.content)\n return save_path\n else:\n print(f\"Failed to download image. HTTP Status Code: {response.status_code}\")\n except Exception as e:\n print(f\"Error downloading image: {e}\")\n return None\n \n def copyTempImage(self, temp_file_path):\n try:\n # create a copy of the image file in the current working directory\n copied_path = os.path.join(os.getcwd(), os.path.basename(temp_file_path))\n shutil.copyfile(temp_file_path, copied_path)\n return copied_path\n except Exception as e:\n print(f\"Error copying temp image: {e}\")\n return None\n \n def openFileDialog(self):\n filenames, _ = QFileDialog.getOpenFileNames(\n self.main_window,\n \"Select image files\",\n \"\",\n \"Images and videos (*.png *.jpg *.gif *.webp *.mp4)\"\n )\n if filenames:\n new_files = []\n new_files = [filename for filename in filenames if not self.isFileInList(filename)]\n self.updateFileList(new_files)\n \n def updateFileList(self, file_paths):\n for file_path in file_paths:\n item = QListWidgetItem(file_path)\n self.main_window.fileList.addItem(item)\n\n if self.main_window.fileList.count() > 0:\n last_item = self.main_window.fileList.item(self.main_window.fileList.count() - 1)\n self.main_window.fileList.setCurrentItem(last_item)\n self.main_window.viewMetadata(last_item)\n else:\n self.main_window.viewMetadata(None)\n \n def clearFileList(self):\n self.main_window.fileList.clear()\n #self.main_window.imageScene.clear()\n self.main_window.selectedFile.clear()\n for _, widget, _ in self.main_window.widgetInfo:\n widget.clear()\n self.main_window.viewMetadata(None)\n\n def removeSelectedItem(self):\n selectedItem = self.main_window.fileList.currentItem()\n if selectedItem:\n selectedIndex = self.main_window.fileList.row(selectedItem)\n self.main_window.fileList.takeItem(selectedIndex)\n # if last index\n if selectedIndex == (self.main_window.fileList.count()):\n if self.main_window.fileList.count() > 0:\n last_item = self.main_window.fileList.item(self.main_window.fileList.count() - 1)\n self.main_window.fileList.setCurrentItem(last_item)\n self.main_window.viewMetadata(last_item)\n else:\n self.main_window.viewMetadata(None)\n else:\n self.main_window.viewMetadata(self.main_window.fileList.item(selectedIndex))\n\n def getFilesFromFolder(self, path):\n folder_path = Path(path)\n png_files = list(folder_path.rglob('*.[pP][nN][gG]'))\n jpg_files = 
list(folder_path.rglob('*.[jJ][pP][gG]'))\n webp_files = list(folder_path.rglob('*.[wW][eE][bB][pP]'))\n gif_files = list(folder_path.rglob('*.[gG][iI][fF]'))\n mp4_files = list(folder_path.rglob('*.[mM][pP][4]'))\n png_files = [str(file_path).replace('\\\\', '/') for file_path in png_files]\n jpg_files = [str(file_path).replace('\\\\', '/') for file_path in jpg_files]\n webp_files = [str(file_path).replace('\\\\', '/') for file_path in webp_files]\n gif_files = [str(file_path).replace('\\\\', '/') for file_path in gif_files]\n mp4_files = [str(file_path).replace('\\\\', '/') for file_path in mp4_files]\n image_files = set(png_files + jpg_files + webp_files + gif_files + mp4_files)\n unique_image_files = image_files\n\n return unique_image_files\n\n def isFileInList(self, file_path):\n for row in range(self.main_window.fileList.count()):\n item = self.main_window.fileList.item(row)\n if item.text() == file_path:\n return True\n return False\n\n def getFileList(self):\n return [self.main_window.fileList.item(row).text() for row in range(self.main_window.fileList.count())]" }, { "identifier": "CustomDockWidget", "path": "custom_widgets.py", "snippet": "class CustomDockWidget(QDockWidget):\n def __init__(self, main_window, parent=None):\n super().__init__(parent)\n self.setAcceptDrops(True)\n self.main_window = main_window\n\n def dragEnterEvent(self, event):\n self.main_window.dragEnterEvent(event)\n\n def dropEvent(self, event):\n self.main_window.dropEvent(event)" }, { "identifier": "CustomLineEdit", "path": "custom_widgets.py", "snippet": "class CustomLineEdit(QLineEdit):\n def keyPressEvent(self, event):\n if event.key() == Qt.Key.Key_Tab:\n self.focusNextPrevChild(True)\n else:\n super().keyPressEvent(event)" }, { "identifier": "CustomTextEdit", "path": "custom_widgets.py", "snippet": "class CustomTextEdit(QTextEdit):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.document().contentsChanged.connect(self.adjustSize)\n\n def adjustSize(self):\n document_height = self.document().size().height()\n current_height = self.height()\n if document_height != current_height:\n self.setFixedHeight(int(document_height) + 10 if document_height < 150 else 150)\n \n def showEvent(self, event):\n super().showEvent(event)\n self.adjustSize()\n\n def keyPressEvent(self, event):\n if event.key() == Qt.Key.Key_Tab:\n event.ignore()\n else:\n super().keyPressEvent(event)" }, { "identifier": "CustomListWidget", "path": "custom_widgets.py", "snippet": "class CustomListWidget(QListWidget):\n def wheelEvent(self, event: QWheelEvent):\n current_index = self.currentRow()\n total_items = self.count()\n if total_items == 0:\n return\n new_index = (current_index - 1) % total_items if event.angleDelta().y() > 0 else (current_index + 1) % total_items\n self.setCurrentRow(new_index)" }, { "identifier": "CustomTitleBar", "path": "custom_widgets.py", "snippet": "class CustomTitleBar(StandardTitleBar):\n def __init__(self, parent):\n super().__init__(parent)\n font=QFont(\"Segoe UI\", 10)\n self.minBtn.setHoverColor(Qt.GlobalColor.white)\n self.minBtn.setHoverBackgroundColor(QColor(0, 100, 182))\n self.minBtn.setPressedColor(Qt.GlobalColor.white)\n self.minBtn.setPressedBackgroundColor(QColor(54, 57, 65))\n self.minBtn.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.maxBtn.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.closeBtn.setFocusPolicy(Qt.FocusPolicy.NoFocus)\n self.titleLabel.setFont(font)\n self.titleLabel.setStyleSheet(\"\"\"\n QFont {\n font: Segoe UI,\n font_size: 10\n }\n 
\"\"\")" }, { "identifier": "ZoomableGraphicsView", "path": "custom_widgets.py", "snippet": "class ZoomableGraphicsView(QGraphicsView):\n def __init__(self, parent=None):\n super(ZoomableGraphicsView, self).__init__(parent)\n self.current_zoom = 1.0\n self.minimum_zoom = 0.1\n self.maximum_zoom = 25.0\n\n def wheelEvent(self, event: QWheelEvent):\n event.accept()\n factor = 1.2 if event.angleDelta().y() > 0 else 1.0 / 1.2\n new_zoom = self.current_zoom * factor\n new_zoom = max(self.minimum_zoom, min(self.maximum_zoom, new_zoom))\n scale_factor = new_zoom / self.current_zoom\n self.current_zoom = new_zoom\n self.scale(scale_factor, scale_factor)\n \n def resetZoom(self):\n self.current_zoom = 1.0" }, { "identifier": "resource_path", "path": "icon.py", "snippet": "def resource_path(relative_path):\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n return os.path.join(base_path, relative_path)" }, { "identifier": "AboutDialog", "path": "about_dialog.py", "snippet": "class AboutDialog(FramelessDialog):\n def __init__(self, main_window):\n super().__init__()\n self.main_window = main_window\n layout = QVBoxLayout(self)\n hlayout = QHBoxLayout()\n vlayout = QVBoxLayout()\n vlayout.setSpacing(0)\n vlayout.setContentsMargins(0, 0, 0, 0)\n logo = QLabel()\n icon = resource_path(\"icon/icon.ico\")\n pixmap = QPixmap(icon)\n pixmap = pixmap.scaledToWidth(80)\n logo.setPixmap(pixmap)\n font = QFont()\n font.setBold(True)\n font.setPointSize(16)\n title = QLabel(\"SDIMV\")\n title.setFont(font)\n githubLink = QLabel('<a href=\"https://github.com/maagic6/SDIMV\">GitHub</a>')\n githubLink.setOpenExternalLinks(True)\n vlayout.addWidget(title, alignment=Qt.AlignmentFlag.AlignTop)\n vlayout.addWidget(QLabel(\"v1.2.1\"), alignment=Qt.AlignmentFlag.AlignTop)\n vlayout.addWidget(githubLink, alignment=Qt.AlignmentFlag.AlignBottom)\n hlayout.addWidget(logo)\n hlayout.addLayout(vlayout)\n layout.addLayout(hlayout)\n self.setFixedSize(240,120)\n self.setContentsMargins(0,0,35,0)\n self.setWindowTitle(\"About\")\n \n def closeEvent(self, event):\n self.main_window.setEnabled(True)\n self.deleteLater()\n event.accept()\n\n def showEvent(self, event):\n main_window_center = self.main_window.geometry().center()\n self.move(main_window_center - self.rect().center())\n super().showEvent(event)" } ]
import sys, subprocess, qdarkstyle
from PyQt6.QtWidgets import (
    QApplication,
    QFrame,
    QGraphicsPixmapItem,
    QGraphicsScene,
    QGraphicsView,
    QGridLayout,
    QLabel,
    QLineEdit,
    QMenu,
    QToolBar,
    QVBoxLayout,
    QHBoxLayout,
    QWidget,
    QPushButton,
    QScrollArea,
    QDockWidget,
    QMessageBox,
)
from PyQt6.QtGui import QIcon, QAction, QFont, QPainter, QMovie, QPixmap, QDesktopServices
from PyQt6.QtCore import Qt, QRectF, QEvent, QUrl, QSettings, QSystemSemaphore, QSharedMemory
from PyQt6.QtMultimedia import QMediaPlayer
from PyQt6.QtMultimediaWidgets import QGraphicsVideoItem
from pathlib import Path
from qframelesswindow import FramelessMainWindow
from image import imageProcess
from file_handler import FileHandler
from custom_widgets import CustomDockWidget, CustomLineEdit, CustomTextEdit, CustomListWidget, CustomTitleBar, ZoomableGraphicsView
from icon import resource_path
from about_dialog import AboutDialog
5,467
class MainWindow(FramelessMainWindow):
    def __init__(self):
        super().__init__()
        self.fileHandler = FileHandler(self)

        #window size
        self.setTitleBar(CustomTitleBar(self))
        self.setWindowTitle('SDIMV')
        self.titleBar.raise_()
        self.settings = QSettings("maagic6", "SDIMV")
        savedGeometry = self.settings.value("main_window_geometry")
        if savedGeometry is not None:
            self.restoreGeometry(savedGeometry)
        else:
            self.resize(720,720)
            qr = self.frameGeometry()
            cp = self.screen().availableGeometry().center()
            qr.moveCenter(cp)
            self.move(qr.topLeft())
        iconPath = resource_path("icon/icon.ico")
        self.setWindowIcon(QIcon(iconPath))
        toolbar = QToolBar("Toolbar")
        toolbar.setStyleSheet("QToolBar {background: transparent;}"
                              "QToolButton {background: transparent; border: none;}"
                              "QToolButton:hover {background: rgba(195, 195, 255, 50);}")
        iconPath2 = resource_path("icon/add.png")
        iconPath3 = resource_path("icon/remove.png")
        iconPath4 = resource_path("icon/clear.png")
        iconPath5 = resource_path("icon/about.png")
        addAction = QAction(QIcon(iconPath2), "Add", self)
        addAction.triggered.connect(self.fileHandler.openFileDialog)
        removeAction = QAction(QIcon(iconPath3), "Remove", self)
        removeAction.triggered.connect(self.fileHandler.removeSelectedItem)
        clearAction = QAction(QIcon(iconPath4), "Clear", self)
        clearAction.triggered.connect(self.fileHandler.clearFileList)
        aboutAction = QAction(QIcon(iconPath5), "About", self)
        aboutAction.triggered.connect(self.showAboutDialog)
        toolbar.addAction(addAction)
        toolbar.addAction(removeAction)
        toolbar.addAction(clearAction)
        toolbar.addAction(aboutAction)
        toolbar.setObjectName("Toolbar")
        self.addToolBar(toolbar)
        self.imagePreviewFrame = QFrame()
        self.imagePreviewFrame.setFrameShape(QFrame.Shape.Box)
        self.imagePreviewFrame.setLineWidth(1)
        self.imagePreviewFrame.setFocusPolicy(Qt.FocusPolicy.NoFocus)
        self.imageFrame = QVBoxLayout()
        self.imagePreviewFrame.setLayout(self.imageFrame)
        self.imageScene = QGraphicsScene()
        self.imageView = ZoomableGraphicsView(self.imageScene)
        self.imageView.setRenderHint(QPainter.RenderHint.Antialiasing, True)
        self.imageView.setDragMode(QGraphicsView.DragMode.ScrollHandDrag)
        self.imageView.setFocusPolicy(Qt.FocusPolicy.NoFocus)
        self.imageFrame.addWidget(self.imageView)
class MainWindow(FramelessMainWindow):
    def __init__(self):
        super().__init__()
        self.fileHandler = FileHandler(self)

        #window size
        self.setTitleBar(CustomTitleBar(self))
        self.setWindowTitle('SDIMV')
        self.titleBar.raise_()
        self.settings = QSettings("maagic6", "SDIMV")
        savedGeometry = self.settings.value("main_window_geometry")
        if savedGeometry is not None:
            self.restoreGeometry(savedGeometry)
        else:
            self.resize(720,720)
            qr = self.frameGeometry()
            cp = self.screen().availableGeometry().center()
            qr.moveCenter(cp)
            self.move(qr.topLeft())
        iconPath = resource_path("icon/icon.ico")
        self.setWindowIcon(QIcon(iconPath))
        toolbar = QToolBar("Toolbar")
        toolbar.setStyleSheet("QToolBar {background: transparent;}"
                              "QToolButton {background: transparent; border: none;}"
                              "QToolButton:hover {background: rgba(195, 195, 255, 50);}")
        iconPath2 = resource_path("icon/add.png")
        iconPath3 = resource_path("icon/remove.png")
        iconPath4 = resource_path("icon/clear.png")
        iconPath5 = resource_path("icon/about.png")
        addAction = QAction(QIcon(iconPath2), "Add", self)
        addAction.triggered.connect(self.fileHandler.openFileDialog)
        removeAction = QAction(QIcon(iconPath3), "Remove", self)
        removeAction.triggered.connect(self.fileHandler.removeSelectedItem)
        clearAction = QAction(QIcon(iconPath4), "Clear", self)
        clearAction.triggered.connect(self.fileHandler.clearFileList)
        aboutAction = QAction(QIcon(iconPath5), "About", self)
        aboutAction.triggered.connect(self.showAboutDialog)
        toolbar.addAction(addAction)
        toolbar.addAction(removeAction)
        toolbar.addAction(clearAction)
        toolbar.addAction(aboutAction)
        toolbar.setObjectName("Toolbar")
        self.addToolBar(toolbar)
        self.imagePreviewFrame = QFrame()
        self.imagePreviewFrame.setFrameShape(QFrame.Shape.Box)
        self.imagePreviewFrame.setLineWidth(1)
        self.imagePreviewFrame.setFocusPolicy(Qt.FocusPolicy.NoFocus)
        self.imageFrame = QVBoxLayout()
        self.imagePreviewFrame.setLayout(self.imageFrame)
        self.imageScene = QGraphicsScene()
        self.imageView = ZoomableGraphicsView(self.imageScene)
        self.imageView.setRenderHint(QPainter.RenderHint.Antialiasing, True)
        self.imageView.setDragMode(QGraphicsView.DragMode.ScrollHandDrag)
        self.imageView.setFocusPolicy(Qt.FocusPolicy.NoFocus)
        self.imageFrame.addWidget(self.imageView)
self.fileList = CustomListWidget()
5
2023-11-15 19:51:29+00:00
8k
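The SDIMV record's image.py context shows the intended calling pattern for imageProcess: construct it with a filename, check the compatible flag, then pull the parsed metadata. A minimal usage sketch follows; the input file path is an assumption for illustration.

from image import imageProcess

meta = imageProcess("sample.png")  # hypothetical input file
if meta.compatible:
    data = meta.getInfo()   # dict with prompt, negative_prompt, steps, seed, ...
    print(data["prompt"])
    print(meta.getRaw())    # raw embedded metadata
else:
    print("No readable generation metadata in this file.")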
TimbreWatermarking/TimbreWatermarking
watermarking_model/model/conv2_mel_rewm_modules.py
[ { "identifier": "FCBlock", "path": "watermarking_model/model/blocks.py", "snippet": "class FCBlock(nn.Module):\n \"\"\" Fully Connected Block \"\"\"\n\n def __init__(self, in_features, out_features, activation=None, bias=False, dropout=None, spectral_norm=False):\n super(FCBlock, self).__init__()\n self.fc_layer = nn.Sequential()\n self.fc_layer.add_module(\n \"fc_layer\",\n LinearNorm(\n in_features,\n out_features,\n bias,\n spectral_norm,\n ),\n )\n if activation is not None:\n self.fc_layer.add_module(\"activ\", activation)\n self.dropout = dropout\n\n def forward(self, x):\n x = self.fc_layer(x)\n if self.dropout is not None:\n x = F.dropout(x, self.dropout, self.training)\n return x" }, { "identifier": "PositionalEncoding", "path": "watermarking_model/model/blocks.py", "snippet": "class PositionalEncoding(nn.Module):\n def __init__(self, d_model, dropout=0.0, max_len=10000):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n # pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(1), :] # [WORD_NUM, BATCH, DIM]\n return self.dropout(x)" }, { "identifier": "Mish", "path": "watermarking_model/model/blocks.py", "snippet": "class Mish(nn.Module):\n def forward(self, x):\n return x * torch.tanh(F.softplus(x))" }, { "identifier": "Conv1DBlock", "path": "watermarking_model/model/blocks.py", "snippet": "class Conv1DBlock(nn.Module):\n \"\"\" 1D Convolutional Block \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, activation=None, dropout=None, spectral_norm=False):\n super(Conv1DBlock, self).__init__()\n\n self.conv_layer = nn.Sequential()\n self.conv_layer.add_module(\n \"conv_layer\",\n ConvNorm(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=1,\n padding=int((kernel_size - 1) / 2),\n dilation=1,\n w_init_gain=\"tanh\",\n spectral_norm=spectral_norm,\n ),\n )\n if activation is not None:\n self.conv_layer.add_module(\"activ\", activation)\n self.dropout = dropout\n\n def forward(self, x, mask=None):\n # x = x.contiguous().transpose(1, 2)\n x = self.conv_layer(x)\n\n if self.dropout is not None:\n x = F.dropout(x, self.dropout, self.training)\n\n # x = x.contiguous().transpose(1, 2)\n if mask is not None:\n x = x.masked_fill(mask.unsqueeze(-1), 0)\n\n return x" }, { "identifier": "Conv2Encoder", "path": "watermarking_model/model/blocks.py", "snippet": "class Conv2Encoder(nn.Module):\n def __init__(self, input_channel=1, hidden_dim=64, block='skip', n_layers=3):\n super(Conv2Encoder, self).__init__()\n if block == 'skip':\n core = SkipGatedBlock\n elif block == 'relu':\n core = ReluBlock\n else:\n raise ValueError(f\"Invalid block type: {block}\")\n\n layers = [core(c_in=input_channel, c_out=hidden_dim, kernel_size=3, stride=1, padding=1)]\n\n for i in range(n_layers-1):\n layers.append(core(c_in=hidden_dim, c_out=hidden_dim, kernel_size=3, stride=1, padding=1))\n\n self.main = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.main(x)" }, { "identifier": "WatermarkEmbedder", "path": "watermarking_model/model/blocks.py", "snippet": "class WatermarkEmbedder(nn.Module):\n def __init__(self, input_channel=1, hidden_dim=64, block='skip', 
n_layers=4):\n super(WatermarkEmbedder, self).__init__()\n if block == 'skip':\n core = SkipGatedBlock\n elif block == 'relu':\n core = ReluBlock\n else:\n raise ValueError(f\"Invalid block type: {block}\")\n\n layers = [core(c_in=input_channel, c_out=hidden_dim, kernel_size=3, stride=1, padding=1)]\n\n for i in range(n_layers-2):\n layers.append(core(c_in=hidden_dim, c_out=hidden_dim, kernel_size=3, stride=1, padding=1))\n\n layers.append(core(c_in=hidden_dim, c_out=1, kernel_size=1, stride=1, padding=0))\n\n self.main = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.main(x)" }, { "identifier": "WatermarkExtracter", "path": "watermarking_model/model/blocks.py", "snippet": "class WatermarkExtracter(nn.Module):\n def __init__(self, input_channel=1, hidden_dim=64, block='skip', n_layers=6):\n super(WatermarkExtracter, self).__init__()\n if block == 'skip':\n core = SkipGatedBlock\n elif block == 'relu':\n core = ReluBlock\n else:\n raise ValueError(f\"Invalid block type: {block}\")\n layers = [core(c_in=input_channel, c_out=hidden_dim, kernel_size=3, stride=1, padding=1)]\n\n for i in range(n_layers-2):\n layers.append(core(c_in=hidden_dim, c_out=hidden_dim, kernel_size=3, stride=1, padding=1))\n\n layers.append(core(c_in=hidden_dim, c_out=1, kernel_size=3, stride=1, padding=1))\n\n self.main = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.main(x)" }, { "identifier": "ReluBlock", "path": "watermarking_model/model/blocks.py", "snippet": "class ReluBlock(nn.Module):\n def __init__(self, c_in, c_out, kernel_size, stride, padding):\n super(ReluBlock, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(c_in, c_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=True),\n nn.InstanceNorm2d(c_out),\n nn.LeakyReLU()\n )\n\n def forward(self, x):\n return self.conv(x)" } ]
from base64 import encode
from torch.nn import LeakyReLU, Tanh
from .blocks import FCBlock, PositionalEncoding, Mish, Conv1DBlock, Conv2Encoder, WatermarkEmbedder, WatermarkExtracter, ReluBlock
from distortions.frequency import TacotronSTFT, fixed_STFT, tacotron_mel
from distortions.dl import distortion
import torch
import torch.nn as nn
import pdb
import hifigan
import json
import torchaudio
import numpy as np
import os
import librosa
import librosa.display
import matplotlib.pyplot as plt
4,069
        y = self.stft.inverse(carrier_wateramrked.squeeze(1), phase.squeeze(1))
        return y, carrier_wateramrked

    def test_forward(self, x, msg, weight):
        num_samples = x.shape[2]
        spect, phase = self.stft.transform(x)
        carrier_encoded = self.ENc(spect.unsqueeze(1))
        watermark_encoded = self.msg_linear_in(msg).transpose(1,2).unsqueeze(1).repeat(1,1,1,carrier_encoded.shape[3])
        concatenated_feature = torch.cat((carrier_encoded, spect.unsqueeze(1), weight*watermark_encoded), dim=1)
        carrier_wateramrked = self.EM(concatenated_feature)
        self.stft.num_samples = num_samples
        y = self.stft.inverse(carrier_wateramrked.squeeze(1), phase.squeeze(1))
        return y, carrier_wateramrked

    def save_forward(self, x, msg):
        num_samples = x.shape[2]
        spect, phase = self.stft.transform(x)
        # save spectrum
        save_spectrum(spect, phase, 'linear')
        carrier_encoded = self.ENc(spect.unsqueeze(1))
        # save feature_map
        save_feature_map(carrier_encoded[0])
        watermark_encoded = self.msg_linear_in(msg).transpose(1,2).unsqueeze(1).repeat(1,1,1,carrier_encoded.shape[3])
        concatenated_feature = torch.cat((carrier_encoded, spect.unsqueeze(1), watermark_encoded), dim=1)
        carrier_wateramrked = self.EM(concatenated_feature)
        self.stft.num_samples = num_samples
        y = self.stft.inverse(carrier_wateramrked.squeeze(1), phase.squeeze(1))
        return y, carrier_wateramrked


class Decoder(nn.Module):
    def __init__(self, process_config, model_config, msg_length, win_dim, embedding_dim, nlayers_decoder=6, transformer_drop=0.1, attention_heads=8):
        super(Decoder, self).__init__()
        self.robust = model_config["robust"]
        if self.robust:
            self.dl = distortion()
        self.mel_transform = TacotronSTFT(filter_length=process_config["mel"]["n_fft"], hop_length=process_config["mel"]["hop_length"], win_length=process_config["mel"]["win_length"])
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.vocoder_step = model_config["structure"]["vocoder_step"]
        win_dim = int((process_config["mel"]["n_fft"] / 2) + 1)
        self.block = model_config["conv2"]["block"]
        self.EX = WatermarkExtracter(input_channel=1, hidden_dim=model_config["conv2"]["hidden_dim"], block=self.block)
        self.stft = fixed_STFT(process_config["mel"]["n_fft"], process_config["mel"]["hop_length"], process_config["mel"]["win_length"])
        self.msg_linear_out = FCBlock(win_dim, msg_length)
        self.weight_linear = FCBlock(win_dim, 1)

    def forward(self, y, global_step):
        # print(y.shape)
        # import pdb
        # pdb.set_trace()
        y_identity = y.clone()
        if global_step > self.vocoder_step:
            y_mel = self.mel_transform.mel_spectrogram(y.squeeze(1))
            # y = self.vocoder(y_mel)
            y_d = (self.mel_transform.griffin_lim(magnitudes=y_mel)).unsqueeze(1)
        else:
            y_d = y
        if self.robust:
            y_d_d = self.dl(y_d, self.robust)
        else:
            y_d_d = y_d
        # print(f"dl:{y.shape}")
        spect, phase = self.stft.transform(y_d_d)
        # pdb.set_trace()
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        spect_identity, phase_identity = self.stft.transform(y_identity)
        extracted_wm_identity = self.EX(spect_identity.unsqueeze(1)).squeeze(1)
        msg_identity = torch.mean(extracted_wm_identity,dim=2, keepdim=True).transpose(1,2)
        msg_identity = self.msg_linear_out(msg_identity)
        return msg, msg_identity

    def get_weight(self, y):
        y_identity = y
        spect_identity, phase_identity = self.stft.transform(y_identity)
        extracted_wm_identity = self.EX(spect_identity.unsqueeze(1)).squeeze(1)
        msg_identity = torch.mean(extracted_wm_identity,dim=2, keepdim=True).transpose(1,2)
        weight = self.weight_linear(msg_identity)
        return weight

    def test_forward(self, y):
        spect, phase = self.stft.transform(y)
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        return msg

    def save_forward(self, y):
        # save mel_spectrum
        y_mel = self.mel_transform.mel_spectrogram(y.squeeze(1))
        save_spectrum(y_mel, y_mel, 'mel')
        y = (self.mel_transform.griffin_lim(magnitudes=y_mel)).unsqueeze(1)
        spect, phase = self.stft.transform(y)
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        return msg

    def mel_test_forward(self, spect):
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        return msg


class Discriminator(nn.Module):
    def __init__(self, process_config):
        super(Discriminator, self).__init__()
        self.conv = nn.Sequential(
def save_spectrum(spect, phase, flag='linear'):
    root = "draw_figure"
    spec = librosa.amplitude_to_db(spect.squeeze(0).cpu().numpy(), ref=np.max)
    img=librosa.display.specshow(spec, sr=22050, x_axis='time', y_axis='log', y_coords=None);
    plt.axis('off')
    plt.savefig(os.path.join(root, flag + '_amplitude_spectrogram.png'), bbox_inches='tight', pad_inches=0.0)
    spec = librosa.amplitude_to_db(phase.squeeze(0).cpu().numpy(), ref=np.max)
    img=librosa.display.specshow(spec, sr=22050, x_axis='time', y_axis='log');
    plt.axis('off')
    plt.savefig(os.path.join(root, flag + '_phase_spectrogram.png'), bbox_inches='tight', pad_inches=0.0)


def save_feature_map(feature_maps):
    feature_maps = feature_maps.cpu().numpy()
    root = "draw_figure"
    output_folder = os.path.join(root,"feature_map_or")
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
    n_channels = feature_maps.shape[0]
    for channel_idx in range(n_channels):
        fig, ax = plt.subplots()
        ax.imshow(feature_maps[channel_idx, :, :], cmap='gray')
        ax.axis('off')
        output_file = os.path.join(output_folder, f'feature_map_channel_{channel_idx + 1}.png')
        plt.savefig(output_file, bbox_inches='tight', pad_inches=0.0)
        plt.close(fig)


def get_vocoder(device):
    with open("hifigan/config.json", "r") as f:
        config = json.load(f)
    config = hifigan.AttrDict(config)
    vocoder = hifigan.Generator(config)
    ckpt = torch.load("./hifigan/model/VCTK_V1/generator_v1")
    vocoder.load_state_dict(ckpt["generator"])
    vocoder.eval()
    vocoder.remove_weight_norm()
    vocoder.to(device)
    freeze_model_and_submodules(vocoder)
    return vocoder


def freeze_model_and_submodules(model):
    for param in model.parameters():
        param.requires_grad = False
    for module in model.children():
        if isinstance(module, nn.Module):
            freeze_model_and_submodules(module)


class Encoder(nn.Module):
    def __init__(self, process_config, model_config, msg_length, win_dim, embedding_dim, nlayers_encoder=6, transformer_drop=0.1, attention_heads=8):
        super(Encoder, self).__init__()
        self.name = "conv2"
        win_dim = int((process_config["mel"]["n_fft"] / 2) + 1)
        self.add_carrier_noise = False
        self.block = model_config["conv2"]["block"]
        self.layers_CE = model_config["conv2"]["layers_CE"]
        self.EM_input_dim = model_config["conv2"]["hidden_dim"] + 2
        self.layers_EM = model_config["conv2"]["layers_EM"]
        self.vocoder_step = model_config["structure"]["vocoder_step"]
        #MLP for the input wm
        self.msg_linear_in = FCBlock(msg_length, win_dim, activation=LeakyReLU(inplace=True))
        #stft transform
        self.stft = fixed_STFT(process_config["mel"]["n_fft"], process_config["mel"]["hop_length"], process_config["mel"]["win_length"])
        self.ENc = Conv2Encoder(input_channel=1, hidden_dim = model_config["conv2"]["hidden_dim"], block=self.block, n_layers=self.layers_CE)
        self.EM = WatermarkEmbedder(input_channel=self.EM_input_dim, hidden_dim = model_config["conv2"]["hidden_dim"], block=self.block, n_layers=self.layers_EM)

    def forward(self, x, msg, weight, global_step):
        num_samples = x.shape[2]
        spect, phase = self.stft.transform(x)
        carrier_encoded = self.ENc(spect.unsqueeze(1))
        watermark_encoded = self.msg_linear_in(msg).transpose(1,2).unsqueeze(1).repeat(1,1,1,carrier_encoded.shape[3])
        concatenated_feature = torch.cat((carrier_encoded, spect.unsqueeze(1), weight*watermark_encoded), dim=1)
        carrier_wateramrked = self.EM(concatenated_feature)
        self.stft.num_samples = num_samples
        y = self.stft.inverse(carrier_wateramrked.squeeze(1), phase.squeeze(1))
        return y, carrier_wateramrked

    def test_forward(self, x, msg, weight):
        num_samples = x.shape[2]
        spect, phase = self.stft.transform(x)
        carrier_encoded = self.ENc(spect.unsqueeze(1))
        watermark_encoded = self.msg_linear_in(msg).transpose(1,2).unsqueeze(1).repeat(1,1,1,carrier_encoded.shape[3])
        concatenated_feature = torch.cat((carrier_encoded, spect.unsqueeze(1), weight*watermark_encoded), dim=1)
        carrier_wateramrked = self.EM(concatenated_feature)
        self.stft.num_samples = num_samples
        y = self.stft.inverse(carrier_wateramrked.squeeze(1), phase.squeeze(1))
        return y, carrier_wateramrked

    def save_forward(self, x, msg):
        num_samples = x.shape[2]
        spect, phase = self.stft.transform(x)
        # save spectrum
        save_spectrum(spect, phase, 'linear')
        carrier_encoded = self.ENc(spect.unsqueeze(1))
        # save feature_map
        save_feature_map(carrier_encoded[0])
        watermark_encoded = self.msg_linear_in(msg).transpose(1,2).unsqueeze(1).repeat(1,1,1,carrier_encoded.shape[3])
        concatenated_feature = torch.cat((carrier_encoded, spect.unsqueeze(1), watermark_encoded), dim=1)
        carrier_wateramrked = self.EM(concatenated_feature)
        self.stft.num_samples = num_samples
        y = self.stft.inverse(carrier_wateramrked.squeeze(1), phase.squeeze(1))
        return y, carrier_wateramrked


class Decoder(nn.Module):
    def __init__(self, process_config, model_config, msg_length, win_dim, embedding_dim, nlayers_decoder=6, transformer_drop=0.1, attention_heads=8):
        super(Decoder, self).__init__()
        self.robust = model_config["robust"]
        if self.robust:
            self.dl = distortion()
        self.mel_transform = TacotronSTFT(filter_length=process_config["mel"]["n_fft"], hop_length=process_config["mel"]["hop_length"], win_length=process_config["mel"]["win_length"])
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.vocoder_step = model_config["structure"]["vocoder_step"]
        win_dim = int((process_config["mel"]["n_fft"] / 2) + 1)
        self.block = model_config["conv2"]["block"]
        self.EX = WatermarkExtracter(input_channel=1, hidden_dim=model_config["conv2"]["hidden_dim"], block=self.block)
        self.stft = fixed_STFT(process_config["mel"]["n_fft"], process_config["mel"]["hop_length"], process_config["mel"]["win_length"])
        self.msg_linear_out = FCBlock(win_dim, msg_length)
        self.weight_linear = FCBlock(win_dim, 1)

    def forward(self, y, global_step):
        # print(y.shape)
        # import pdb
        # pdb.set_trace()
        y_identity = y.clone()
        if global_step > self.vocoder_step:
            y_mel = self.mel_transform.mel_spectrogram(y.squeeze(1))
            # y = self.vocoder(y_mel)
            y_d = (self.mel_transform.griffin_lim(magnitudes=y_mel)).unsqueeze(1)
        else:
            y_d = y
        if self.robust:
            y_d_d = self.dl(y_d, self.robust)
        else:
            y_d_d = y_d
        # print(f"dl:{y.shape}")
        spect, phase = self.stft.transform(y_d_d)
        # pdb.set_trace()
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        spect_identity, phase_identity = self.stft.transform(y_identity)
        extracted_wm_identity = self.EX(spect_identity.unsqueeze(1)).squeeze(1)
        msg_identity = torch.mean(extracted_wm_identity,dim=2, keepdim=True).transpose(1,2)
        msg_identity = self.msg_linear_out(msg_identity)
        return msg, msg_identity

    def get_weight(self, y):
        y_identity = y
        spect_identity, phase_identity = self.stft.transform(y_identity)
        extracted_wm_identity = self.EX(spect_identity.unsqueeze(1)).squeeze(1)
        msg_identity = torch.mean(extracted_wm_identity,dim=2, keepdim=True).transpose(1,2)
        weight = self.weight_linear(msg_identity)
        return weight

    def test_forward(self, y):
        spect, phase = self.stft.transform(y)
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        return msg

    def save_forward(self, y):
        # save mel_spectrum
        y_mel = self.mel_transform.mel_spectrogram(y.squeeze(1))
        save_spectrum(y_mel, y_mel, 'mel')
        y = (self.mel_transform.griffin_lim(magnitudes=y_mel)).unsqueeze(1)
        spect, phase = self.stft.transform(y)
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        return msg

    def mel_test_forward(self, spect):
        extracted_wm = self.EX(spect.unsqueeze(1)).squeeze(1)
        msg = torch.mean(extracted_wm,dim=2, keepdim=True).transpose(1,2)
        msg = self.msg_linear_out(msg)
        return msg


class Discriminator(nn.Module):
    def __init__(self, process_config):
        super(Discriminator, self).__init__()
        self.conv = nn.Sequential(
ReluBlock(1,16,3,1,1),
7
2023-11-13 01:40:03+00:00
8k
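In the Encoder above, the watermark message is lifted to the frequency axis by msg_linear_in, tiled along the time axis, and concatenated with the spectrogram channels before the embedder CNN. A shape-only sketch of that step, with illustrative dimensions (batch 2, 10-bit message, 513 frequency bins, 100 frames):

import torch
import torch.nn as nn

batch, msg_len, freq_bins, frames = 2, 10, 513, 100
msg = torch.randn(batch, 1, msg_len)
spect = torch.randn(batch, freq_bins, frames)

msg_linear_in = nn.Linear(msg_len, freq_bins)        # stand-in for FCBlock
wm = msg_linear_in(msg).transpose(1, 2).unsqueeze(1) # (2, 1, 513, 1)
wm = wm.repeat(1, 1, 1, frames)                      # tile along time: (2, 1, 513, 100)
feat = torch.cat((spect.unsqueeze(1), wm), dim=1)    # (2, 2, 513, 100)
print(feat.shape)

The real model also concatenates the carrier features from Conv2Encoder, giving hidden_dim + 2 input channels, which matches self.EM_input_dim in Encoder.__init__.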
nillion-oss/tinysig
src/tinysig/tecdsa.py
[ { "identifier": "add", "path": "src/tinysig/utils.py", "snippet": "def add(values: list[int], size: int) -> int:\ndef add_ec(points: list[EccPoint]) -> int:\ndef generate_additive_shares(secret: int, n: int, size: int) -> list[int]:\ndef multiply(values: list[int], size: int) -> int:\ndef egcd(a: int, p: int) -> int:\ndef hash(message: str, q: int):\ndef verify_dsa_signature(message: int, r: int, s: int, y: int, p: int, q: int, g: int) -> None:\ndef verify_ecdsa_signature(message: int, r: int, s: int, Y: EccPoint, q: int, G: EccPoint) -> None:\n def __init__(self, message):\n def setUp(self): \n def test_add(self):\n def test_generate_additive_shares(self):\n def test_multiply(self):\n V = u1 * G + u2 * Y\nclass VerifySignatureError(Exception):\nclass TestUtils(unittest.TestCase):" }, { "identifier": "DSASetup", "path": "src/tinysig/setup.py", "snippet": "class DSASetup:\n \"\"\"\n Dataclass representing a DSA (Digital Signature Algorithm) setup.\n\n Example:\n setup = DSASetup.generate_dsa_setup()\n \"\"\"\n\n p: int\n \"\"\"The DSA modulus.\"\"\"\n q: int\n \"\"\"The order of the subgroup.\"\"\"\n g: int\n \"\"\"A generator of the subgroup.\"\"\"\n h: int\n \"\"\"A generator of the field :math:`\\mathbb{Z}_q`.\"\"\"\n\n def generate_dsa_setup():\n \"\"\"Generate a DSA setup based on system parameters.\"\"\"\n key = DSA.generate(2048)\n g = int(key._key['g'])\n p = int(key._key['p'])\n q = int(key._key['q']) \n h = get_generator(q)\n return DSASetup(p, q, g, h)" }, { "identifier": "ECDSASetup", "path": "src/tinysig/setup.py", "snippet": "class ECDSASetup:\n \"\"\"\n Dataclass representing an ECDSA (Elliptic Curve Digital Signature Algorithm) setup.\n\n Example:\n setup = ECDSASetup.generate_ecdsa_setup()\n \"\"\"\n\n curve: str\n \"\"\"The name of the elliptic curve.\"\"\"\n p: Optional[int] = None\n \"\"\"The finite field of the elliptic curve.\"\"\"\n q: Optional[int] = None\n \"\"\"The order of the elliptic curve group.\"\"\"\n G: Optional[EccPoint] = None\n \"\"\"A base point on the elliptic curve.\"\"\"\n h: Optional[int] = None\n \"\"\"A generator of field :math:`\\mathbb{Z}_q`.\"\"\"\n\n def generate_ecdsa_setup(self):\n \"\"\"\n Generate an ECDSA setup for the specified elliptic curve.\n\n Returns:\n ECDSASetup: An instance of ECDSASetup with generated parameters.\n\n Raises:\n ValueError: If the specified curve is not supported.\n\n Example:\n >>> setup = ECDSASetup(curve='P-256').generate_ecdsa_setup()\n \"\"\"\n\n supported_curves = self.supported_curves()\n curve = self.curve\n if curve not in supported_curves:\n raise ValueError(\"{} is not one of the specified curves. 
\\\n Please choose one of the following curves:\\n \\\n ['P-192', 'P-224', 'P-256', 'P-384', 'P-521']\".format(curve))\n p = int(ECC._curves[curve].p)\n q = int(ECC._curves[curve].order)\n G = ECC._curves[curve].G\n h = get_generator(int(q))\n return ECDSASetup(curve, p, q, G, h)\n \n @staticmethod\n def supported_curves():\n \"\"\"\n Get a list of supported elliptic curves.\n\n Returns:\n List[str]: A list of supported elliptic curve names.\n\n Example:\n >>> supported_curves = ECDSASetup.supported_curves()\n >>> print(supported_curves)\n ['P-192', 'P-224', 'P-256', 'P-384', 'P-521']\n \"\"\"\n \n return ['P-192', 'P-224', 'P-256', 'P-384', 'P-521']\n \n def print_supported_curves(self):\n \"\"\"\n Print the list of supported elliptic curves.\n \"\"\"\n\n supported_curves = self.supported_curves()\n print(\"Supported Elliptic Curves: \", supported_curves)" }, { "identifier": "Network", "path": "src/tinysig/network.py", "snippet": "class Network:\n \"\"\"Represents a network of nodes and clients.\n \n Manages the interactions and cryptographic operations within the network, \n including sharing secrets, broadcasting values, and reconstructing shared values.\n \"\"\"\n\n nodes: List[Node]\n \"\"\"List of nodes in the network.\"\"\"\n clients: List[Client]\n \"\"\"List of clients in the network.\"\"\"\n q: int\n \"\"\"Prime field.\"\"\"\n h: int \n \"\"\"Multiplicative field generator.\"\"\"\n\n def __init__(self, N, q, h=2, C=1):\n \"\"\"\n Initialize the network with 'N' nodes, prime field 'q', field generator 'h', and 'C' clients.\n \n Parameters:\n N (int): Number of nodes in the network.\n q (int): Prime field.\n h (int): Multiplicative field generator (default is 2).\n C (int): Number of clients in the network (default is 1).\n \"\"\"\n self.nodes = [Node(i+1) for i in range(N)]\n self.clients = [Client(i+1) for i in range(C)]\n self.N = N\n self.q = q\n self.h = h\n\n def print(self):\n \"\"\"Print a readable representation of the network, including nodes and clients with their databases.\"\"\"\n print(f\"Network(N={len(self.nodes)}, q={self.q},\")\n print(\" nodes=[\")\n for node in self.nodes:\n print(f\" Node(id={node.id},\")\n print(\" shares_db={\")\n for key, value in node.shares_db.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(\" public_keys={\")\n for key, value in node.he_public_keys.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(\" open_db={\")\n for key, value in node.open_db.items():\n print(f\" {key}: {value},\")\n print(\" }\")\n print(\" )\")\n print(\" ]\\n)\")\n print(\" clients=[\")\n for client in self.clients:\n print(f\" Client(id={client.id},\")\n print(\" shares_db={\")\n for key, value in client.shares_db.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(\" public_keys={\")\n for key, value in client.he_public_keys.items():\n print(f\" {key}: {value},\")\n print(\" },\")\n print(f\" private_keys={client.he_private_key},\")\n print(\" open_db={\")\n for key, value in client.open_db.items():\n print(f\" {key}: {value},\")\n print(\" }\")\n print(\" )\")\n print(\" ]\\n)\")\n\n def reconstruct_local(self, type_share: str, get_label: str, save_label: str, party: Union[Client, Node]) -> None:\n \"\"\"Locally reconstruct exponent share ('exp') or base ('base') shared value.\"\"\"\n \n type_label = \"_sh_exp\" if type_share == \"exp\" else \"_sh_base\"\n p = (self.q - 1) if type_share == \"exp\" else self.q\n shares = [party.get_share(get_label+type_label+\"_node_\"+str(node.id)) for node in self.nodes]\n 
reconstructed = add(shares, p)\n party.set_share(reconstructed, save_label)\n\n def broadcast(self, element: int, label: str) -> None:\n \"\"\"Send element to all nodes.\"\"\"\n\n for node in self.nodes:\n node.open_db[label] = element\n\n def send(self, type_share: str, label: str, party: Union[Client, Node], delete=False) -> None:\n \"\"\"Send exponent ('exp') or base ('base') share to party.\"\"\"\n \n type_label = \"_sh_exp\" if type_share == \"exp\" else \"_sh_base\"\n for node in self.nodes:\n sh_node = node.get_share(label+type_label)\n sh_label = label+type_label+\"_node_\"+str(node.id)\n party.set_share(sh_node, sh_label)\n node.delete_share(label+type_label) if delete else None\n\n def share(self, secret: int, size: int, label: str) -> None:\n \"\"\"Share secret value with all\"\"\"\n\n shares = generate_additive_shares(secret, self.N, size)\n for node in self.nodes:\n node.set_share(shares[node.id - 1], label)\n\n def reveal(self, type_share: str, get_label: str, save_label: str, party: Union[Client, Node]) -> None:\n \"\"\"Send exponent ('exp') or base ('base') share to party.\"\"\" \n \n self.send(type_share, get_label, party)\n self.reconstruct_local(type_share, get_label, save_label, party)" }, { "identifier": "Client", "path": "src/tinysig/network.py", "snippet": "class Client(Node):\n \"\"\"Represents a client node in the network, inheriting from the 'Node' class.\"\"\"\n he_private_key: int = field(default=0)" } ]
from Crypto.Hash import SHA256
from phe import paillier
from typing import List

from .utils import add, add_ec, multiply, rand, egcd, verify_dsa_signature, verify_ecdsa_signature
from .setup import DSASetup, ECDSASetup
from .network import Network, Client
4,641
            result = scalar * ec_point
            # DB management
            node.set_open(result, label_result)

    def encrypt_and_delete_exp_sh_local(self, label: str, client_id: int) -> None:
        """
        Encrypt the share of the exponent element of the LAMBDA pair and
        delete the original LAMBDA pair.

        Parameters:
            label (str): The label for LAMBDA pair.
            client_id (int): Client id.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            clear_share = node.get_share(label+"_lambda_sh_exp")
            # Local operation:
            ## Encrypt share
            enc_sh_val = node.he_public_keys[client_id - 1].encrypt(clear_share)
            ## Delete lambda pair
            node.delete_share(label+"_lambda_sh_exp")
            node.delete_share(label+"_lambda_sh_base")
            # DB management
            sh_label = label+"_enc_sh_exp"
            node.set_share(enc_sh_val, sh_label)

    def send_public_key_to_client(self, label: str, client: Client) -> None:
        """
        Nodes send public key to client.

        Parameters:
            label (str): The label for LAMBDA pair.
            client_id (int): Client id.

        Returns:
            None
        """
        all_y = [node.get_open(label+"_pk") for node in self.nodes]
        # Check if all elements in the list are equal
        are_all_equal = all(y == all_y[0] for y in all_y)
        if are_all_equal:
            client.set_open(all_y[0], label+"_pk")
        else:
            raise PublicKeyDisagreement("Abort.")

    def distributed_key_generation_protocol(self, client_id: int, label=None) -> None:
        """
        Execute a distributed key generation protocol for a specific client.

        Parameters:
            client_id (int): The unique identifier for the client.
            label (str, optional): A custom label associated with the client. Defaults to None.

        Returns:
            None
        """
        # Check there exist a client
        client = next((client for client in self.clients if client.id == client_id), None)
        if client == None:
            raise TypeError(f"Client with id {client_id} is not part of the network.")
        label = str(client_id)+"th_client_"+str(label) if label else str(client_id)+"th_client_"+"x"
        delete = not self.debug
        # Step 1
        self.get_lambda([label])
        # Step 2
        self.key_agreement_protocol(label, delete=delete)
        # Step 3
        self.send_public_key_to_client(label, client)
        # Step 4
        self.encrypt_and_delete_exp_sh_local(label, client_id)

    def compute_r_local(self, label: str, client: Client, delete=True) -> None:
        """
        Compute r.

        Parameters:
            label (str): The label of the r element.
            client (Client): A client.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            R = node.get_open(label + "_pk")
            # Local operation
            r = R % self.q if self.setup == DSASetup else int(R.x)
            # DB management
            node.set_open(r, label + "_r")
            node.delete_open(label + "_pk")
        client.set_open(r, label + "_r")

    def invert_masked_factor_local(self, label) -> None:
        """
        Invert a masked factor.

        Parameters:
            label (str): The label of the masked factor to be inverted.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            masked_factor = node.get_open(label+"_sk")
            share = node.get_share(label+"_lambda_sh_exp")
            # Local operation
            ## Invert masked factor
class ThresholdSignature(Network): clients: List[Client] def __init__(self, N, C, setup=None, debug=False): self.debug = debug if setup is None: self.dsa = DSASetup.generate_dsa_setup() self.setup = DSASetup super().__init__(N, self.dsa.q, self.dsa.h) elif type(setup) == DSASetup: self.dsa = setup self.setup = DSASetup super().__init__(N, self.dsa.q, self.dsa.h) elif type(setup) == ECDSASetup: self.ecdsa = setup.generate_ecdsa_setup() self.setup = ECDSASetup super().__init__(N, self.ecdsa.q, self.ecdsa.h) else: raise TypeError("Invalid type provided. " "Please use either 'DSASetup' or 'ECDSASetup' types." ) # Generate public and private keys for the paillier homomorphic encryption scheme for i in range(C): pub_key, priv_key = paillier.generate_paillier_keypair() self.clients[i].he_private_key = priv_key for node in self.nodes: node.he_public_keys[i] = pub_key for client in self.clients: client.he_public_keys[i] = pub_key def get_lambda(self, labels: list[str]) -> None: """ Emulates the generation of LAMBDA pairs :math:`([h^{\gamma}], [\gamma])` between all nodes. Parameters: labels (list[str]): A list of labels for which lambda values will be generated and stored. Returns: None """ n = len(labels) h = self.h q = self.q q_minus_one = q - 1 for l in range(n): # Locally generate lambda alpha = rand(q_minus_one) h_alpha = pow(h, alpha, q) self.share(alpha, q_minus_one, labels[l]+"_lambda_sh_exp") self.share(h_alpha, q, labels[l]+"_lambda_sh_base") def rss_protocol(self, size: int, label: str) -> None: """ Random Secret Sharing (RSS) Protocol. This function implements a one-round RSS protocol. The goal is to share a random secret value among a group of nodes using a specific label for the shares. Parameters: size (int): The maximum size of the random secret to be generated and shared. label (str): A label to identify the shared secrets and their associated operations. Returns: None """ # Round 1 for node in self.nodes: # Step 1: locally generate random secret random_element = rand(size) # Step 2: share random secret with all nodes self.share(random_element, size, label+"sh_node_"+str(node.id)) # All local for node in self.nodes: # DB management list_of_shares = [ node.get_share(label + "sh_node_" + str(other_node.id)) for other_node in self.nodes ] # Step 3: add locally all shares random_sum = add(list_of_shares, size) # DB management sh_label = label+"_sh_exp" node.set_share(random_sum, sh_label) if not self.debug: [node.delete_share(label + "sh_node_" + str(other_node.id)) for other_node in self.nodes] def pow_share_protocol(self, base_type: str, get_label: str, save_label: str) -> None: """ Compute a power-sharing protocol among a group of nodes. This function implements a one-round protocol to securely compute :math:`b^{s}` where the exponent is a secret shared element between the nodes. Parameters: base_type (str): The type of base used: 'exp', when base to be used is self.h; 'base', when the base to be used is self.dsa.g. Note: 'base' option can only be use for the DSA setup. get_label (str): The label to retrieve shares of 's' from nodes. save_label (str): The label to save the final result to. 
        Returns:
            None
        """
        if base_type not in ["exp", "base"]:
            raise ValueError(
                "{} is not one of the specified base types. "
                "Please choose one of the following:\n"
                "['exp', 'base']".format(base_type)
            )
        prime = self.q if base_type == "exp" else self.dsa.p

        # Round 1
        for node in self.nodes:
            # DB management
            exponent = node.get_share(get_label+"_sh_"+base_type)

            # Step 1: compute base^share
            if base_type == "exp":
                h_exp = pow(self.h, exponent, prime)
            else:
                h_exp = pow(self.dsa.g, exponent, prime)

            # Step 2: Broadcast base^share to nodes
            self.broadcast(h_exp, "pow_share_node_"+str(node.id))

        # All local
        for node in self.nodes:
            # DB management
            base_exps = [
                node.get_open("pow_share_node_"+str(other_node.id))
                for other_node in self.nodes
            ]

            # Step 3: multiply locally all powers of shares
            val = multiply(base_exps, prime)

            # DB management
            node.set_open(val, save_label)
            if not self.debug:
                [node.delete_open("pow_share_node_"+str(other_node.id))
                 for other_node in self.nodes]

    def ec_pow_share_protocol(self, get_label: str, save_label: str) -> None:
        r"""
        Execute an elliptic curve (EC) version of the power-sharing protocol.

        This function implements a one-round protocol to securely compute
        :math:`scalar\cdot G` where the scalar is a secret shared element
        between the nodes.

        Parameters:
            get_label (str): The label used to retrieve scalar shares from nodes.
            save_label (str): The label used to save the result of the
                power-sharing protocol.

        Returns:
            None
        """
        # Round 1
        for node in self.nodes:
            # DB management
            scalar_sh = node.get_share(get_label+"_sh_base")

            # Step 1: compute the scalar share times the generator point
            sh_G = scalar_sh * self.ecdsa.G

            # Step 2: broadcast the point share to all nodes
            self.broadcast(sh_G, "ec_pow_share_node_"+str(node.id))

        # All local
        for node in self.nodes:
            # DB management
            base_exps = [
                node.get_open("ec_pow_share_node_"+str(other_node.id))
                for other_node in self.nodes
            ]

            # Step 3: add locally all point shares
            val = add_ec(base_exps)

            # DB management
            node.set_open(val, save_label)
            if not self.debug:
                [node.delete_open("ec_pow_share_node_"+str(other_node.id))
                 for other_node in self.nodes]

    def subtract_exp_shares_local(self, label_a: str, label_b: str, label_r: str) -> None:
        """
        Subtract the shares of the exponent of two labels and store the
        result in another label.

        Parameters:
            label_a (str): The label for the first operand.
            label_b (str): The label for the second operand.
            label_r (str): The label where the result is stored.

        Returns:
            None
        """
        q_minus_one = self.q - 1
        for node in self.nodes:
            # DB management
            share_a = node.get_share(label_a+"_sh_exp")
            share_b = node.get_share(label_b+"_sh_exp")

            # Local operation: subtraction
            share_r = (share_a - share_b) % q_minus_one

            # DB management
            label = label_r+"_sh_exp"
            node.set_share(share_r, label)

    def pow_local(self, label_base: str, label_exponent: str, label_result: str) -> None:
        """
        Compute the power of a base saved in the open database raised to an
        exponent and store the result.

        Parameters:
            label_base (str): The label for the base.
            label_exponent (str): The label for the exponent.
            label_result (str): The label for the element where the result is stored.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            base = node.get_open(label_base)
            exponent = node.get_open(label_exponent)

            # Local operation: power
            result = pow(base, exponent, self.dsa.p)

            # DB management
            node.set_open(result, label_result)

    def key_agreement_protocol(self, label: str, delete=True) -> None:
        """
        Perform a key agreement protocol to derive a mask of the secret key
        and the corresponding public key.

        Parameters:
            label (str): The label of the pair associated with the secret key mask.
            delete (bool, optional): Whether to delete intermediate data
                after the protocol. Defaults to True.

        Returns:
            None
        """
        q_minus_one = self.q - 1

        # Round 1
        # Step 1:
        random_label = "random"
        self.rss_protocol(q_minus_one, random_label)

        # Round 2
        # Step 2:
        random_minus_label = random_label + "_minus_" + label
        self.subtract_exp_shares_local(random_label, label + "_lambda", random_minus_label)
        base_type_exp = "exp"
        self.pow_share_protocol(base_type_exp, random_minus_label, label + "_sk")
        if self.setup == DSASetup:
            # Step 3:
            base_type_base = "base"
            self.pow_share_protocol(base_type_base, label + "_lambda", label + "_pre_pk")
            # Step 4:
            self.pow_local(label + "_pre_pk", label + "_sk", label + "_pk")
        else:
            # Step 3:
            self.ec_pow_share_protocol(label + "_lambda", label + "_pre_pk")
            # Step 4:
            self.ec_mult_local(label + "_pre_pk", label + "_sk", label + "_pk")

        # DB management
        ## Option only for testing purposes
        if delete:
            [node.delete_share(random_minus_label+"_sh_exp") for node in self.nodes]
            [node.delete_share(random_label+"_sh_exp") for node in self.nodes]
            [node.delete_open(label + "_pre_pk") for node in self.nodes]

    def ec_mult_local(self, label_ec_point: str, label_scalar: str, label_result: str) -> None:
        """
        Compute the multiplication of a scalar value with an elliptic curve
        point and store the result.

        Parameters:
            label_ec_point (str): The label for the elliptic curve point.
            label_scalar (str): The label for the scalar.
            label_result (str): The label for the element where the result is stored.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            ec_point = node.get_open(label_ec_point)
            scalar = node.get_open(label_scalar)

            # Local operation: multiplication
            result = scalar * ec_point

            # DB management
            node.set_open(result, label_result)

    def encrypt_and_delete_exp_sh_local(self, label: str, client_id: int) -> None:
        """
        Encrypt the share of the exponent element of the LAMBDA pair
        and delete the original LAMBDA pair.

        Parameters:
            label (str): The label for the LAMBDA pair.
            client_id (int): Client id.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            clear_share = node.get_share(label+"_lambda_sh_exp")

            # Local operation:
            ## Encrypt share
            enc_sh_val = node.he_public_keys[client_id - 1].encrypt(clear_share)
            ## Delete lambda pair
            node.delete_share(label+"_lambda_sh_exp")
            node.delete_share(label+"_lambda_sh_base")

            # DB management
            sh_label = label+"_enc_sh_exp"
            node.set_share(enc_sh_val, sh_label)

    def send_public_key_to_client(self, label: str, client: Client) -> None:
        """
        Nodes send the public key to the client.

        Parameters:
            label (str): The label for the LAMBDA pair.
            client (Client): The client that receives the public key.

        Returns:
            None
        """
        all_y = [node.get_open(label+"_pk") for node in self.nodes]
        # Check if all elements in the list are equal
        are_all_equal = all(y == all_y[0] for y in all_y)
        if are_all_equal:
            client.set_open(all_y[0], label+"_pk")
        else:
            raise PublicKeyDisagreement("Abort.")

    def distributed_key_generation_protocol(self, client_id: int, label=None) -> None:
        """
        Execute a distributed key generation protocol for a specific client.

        Parameters:
            client_id (int): The unique identifier for the client.
            label (str, optional): A custom label associated with the client.
                Defaults to None.
        Returns:
            None
        """
        # Check that the client exists
        client = next((client for client in self.clients if client.id == client_id), None)
        if client is None:
            raise TypeError(f"Client with id {client_id} is not part of the network.")
        label = str(client_id)+"th_client_"+str(label) if label else str(client_id)+"th_client_"+"x"
        delete = not self.debug

        # Step 1
        self.get_lambda([label])
        # Step 2
        self.key_agreement_protocol(label, delete=delete)
        # Step 3
        self.send_public_key_to_client(label, client)
        # Step 4
        self.encrypt_and_delete_exp_sh_local(label, client_id)

    def compute_r_local(self, label: str, client: Client, delete=True) -> None:
        """
        Compute the signature component r from the public value stored
        under label + "_pk".

        Parameters:
            label (str): The label of the r element.
            client (Client): A client.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            R = node.get_open(label + "_pk")

            # Local operation
            r = R % self.q if self.setup == DSASetup else int(R.x)

            # DB management
            node.set_open(r, label + "_r")
            node.delete_open(label + "_pk")
        client.set_open(r, label + "_r")

    def invert_masked_factor_local(self, label) -> None:
        """
        Invert a masked factor.

        Parameters:
            label (str): The label of the masked factor to be inverted.

        Returns:
            None
        """
        for node in self.nodes:
            # DB management
            masked_factor = node.get_open(label+"_sk")
            share = node.get_share(label+"_lambda_sh_exp")

            # Local operation
            ## Invert masked factor
inv_masked_factor = egcd(masked_factor, self.q)
0
2023-11-14 13:55:41+00:00
8k
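
The record above centers on additive secret sharing of exponents: rss_protocol shares a random value as a sum of shares mod q-1, and pow_share_protocol recovers the base raised to the shared secret by multiplying the nodes' broadcast powers. Below is a minimal self-contained sketch of that identity, assuming a toy prime q and base h; the constants and the use of Python's random module are illustrative stand-ins, not values or helpers from the repository.

import random

q = 467                                   # toy prime, stands in for the group modulus
h = 2                                     # toy base, stands in for self.h
N = 3                                     # number of nodes

secret = random.randrange(1, q - 1)

# Additive shares of the exponent mod q-1, as in share()/rss_protocol.
shares = [random.randrange(q - 1) for _ in range(N - 1)]
shares.append((secret - sum(shares)) % (q - 1))
assert sum(shares) % (q - 1) == secret

# Each node broadcasts h^share mod q; multiplying the broadcasts yields
# h^secret by Fermat's little theorem, which is what pow_share_protocol
# reconstructs with multiply().
partials = [pow(h, s_i, q) for s_i in shares]
product = 1
for p_i in partials:
    product = product * p_i % q
assert product == pow(h, secret, q)
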
Exscientia/physicsml
src/physicsml/models/mace/modules/blocks.py
[ { "identifier": "Activation", "path": "src/physicsml/models/mace/modules/_activation.py", "snippet": "class Activation(torch.nn.Module):\n r\"\"\"Scalar activation function.\n\n Odd scalar inputs require activation functions with a defined parity (odd or even).\n\n Parameters\n ----------\n irreps_in : `e3nn.o3.Irreps`\n representation of the input\n\n acts : list of function or None\n list of activation functions, `None` if non-scalar or identity\n\n Examples\n --------\n\n >>> a = Activation(\"256x0o\", [torch.abs])\n >>> a.irreps_out\n 256x0e\n\n >>> a = Activation(\"256x0o+16x1e\", [None, None])\n >>> a.irreps_out\n 256x0o+16x1e\n \"\"\"\n\n def __init__(self, irreps_in: o3.Irreps, acts: List[Optional[torch.nn.Module]]):\n super().__init__()\n irreps_in = o3.Irreps(irreps_in)\n if len(irreps_in) != len(acts):\n raise ValueError(\n f\"Irreps in and number of activation functions does not match: {len(acts), (irreps_in, acts)}\",\n )\n\n # normalize the second moment\n acts = [normalize2mom(act) if act is not None else None for act in acts]\n\n from e3nn.util._argtools import _get_device\n\n irreps_out = []\n for (mul, (l_in, p_in)), act in zip(irreps_in, acts):\n if act is not None:\n if l_in != 0:\n raise ValueError(\n \"Activation: cannot apply an activation function to a non-scalar input.\",\n )\n\n x = torch.linspace(0, 10, 256, device=_get_device(act))\n\n a1, a2 = act(x), act(-x)\n if (a1 - a2).abs().max() < 1e-5:\n p_act = 1\n elif (a1 + a2).abs().max() < 1e-5:\n p_act = -1\n else:\n p_act = 0\n\n p_out = p_act if p_in == -1 else p_in\n irreps_out.append((mul, (0, p_out)))\n\n if p_out == 0:\n raise ValueError(\n \"Activation: the parity is violated! The input scalar is odd but the activation is neither \"\n \"even nor odd.\",\n )\n else:\n irreps_out.append((mul, (l_in, p_in)))\n\n self.irreps_in = irreps_in\n self.irreps_out = o3.Irreps(irreps_out)\n self.acts = torch.nn.ModuleList(acts) # type: ignore\n assert len(self.irreps_in) == len(self.acts)\n\n self.ir_dims: List[int] = [ir.dim for _, ir in self.irreps_in]\n\n def __repr__(self) -> str:\n acts = \"\".join([\"x\" if a is not None else \" \" for a in self.acts])\n return f\"{self.__class__.__name__} [{acts}] ({self.irreps_in} -> {self.irreps_out})\"\n\n def forward(self, features: torch.Tensor, dim: int = -1) -> torch.Tensor:\n \"\"\"evaluate\n\n Parameters\n ----------\n features : `torch.Tensor`\n tensor of shape ``(...)``\n\n Returns\n -------\n `torch.Tensor`\n tensor of shape the same shape as the input\n \"\"\"\n # - PROFILER - with torch.autograd.profiler.record_function(repr(self)):\n output = []\n index = 0\n\n for i, act in enumerate(self.acts):\n ir_dim = self.ir_dims[i]\n mul, ir = self.irreps_in[i]\n\n if act is not None:\n output.append(act(features.narrow(dim, index, mul)))\n else:\n output.append(features.narrow(dim, index, mul * ir_dim))\n index += mul * ir_dim\n\n if len(output) > 1:\n return torch.cat(output, dim=dim)\n elif len(output) == 1:\n out: torch.Tensor = output[0]\n return out\n else:\n return torch.zeros_like(features)" }, { "identifier": "reshape_irreps", "path": "src/physicsml/models/mace/modules/irreps_tools.py", "snippet": "class reshape_irreps(torch.nn.Module):\n def __init__(self, irreps: o3.Irreps) -> None:\n super().__init__()\n self.irreps = o3.Irreps(irreps)\n self.dims = []\n self.muls = []\n for mul, ir in self.irreps:\n d = ir.dim\n self.dims.append(d)\n self.muls.append(mul)\n\n def forward(self, tensor: torch.Tensor) -> torch.Tensor:\n ix = 0\n out = []\n batch, _ = 
tensor.shape\n for mul, d in zip(self.muls, self.dims):\n field = tensor[:, ix : ix + mul * d] # [batch, sample, mul * repr]\n ix += mul * d\n field = field.reshape(batch, mul, d)\n out.append(field)\n return torch.cat(out, dim=-1)" }, { "identifier": "tp_out_irreps_with_instructions", "path": "src/physicsml/models/mace/modules/irreps_tools.py", "snippet": "def tp_out_irreps_with_instructions(\n irreps1: o3.Irreps,\n irreps2: o3.Irreps,\n target_irreps: o3.Irreps,\n) -> Tuple[o3.Irreps, List]:\n trainable = True\n\n # Collect possible irreps and their instructions\n irreps_out_list: List[Tuple[int, o3.Irreps]] = []\n instructions = []\n for i, (mul, ir_in) in enumerate(irreps1):\n for j, (_, ir_edge) in enumerate(irreps2):\n for ir_out in ir_in * ir_edge: # | l1 - l2 | <= l <= l1 + l2\n if ir_out in target_irreps:\n k = len(irreps_out_list) # instruction index\n irreps_out_list.append((mul, ir_out))\n instructions.append((i, j, k, \"uvu\", trainable))\n\n # We sort the output irreps of the tensor product so that we can simplify them\n # when they are provided to the second o3.Linear\n irreps_out = o3.Irreps(irreps_out_list)\n irreps_out, permut, _ = irreps_out.sort()\n\n # Permute the output indexes of the instructions to match the sorted irreps:\n instructions = [\n (i_in1, i_in2, permut[i_out], mode, train)\n for i_in1, i_in2, i_out, mode, train in instructions\n ]\n\n return irreps_out, instructions" }, { "identifier": "BesselBasis", "path": "src/physicsml/models/mace/modules/radial.py", "snippet": "class BesselBasis(torch.nn.Module):\n def __init__(\n self,\n r_max: float,\n num_basis: int = 8,\n trainable: bool = False,\n ) -> None:\n super().__init__()\n\n bessel_weights = torch.pi * torch.arange(1, num_basis + 1) / r_max\n\n if trainable:\n self.bessel_weights = torch.nn.Parameter(bessel_weights)\n else:\n self.register_buffer(\"bessel_weights\", bessel_weights)\n\n self.r_max = r_max\n self.prefactor = sqrt(2.0 / r_max)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor: # [..., 1]\n bessel_funcs = self.prefactor * (\n torch.sin(self.bessel_weights * x) / x\n ) # [..., num_basis]\n return bessel_funcs" }, { "identifier": "PolynomialCutoff", "path": "src/physicsml/models/mace/modules/radial.py", "snippet": "class PolynomialCutoff(torch.nn.Module):\n def __init__(self, r_max: float, p: int = 6) -> None:\n super().__init__()\n\n self.r_max = r_max\n self.p = p\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n envelope = (\n 1.0\n - ((self.p + 1.0) * (self.p + 2.0) / 2.0)\n * torch.pow(x / self.r_max, self.p)\n + self.p * (self.p + 2.0) * torch.pow(x / self.r_max, self.p + 1)\n - (self.p * (self.p + 1.0) / 2) * torch.pow(x / self.r_max, self.p + 2)\n )\n\n poly: torch.Tensor = envelope * (x < self.r_max)\n return poly" }, { "identifier": "SymmetricContraction", "path": "src/physicsml/models/mace/modules/symmetric_contraction.py", "snippet": "class SymmetricContraction(CodeGenMixin, torch.nn.Module):\n def __init__(\n self,\n irreps_in: o3.Irreps,\n irreps_out: o3.Irreps,\n correlation: Union[int, Dict[str, int]],\n irrep_normalization: Literal[\"component\", \"norm\"] = \"component\",\n path_normalization: Literal[\"element\", \"path\"] = \"element\",\n internal_weights: Optional[bool] = None,\n shared_weights: Optional[torch.Tensor] = None,\n num_elements: Optional[int] = None,\n ) -> None:\n super().__init__()\n\n assert irrep_normalization in [\"component\", \"norm\", \"none\"]\n assert path_normalization in [\"element\", \"path\", \"none\"]\n\n self.irreps_in = 
o3.Irreps(irreps_in)\n self.irreps_out = o3.Irreps(irreps_out)\n\n del irreps_in, irreps_out\n\n if not isinstance(correlation, dict):\n corr = correlation\n correlation = {}\n for irrep_out in self.irreps_out:\n correlation[irrep_out] = corr\n\n assert shared_weights or not internal_weights\n\n if internal_weights is None:\n internal_weights = True\n\n self.internal_weights = internal_weights\n self.shared_weights = shared_weights\n\n del internal_weights, shared_weights\n\n self.contractions = torch.nn.ModuleList()\n for irrep_out in self.irreps_out:\n self.contractions.append(\n Contraction(\n irreps_in=self.irreps_in,\n irrep_out=o3.Irreps(str(irrep_out.ir)),\n correlation=correlation[irrep_out],\n internal_weights=self.internal_weights,\n num_elements=num_elements,\n weights=self.shared_weights,\n ),\n )\n\n def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n outs = [contraction(x, y) for contraction in self.contractions]\n return torch.cat(outs, dim=-1)" } ]
from typing import Optional
from e3nn import nn, o3
from torch_geometric.utils.scatter import scatter
from ._activation import Activation
from .irreps_tools import reshape_irreps, tp_out_irreps_with_instructions
from .radial import BesselBasis, PolynomialCutoff
from .symmetric_contraction import SymmetricContraction
import torch
3849
class RadialEmbeddingBlock(torch.nn.Module):
    def __init__(
        self,
        r_max: float,
        num_bessel: int,
        num_polynomial_cutoff: int,
    ) -> None:
        super().__init__()
        self.bessel_fn = BesselBasis(r_max=r_max, num_basis=num_bessel)
        self.cutoff_fn = PolynomialCutoff(r_max=r_max, p=num_polynomial_cutoff)
        self.out_dim = num_bessel

    def forward(
        self,
        edge_lengths: torch.Tensor,  # [n_edges, 1]
    ) -> torch.Tensor:
        bessel = self.bessel_fn(edge_lengths)  # [n_edges, n_basis]
        cutoff = self.cutoff_fn(edge_lengths)  # [n_edges, 1]
        output: torch.Tensor = bessel * cutoff  # [n_edges, n_basis]
        return output


class NodeUpdateBlock(torch.nn.Module):
    def __init__(
        self,
        node_attrs_irreps: o3.Irreps,
        node_feats_irreps: o3.Irreps,
        hidden_irreps: o3.Irreps,
        residual_connection: bool,
    ) -> None:
        super().__init__()
        # net to compute W m_i
        self.linear = o3.Linear(
            hidden_irreps,
            hidden_irreps,
            internal_weights=True,
            shared_weights=True,
        )
        if residual_connection:
            # residual connection from original node attrs and node features
            self.residual_connection_layer = o3.FullyConnectedTensorProduct(
                node_feats_irreps,
                node_attrs_irreps,
                hidden_irreps,
            )
        else:
            self.residual_connection_layer = None

    def forward(
        self,
        m_i: torch.Tensor,
        node_feats: torch.Tensor,
        node_attrs: torch.Tensor,
    ) -> torch.Tensor:
        if self.residual_connection_layer is not None:
            node_feats = self.linear(m_i) + self.residual_connection_layer(
                node_feats,
                node_attrs,
            )
        else:
            node_feats = self.linear(m_i)
        return node_feats


class MessageBlock(torch.nn.Module):
    def __init__(
        self,
        interaction_irreps: o3.Irreps,
        node_attrs_irreps: o3.Irreps,
        hidden_irreps: o3.Irreps,
        correlation: int,
    ) -> None:
        super().__init__()
        # symmetric contraction to make A_i into messages m_i = W B_i
        self.symmetric_contractions = SymmetricContraction(
            irreps_in=interaction_irreps,
            irreps_out=hidden_irreps,
            correlation=correlation,
            num_elements=node_attrs_irreps.dim,
        )

    def forward(self, a_i: torch.Tensor, node_attrs: torch.Tensor) -> torch.Tensor:
        # contract the A_i's with element dependent weights and generalised CG coefs to get m_i = W B_i
        m_i: torch.Tensor = self.symmetric_contractions(a_i, node_attrs)
        return m_i


class InteractionBlock(torch.nn.Module):
    def __init__(
        self,
        node_feats_irreps: o3.Irreps,
        node_attrs_irreps: o3.Irreps,
        edge_attrs_irreps: o3.Irreps,
        edge_feats_irreps: o3.Irreps,
        interaction_irreps: o3.Irreps,
        avg_num_neighbours: float,
        mix_with_node_attrs: bool = False,
    ) -> None:
        super().__init__()
        self.avg_num_neighbours = avg_num_neighbours
        self.linear_node_feats = o3.Linear(
            node_feats_irreps,
            node_feats_irreps,
            internal_weights=True,
            shared_weights=True,
        )

        # TensorProduct
        # find the only possible results from the tensor prod of node feats with edge attrs into targets
        # only do the tensor prod for these possibilities
class NonLinearReadoutBlock(torch.nn.Module):
    def __init__(
        self,
        irreps_in: o3.Irreps,
        MLP_irreps: o3.Irreps,
        irreps_out: o3.Irreps,
    ) -> None:
        super().__init__()
        self.linear_1 = o3.Linear(irreps_in=irreps_in, irreps_out=MLP_irreps)
        self.non_linearity = Activation(irreps_in=MLP_irreps, acts=[torch.nn.SiLU()])
        self.linear_2 = o3.Linear(irreps_in=MLP_irreps, irreps_out=irreps_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:  # [n_nodes, irreps]  # [..., ]
        x = self.linear_1(x)
        x = self.non_linearity(x)
        x = self.linear_2(x)
        return x


class RadialEmbeddingBlock(torch.nn.Module):
    def __init__(
        self,
        r_max: float,
        num_bessel: int,
        num_polynomial_cutoff: int,
    ) -> None:
        super().__init__()
        self.bessel_fn = BesselBasis(r_max=r_max, num_basis=num_bessel)
        self.cutoff_fn = PolynomialCutoff(r_max=r_max, p=num_polynomial_cutoff)
        self.out_dim = num_bessel

    def forward(
        self,
        edge_lengths: torch.Tensor,  # [n_edges, 1]
    ) -> torch.Tensor:
        bessel = self.bessel_fn(edge_lengths)  # [n_edges, n_basis]
        cutoff = self.cutoff_fn(edge_lengths)  # [n_edges, 1]
        output: torch.Tensor = bessel * cutoff  # [n_edges, n_basis]
        return output


class NodeUpdateBlock(torch.nn.Module):
    def __init__(
        self,
        node_attrs_irreps: o3.Irreps,
        node_feats_irreps: o3.Irreps,
        hidden_irreps: o3.Irreps,
        residual_connection: bool,
    ) -> None:
        super().__init__()
        # net to compute W m_i
        self.linear = o3.Linear(
            hidden_irreps,
            hidden_irreps,
            internal_weights=True,
            shared_weights=True,
        )
        if residual_connection:
            # residual connection from original node attrs and node features
            self.residual_connection_layer = o3.FullyConnectedTensorProduct(
                node_feats_irreps,
                node_attrs_irreps,
                hidden_irreps,
            )
        else:
            self.residual_connection_layer = None

    def forward(
        self,
        m_i: torch.Tensor,
        node_feats: torch.Tensor,
        node_attrs: torch.Tensor,
    ) -> torch.Tensor:
        if self.residual_connection_layer is not None:
            node_feats = self.linear(m_i) + self.residual_connection_layer(
                node_feats,
                node_attrs,
            )
        else:
            node_feats = self.linear(m_i)
        return node_feats


class MessageBlock(torch.nn.Module):
    def __init__(
        self,
        interaction_irreps: o3.Irreps,
        node_attrs_irreps: o3.Irreps,
        hidden_irreps: o3.Irreps,
        correlation: int,
    ) -> None:
        super().__init__()
        # symmetric contraction to make A_i into messages m_i = W B_i
        self.symmetric_contractions = SymmetricContraction(
            irreps_in=interaction_irreps,
            irreps_out=hidden_irreps,
            correlation=correlation,
            num_elements=node_attrs_irreps.dim,
        )

    def forward(self, a_i: torch.Tensor, node_attrs: torch.Tensor) -> torch.Tensor:
        # contract the A_i's with element dependent weights and generalised CG coefs to get m_i = W B_i
        m_i: torch.Tensor = self.symmetric_contractions(a_i, node_attrs)
        return m_i


class InteractionBlock(torch.nn.Module):
    def __init__(
        self,
        node_feats_irreps: o3.Irreps,
        node_attrs_irreps: o3.Irreps,
        edge_attrs_irreps: o3.Irreps,
        edge_feats_irreps: o3.Irreps,
        interaction_irreps: o3.Irreps,
        avg_num_neighbours: float,
        mix_with_node_attrs: bool = False,
    ) -> None:
        super().__init__()
        self.avg_num_neighbours = avg_num_neighbours
        self.linear_node_feats = o3.Linear(
            node_feats_irreps,
            node_feats_irreps,
            internal_weights=True,
            shared_weights=True,
        )

        # TensorProduct
        # find the only possible results from the tensor prod of node feats with edge attrs into targets
        # only do the tensor prod for these possibilities
tp_out_irreps, instructions = tp_out_irreps_with_instructions(
2
2023-11-10 13:54:53+00:00
8k
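
The RadialEmbeddingBlock in the record above multiplies a Bessel basis expansion of each edge length by a smooth polynomial cutoff. A torch-only sketch of that computation follows, assuming illustrative values for r_max, the basis size, and the cutoff power p (none of these come from the repository); the tensor shapes mirror the comments in the record.

import math

import torch

r_max, num_bessel, p = 5.0, 8, 6
edge_lengths = torch.tensor([[1.0], [2.5], [4.9]])  # [n_edges, 1]

# BesselBasis: sqrt(2/r_max) * sin(k*pi*x/r_max) / x for k = 1..num_bessel
weights = math.pi * torch.arange(1, num_bessel + 1) / r_max
prefactor = math.sqrt(2.0 / r_max)
bessel = prefactor * torch.sin(weights * edge_lengths) / edge_lengths  # [n_edges, num_bessel]

# PolynomialCutoff: smooth envelope that reaches zero at r_max
x = edge_lengths / r_max
envelope = (
    1.0
    - ((p + 1.0) * (p + 2.0) / 2.0) * x**p
    + p * (p + 2.0) * x ** (p + 1)
    - (p * (p + 1.0) / 2.0) * x ** (p + 2)
) * (edge_lengths < r_max)

embedding = bessel * envelope  # [n_edges, num_bessel]
print(embedding.shape)         # torch.Size([3, 8])
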
naver-ai/scob
model/decoders/transformer_decoder.py
[ { "identifier": "BaseDecoder", "path": "model/decoders/base.py", "snippet": "class BaseDecoder(nn.Module, metaclass=abc.ABCMeta):\n downstream_evaluator = DownstreamEvaluator()\n\n @abc.abstractmethod\n def get_step_out_dict(\n self,\n batch,\n batch_idx,\n loader_idx,\n dec_out_dict,\n enc_kwargs,\n dec_kwargs,\n ):\n \"\"\"val-test step\"\"\"" }, { "identifier": "WeightLoss", "path": "model/losses.py", "snippet": "class WeightLoss:\n \"\"\"Weight loss per sample in each decoder\"\"\"\n\n def __init__(self, dataset_items):\n self.loss_weight_dict = {}\n for dataset_item in dataset_items:\n data_name = dataset_item.name\n for task in dataset_item.tasks:\n self.loss_weight_dict[(data_name, task.name)] = task.loss_weight\n\n def __call__(self, dec_batch, example_losses):\n weighted_losses = []\n for example_idx, (data_name, task) in enumerate(\n zip(dec_batch[\"dataset_names\"], dec_batch[\"task_names\"])\n ):\n loss_weight = self.loss_weight_dict[(data_name, task)]\n weighted_loss = loss_weight * example_losses[example_idx]\n weighted_losses.append(weighted_loss)\n return torch.stack(weighted_losses)" }, { "identifier": "get_loss", "path": "model/losses.py", "snippet": "def get_loss(loss_cfg):\n if loss_cfg.name == \"ce\":\n loss_func = nn.CrossEntropyLoss(reduction=\"none\")\n elif loss_cfg.name == \"focal\":\n loss_func = FocalLoss(\n alpha=loss_cfg.focal_alpha,\n gamma=loss_cfg.focal_gamma,\n reduction=\"none\",\n )\n elif loss_cfg.name == \"l1\":\n loss_func = nn.L1Loss(reduction=\"none\")\n elif loss_cfg.name == \"mse\":\n loss_func = nn.MSELoss(reduction=\"none\")\n else:\n raise ValueError(f\"Unknown loss_cfg.name={loss_cfg.name}\")\n return loss_func" }, { "identifier": "get_tokenizer", "path": "model/model_utils.py", "snippet": "def get_tokenizer(\n huggingface_path,\n tokenizer_name,\n decoder_name,\n resume_model_path=None,\n w_pretrained_model_path=None,\n tokenizer_path=None,\n):\n \"\"\"Get Tokenizer for model\n Args:\n huggingface_path (str): huggingface_path\n tokenizer_name (str): tokenizer name\n decoder_name (str): decoder name\n resume_model_path (str): resume_model_path\n w_pretrained_model_path (str): pretrained_model_path\n tokenizer_path (str): tokenizer_path\n\n Returns:\n tokenizer (Tokenizer): tokenizer\n \"\"\"\n pretrained_model_path = None\n if resume_model_path:\n pretrained_model_path = resume_model_path\n elif w_pretrained_model_path:\n pretrained_model_path = w_pretrained_model_path\n assert (tokenizer_path is None) or (pretrained_model_path is None)\n\n if tokenizer_path is None and pretrained_model_path is not None:\n # Check \".../tokenizer/\" dir exists,\n # since encoder only pre-trained model doesn't have \".../tokenizer/\" dir\n tokenizer_path_parent = os.path.join(\n os.path.dirname(pretrained_model_path), \"tokenizers\"\n )\n if os.path.exists(tokenizer_path_parent):\n tokenizer_path = os.path.join(\n tokenizer_path_parent, decoder_name, tokenizer_name\n )\n else:\n raise ValueError(f\"Tokenizer dir not found: {tokenizer_path_parent}\")\n\n if tokenizer_path is None:\n tokenizer_path = os.path.join(huggingface_path, tokenizer_name)\n else:\n if not os.path.exists(tokenizer_path):\n raise ValueError(\n f\"Current tokenizer, {tokenizer_name}, \"\n \"is different from the one in pre-trained model.\"\n )\n\n if tokenizer_name in [\n \"bert-base-cased\",\n \"bert-base-multilingual-cased\",\n \"roberta-base\",\n \"xlm-roberta-base\",\n \"facebook/bart-base\",\n \"facebook/mbart-large-cc25\",\n \"char_en__bert-base-cased\",\n ]:\n tokenizer = 
AutoTokenizer.from_pretrained(tokenizer_path)\n elif tokenizer_name == \"hyunwoongko/asian-bart-ecjk\":\n tokenizer = AsianBartTokenizer.from_pretrained(tokenizer_path)\n else:\n raise ValueError(f\"Invalid tokenizer_name={tokenizer_name}\")\n\n if tokenizer_name == \"char_en__bert-base-cased\":\n char_pre_tokenizer = pre_tokenizers.Sequence(\n [pre_tokenizers.Split(\"\", behavior=\"removed\")]\n )\n tokenizer.backend_tokenizer.pre_tokenizer = char_pre_tokenizer\n\n print(\"-\" * 80)\n print(f\"tokenizer_loaded from {tokenizer_path}\")\n print(\"-\" * 80)\n return tokenizer" }, { "identifier": "COMMON_SPECIAL_TOKENS", "path": "utils/constants.py", "snippet": "COMMON_SPECIAL_TOKENS = [\n \"[START_PROMPT]\",\n \"[END_PROMPT]\",\n \"[dataset]\",\n \"[DONTCARE]\",\n \"[END]\",\n \"[DIV]\",\n \"[START_OCR_READ]\",\n \"[START_OCR_READ_TextInstance_PADDING]\",\n \"[START_BOX]\",\n \"[END_BOX]\",\n \"[START_TEXT_READ]\",\n \"[START_TEXT]\",\n \"[END_TEXT]\",\n \"[START_OCR_READ_2HEAD]\",\n \"[START_TEXT_BLOCK]\",\n \"[END_TEXT_BLOCK]\",\n \"[EMPTY_TEXT_IMAGE]\",\n \"[CHAR_PAD]\",\n]" }, { "identifier": "DecoderTypes", "path": "utils/constants.py", "snippet": "class DecoderTypes:\n TRANSFORMER = \"transformer_decoder\"" }, { "identifier": "HeadTypes", "path": "utils/constants.py", "snippet": "class HeadTypes:\n TWO_HEAD = \"2head\"\n BASE = \"base\"" }, { "identifier": "Tasks", "path": "utils/constants.py", "snippet": "class Tasks:\n # base head tasks\n OCR_READ = \"ocr_read\"\n TEXT_READ = \"text_read\"\n DONUT_KIE = \"donut_kie\"\n OCR_READ_TEXTINSTANCEPADDING = \"ocr_read_TextInstancePadding\"\n TABLE_PARSING = \"table_parsing\"\n OTOR = \"otor\"\n OTOR_ORACLE = \"otor_oracle\"\n\n # 2head tasks\n OCR_READ_2HEAD = \"ocr_read_2head\"" }, { "identifier": "get_file", "path": "utils/misc.py", "snippet": "def get_file(dataset_path, prefix, postfix, ext):\n file_name = f\"{prefix}{ext}\" if postfix is None else f\"{prefix}_{postfix}{ext}\"\n file_path = os.path.join(dataset_path, file_name)\n return file_path" }, { "identifier": "is_otor", "path": "utils/misc.py", "snippet": "def is_otor(task_name, or_oracle=False, oracle=False):\n if or_oracle:\n if task_name in [\"otor\", \"otor_oracle\"]:\n return True\n elif oracle:\n if task_name == \"otor_oracle\":\n return True\n else:\n if task_name == \"otor\":\n return True\n return False" } ]
from collections import defaultdict
from asian_bart import AsianBartForCausalLM
from Levenshtein import distance
from overrides import overrides
from transformers import AutoConfig, AutoModelForCausalLM
from model.decoders.base import BaseDecoder
from model.losses import WeightLoss, get_loss
from model.model_utils import get_tokenizer
from utils.constants import COMMON_SPECIAL_TOKENS, DecoderTypes, HeadTypes, Tasks
from utils.misc import get_file, is_otor
import torch
import torch.nn as nn
import torch.nn.functional as F
import transformers
4339
        reg_outputs = self.reg_head(decoder_outputs.hidden_states[-1])
        block_quads = dec_batch["block_quads"][:, 1:].contiguous()
        block_first_tokens = dec_batch["block_first_tokens"][:, 1:].contiguous()

        loc_loss = self.loc_loss_2head_func(reg_outputs, block_quads)
        loc_loss = loc_loss.mean(-1)
        loc_loss = loc_loss * block_first_tokens
        loc_loss = loc_loss.sum(1) / block_first_tokens.sum(1)

        # To avoid NaN loss
        loc_loss = torch.nan_to_num(loc_loss, nan=0.0, posinf=0.0, neginf=0.0)
        loc_loss = self.weight_loss(dec_batch, loc_loss).mean()
        return loc_loss

    def get_decoder_hidden_states(self, decoder_outputs):
        decoder_hidden_states = []
        for token_idx, all_layers_hidden_states in enumerate(
            decoder_outputs.hidden_states[:-1]
        ):
            if token_idx == 0:
                decoder_hidden_states.append(
                    all_layers_hidden_states[-1][:, -1, :].unsqueeze(1).contiguous()
                )
            else:
                decoder_hidden_states.append(all_layers_hidden_states[-1].contiguous())
        if len(decoder_hidden_states) == 0:
            return 0
        decoder_hidden_states = torch.cat(decoder_hidden_states, 1)
        return decoder_hidden_states

    def get_pr_loc_outputs(self, dec_batch, decoder_outputs, reg_outputs):
        bsz = reg_outputs.shape[0]
        pr_loc_outputs = [[] for _ in range(bsz)]
        for example_idx, sequence in enumerate(decoder_outputs.sequences):
            task_name = dec_batch["task_names"][example_idx]
            target_block_token_id = self.target_block_token_id_dict[task_name]
            is_target_block_token_indices = (
                sequence[:-1][dec_batch["max_end_prompt_token_idx"] + 1 :]
                == target_block_token_id
            )
            pr_loc_outputs[example_idx] = reg_outputs[
                example_idx, is_target_block_token_indices, :
            ]
        return pr_loc_outputs

    def get_pr_confs(self, dec_batch, decoder_outputs):
        logits = torch.stack(decoder_outputs.scores).permute(1, 0, 2).contiguous()
        probs = nn.functional.softmax(logits, -1).cpu().numpy()
        pr_confs = []
        for example_idx, sequence in enumerate(decoder_outputs.sequences):
            task_name = dec_batch["task_names"][example_idx]
            target_block_start_token_id = self.target_block_start_token_id_dict[
                task_name
            ]
            target_block_token_id = self.target_block_token_id_dict[task_name]
            wo_prompt_seq = sequence[
                torch.nonzero(sequence == self.end_prompt_token_id) + 1 :
            ]
            seq_pr_confs = []
            token_probs = []
            for token_idx, token_id in enumerate(wo_prompt_seq):
                if task_name == "object_detection":
                    if token_id == target_block_token_id:
                        seq_pr_confs.append(
                            probs[example_idx, token_idx - 1].max()
                        )  # cls confidence
                        # seq_pr_confs.append(probs[example_idx, token_idx-5:token_idx-1].max(1).mean())  # box confidence mean
                else:
                    if token_id == target_block_start_token_id:  # [START_TEXT_BLOCK]
                        token_probs.clear()
                    elif token_id == target_block_token_id:  # [END_TEXT_BLOCK]
                        # Since pr_reg_outputs is generated on every [END_TEXT_BLOCK]
                        if len(token_probs) == 0:
                            block_prob = 0.0
                        else:
                            block_prob = sum(token_probs) / len(token_probs)
                        seq_pr_confs.append(block_prob)
                        token_probs.clear()
                    else:
                        token_probs.append(probs[example_idx, token_idx, token_id])
            pr_confs.append(seq_pr_confs)
        return pr_confs

    def __get_reg_head(self, n_loc_head_hidden_layers):
        # https://github.com/facebookresearch/detr/blob/main/models/detr.py#L38
        # https://github.com/facebookresearch/detr/blob/091a817eca74b8b97e35e4531c1c39f89fbe38eb/models/detr.py#L289
        reg_heads = []
        hidden_dim = self.model.config.hidden_size
        for _ in range(n_loc_head_hidden_layers):
            reg_heads.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU()])
        reg_heads.append(nn.Linear(hidden_dim, 8))
        return nn.Sequential(*reg_heads)

    def prepare_inputs_for_inference(
        self,
        input_ids,
        past=None,
        attention_mask=None,
        use_cache=None,
        **model_kwargs
    ):
        input_shape = input_ids.shape
        if attention_mask is None:
            attention_mask = input_ids.new_ones(input_shape)
        if past is not None:
            input_ids = input_ids[:, -1:]
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "past_key_values": past,
            "use_cache": use_cache,
            "encoder_hidden_states": model_kwargs["encoder_hidden_states"],
        }

    def _add_special_tokens(self, dataset_items):
        special_tokens = []
        for dataset_item in dataset_items:
""" SCOB Copyright (c) 2023-present NAVER Cloud Corp. MIT license """ class TransformerDecoder(BaseDecoder): def __init__(self, cfg, decoder_cfg, decoder_name): super().__init__() self.cfg = cfg self.decoder_cfg = decoder_cfg self.decoder_name = decoder_name self.model = _get_huggingface_decoder(decoder_cfg) if self.decoder_cfg.kwargs.n_prune_layers > 0: _prune_decoder_layers(self.model, self.decoder_cfg) self.tokenizer = get_tokenizer( decoder_cfg.huggingface_path, decoder_cfg.kwargs.tokenizer_name, self.decoder_name, cfg.model.resume_model_path, cfg.model.w_pretrained_model_path, decoder_cfg.kwargs.tokenizer_path, ) self.decoder_max_length = decoder_cfg.kwargs.decoder_max_length self._add_special_tokens(cfg.dataset_items) self.end_prompt_token = "[END_PROMPT]" self.end_prompt_token_id = self.tokenizer.convert_tokens_to_ids( self.end_prompt_token ) self.decoder_end_token = "[END]" self.end_token_id = self.tokenizer.convert_tokens_to_ids(self.decoder_end_token) self.target_block_start_token_id_dict = { Tasks.OCR_READ: self.tokenizer.convert_tokens_to_ids("[START_BOX]"), Tasks.OCR_READ_2HEAD: self.tokenizer.convert_tokens_to_ids( "[START_TEXT_BLOCK]" ), Tasks.OCR_READ_TEXTINSTANCEPADDING: self.tokenizer.convert_tokens_to_ids( "[START_BOX]" ), Tasks.OTOR: self.tokenizer.convert_tokens_to_ids("[START_BOX]"), Tasks.OTOR_ORACLE: self.tokenizer.convert_tokens_to_ids("[START_BOX]"), } self.target_block_token_id_dict = { Tasks.OCR_READ: self.tokenizer.convert_tokens_to_ids("[END_BOX]"), Tasks.OCR_READ_TEXTINSTANCEPADDING: self.tokenizer.convert_tokens_to_ids( "[END_BOX]" ), Tasks.OCR_READ_2HEAD: self.tokenizer.convert_tokens_to_ids( "[END_TEXT_BLOCK]" ), Tasks.OTOR: self.tokenizer.convert_tokens_to_ids("[END_BOX]"), Tasks.OTOR_ORACLE: self.tokenizer.convert_tokens_to_ids("[END_BOX]"), } self.model.resize_token_embeddings(len(self.tokenizer)) self.model.prepare_inputs_for_generation = self.prepare_inputs_for_inference self.weight_loss = WeightLoss(cfg.dataset_items) self.loss_func = get_loss(decoder_cfg.loss_func) self.ignore_index = -100 self.calc_val_loss = decoder_cfg.calc_val_loss self.calc_confidence = decoder_cfg.calc_confidence self.connector_size = self.model.config.hidden_size if decoder_cfg.head_type == HeadTypes.TWO_HEAD: self.loc_loss_2head_func = get_loss(decoder_cfg.loc_loss_2head_func) self.reg_head = self.__get_reg_head( decoder_cfg.kwargs.n_loc_head_hidden_layers ) if decoder_cfg.scob.use: self.projector_q = self._get_projector(decoder_cfg.scob.project_dim) self.projector_k = self._get_projector(decoder_cfg.scob.project_dim) self.otor_scale_factor = decoder_cfg.scob.loss_weight def _get_projector(self, output_dim): hidden_size = self.model.config.hidden_size projection_head = nn.Sequential( nn.Linear(hidden_size, hidden_size), nn.ReLU(True), nn.Linear(hidden_size, output_dim), ) return projection_head @overrides def forward(self, dec_batch, connector_out, enc_kwargs): if self.training: output_dict, losses_dict, kwargs = self.train_forward( dec_batch, connector_out ) else: output_dict, losses_dict, kwargs = self.valtest_forward( dec_batch, connector_out ) return output_dict, losses_dict, kwargs def train_forward(self, dec_batch, connector_out): input_ids = dec_batch["input_ids"] attention_mask = dec_batch["attention_mask"] labels = dec_batch["labels"] if is_otor(dec_batch["task_names"][0], oracle=True): input_ids = torch.cat((input_ids, dec_batch["origin_input_ids"]), 0) attention_mask = torch.cat((attention_mask, attention_mask), 0) labels = torch.cat((labels, 
dec_batch["origin_labels"]), 0) elif is_otor(dec_batch["task_names"][0]): input_ids = torch.cat((input_ids, dec_batch["origin_input_ids"]), 0) attention_mask = torch.cat((attention_mask, attention_mask), 0) labels = torch.cat((labels, labels), 0) input_ids = input_ids[:, :-1].contiguous() attention_mask = attention_mask[:, :-1].contiguous() labels = labels[:, 1:].contiguous() decoder_outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=connector_out, # labels=labels, # comment out to get per-example loss output_hidden_states=True, ) projection = None if self.decoder_cfg.scob.use: bsz = decoder_outputs.hidden_states[-1].shape[0] projection = torch.cat( [ self.projector_q(decoder_outputs.hidden_states[-1][: int(bsz / 2)]), self.projector_k(decoder_outputs.hidden_states[-1][int(bsz / 2) :]), ], dim=0, ) pr_sequences = None # Not used in Train phase token_loss = self.get_per_example_loss( dec_batch, decoder_outputs.logits, labels, projection ) supcon_loss = 0 if ( is_otor(dec_batch["task_names"][0], or_oracle=True) and self.decoder_cfg.scob.use ): supcon_loss = token_loss["supcon_loss"] token_loss = token_loss["token_loss"] aux_loss = 0 if self.decoder_cfg.aux_loss: aux_loss = self.get_per_example_aux_loss( dec_batch, decoder_outputs.hidden_states[1:-1], labels ) if self.do_2head_regression(dec_batch): loc_loss = self.get_per_example_2head_loc_loss(dec_batch, decoder_outputs) else: loc_loss = 0 token_loss *= self.decoder_cfg.token_loss_weight loc_loss *= self.decoder_cfg.loc_loss_weight decoder_loss = token_loss + loc_loss + aux_loss + supcon_loss output_dict = { "pr_sequences": pr_sequences, "pr_loc_outputs": None, # Not used in Train phase "pr_confs": None, # Not used in Train phase } losses_dict = { "decoder_token_loss": token_loss, "decoder_aux_loss": aux_loss, "decoder_supcon_loss": supcon_loss, "decoder_loc_loss": loc_loss, "decoder_loss": decoder_loss, } kwargs = {} return output_dict, losses_dict, kwargs def do_2head_regression(self, dec_batch): do_reg = False if self.decoder_cfg.head_type == HeadTypes.TWO_HEAD and ( "block_quads" in dec_batch or "od_block_quads" in dec_batch ): do_reg = True return do_reg def valtest_forward(self, dec_batch, connector_out): input_ids = dec_batch["input_ids"] if is_otor(dec_batch["task_names"][0], or_oracle=True): bsz, _, _ = connector_out.size() connector_out = connector_out[: int(bsz / 2)] bsz = input_ids.shape[0] do_reg = self.do_2head_regression(dec_batch) # https://github.com/huggingface/transformers/blob/master/src/transformers/generation_utils.py#L640 decoder_outputs = self.model.generate( input_ids=input_ids[:, : dec_batch["max_end_prompt_token_idx"] + 1], encoder_hidden_states=connector_out, max_length=self.decoder_max_length + 1, num_beams=1, early_stopping=False if self.calc_val_loss else True, use_cache=True, eos_token_id=self.end_token_id, return_dict_in_generate=True, output_scores=True, output_hidden_states=do_reg, forced_eos_token_id=False, ) if self.decoder_cfg.kwargs.tokenizer_name.startswith("char_"): pr_sequences = [] for pr_idx in range(decoder_outputs.sequences.shape[0]): pr_sequence = "".join( self.tokenizer.convert_ids_to_tokens( decoder_outputs.sequences[pr_idx, :-1] ) ) pr_sequences.append(pr_sequence) else: pr_sequences = self.tokenizer.batch_decode( decoder_outputs.sequences[:, :-1] ) if self.calc_val_loss: scores = torch.stack(decoder_outputs.scores).permute(1, 0, 2).contiguous() scores = scores[:, :-1, :].contiguous() labels = dec_batch["labels"][ :, 
dec_batch["max_end_prompt_token_idx"] + 1 : ].contiguous() decoder_token_loss = self.get_per_example_loss(dec_batch, scores, labels) else: decoder_token_loss = 0 # dummy value if do_reg: decoder_hidden_states = self.get_decoder_hidden_states(decoder_outputs) if torch.is_tensor(decoder_hidden_states): reg_outputs = self.reg_head(decoder_hidden_states) pr_loc_outputs = self.get_pr_loc_outputs( dec_batch, decoder_outputs, reg_outputs ) else: pr_loc_outputs = [[] for _ in range(bsz)] if self.calc_val_loss: decoder_loc_loss = 0 # Not implemented yet else: decoder_loc_loss = 0 # dummy value else: pr_loc_outputs = [[] for _ in range(bsz)] decoder_loc_loss = 0 if self.calc_confidence: pr_confs = self.get_pr_confs(dec_batch, decoder_outputs) else: pr_confs = [[] for _ in range(bsz)] decoder_token_loss *= self.decoder_cfg.token_loss_weight decoder_loc_loss *= self.decoder_cfg.loc_loss_weight decoder_loss = decoder_token_loss + decoder_loc_loss output_dict = { "pr_sequences": pr_sequences, "pr_loc_outputs": pr_loc_outputs, "pr_confs": pr_confs, } losses_dict = { "decoder_token_loss": decoder_token_loss, "decoder_loc_loss": decoder_loc_loss, "decoder_loss": decoder_loss, } kwargs = {} return output_dict, losses_dict, kwargs def get_per_example_2head_loc_loss(self, dec_batch, decoder_outputs): reg_outputs = self.reg_head(decoder_outputs.hidden_states[-1]) block_quads = dec_batch["block_quads"][:, 1:].contiguous() block_first_tokens = dec_batch["block_first_tokens"][:, 1:].contiguous() loc_loss = self.loc_loss_2head_func(reg_outputs, block_quads) loc_loss = loc_loss.mean(-1) loc_loss = loc_loss * block_first_tokens loc_loss = loc_loss.sum(1) / block_first_tokens.sum(1) # To avoid NaN loss loc_loss = torch.nan_to_num(loc_loss, nan=0.0, posinf=0.0, neginf=0.0) loc_loss = self.weight_loss(dec_batch, loc_loss).mean() return loc_loss def get_decoder_hidden_states(self, decoder_outputs): decoder_hidden_states = [] for token_idx, all_layers_hidden_states in enumerate( decoder_outputs.hidden_states[:-1] ): if token_idx == 0: decoder_hidden_states.append( all_layers_hidden_states[-1][:, -1, :].unsqueeze(1).contiguous() ) else: decoder_hidden_states.append(all_layers_hidden_states[-1].contiguous()) if len(decoder_hidden_states) == 0: return 0 decoder_hidden_states = torch.cat(decoder_hidden_states, 1) return decoder_hidden_states def get_pr_loc_outputs(self, dec_batch, decoder_outputs, reg_outputs): bsz = reg_outputs.shape[0] pr_loc_outputs = [[] for _ in range(bsz)] for example_idx, sequence in enumerate(decoder_outputs.sequences): task_name = dec_batch["task_names"][example_idx] target_block_token_id = self.target_block_token_id_dict[task_name] is_target_block_token_indices = ( sequence[:-1][dec_batch["max_end_prompt_token_idx"] + 1 :] == target_block_token_id ) pr_loc_outputs[example_idx] = reg_outputs[ example_idx, is_target_block_token_indices, : ] return pr_loc_outputs def get_pr_confs(self, dec_batch, decoder_outputs): logits = torch.stack(decoder_outputs.scores).permute(1, 0, 2).contiguous() probs = nn.functional.softmax(logits, -1).cpu().numpy() pr_confs = [] for example_idx, sequence in enumerate(decoder_outputs.sequences): task_name = dec_batch["task_names"][example_idx] target_block_start_token_id = self.target_block_start_token_id_dict[ task_name ] target_block_token_id = self.target_block_token_id_dict[task_name] wo_prompt_seq = sequence[ torch.nonzero(sequence == self.end_prompt_token_id) + 1 : ] seq_pr_confs = [] token_probs = [] for token_idx, token_id in enumerate(wo_prompt_seq): if task_name == 
"object_detection": if token_id == target_block_token_id: seq_pr_confs.append( probs[example_idx, token_idx - 1].max() ) # cls confidence # seq_pr_confs.append(probs[example_idx, token_idx-5:token_idx-1].max(1).mean()) # box confidence mean else: if token_id == target_block_start_token_id: # [START_TEXT_BLOCK] token_probs.clear() elif token_id == target_block_token_id: # [END_TEXT_BLOCK] # Since pr_reg_outputs is generated on every [END_TEXT_BLOCK] if len(token_probs) == 0: block_prob = 0.0 else: block_prob = sum(token_probs) / len(token_probs) seq_pr_confs.append(block_prob) token_probs.clear() else: token_probs.append(probs[example_idx, token_idx, token_id]) pr_confs.append(seq_pr_confs) return pr_confs def __get_reg_head(self, n_loc_head_hidden_layers): # https://github.com/facebookresearch/detr/blob/main/models/detr.py#L38 # https://github.com/facebookresearch/detr/blob/091a817eca74b8b97e35e4531c1c39f89fbe38eb/models/detr.py#L289 reg_heads = [] hidden_dim = self.model.config.hidden_size for _ in range(n_loc_head_hidden_layers): reg_heads.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU()]) reg_heads.append(nn.Linear(hidden_dim, 8)) return nn.Sequential(*reg_heads) def prepare_inputs_for_inference( self, input_ids, past=None, attention_mask=None, use_cache=None, **model_kwargs ): input_shape = input_ids.shape if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) if past is not None: input_ids = input_ids[:, -1:] return { "input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past, "use_cache": use_cache, "encoder_hidden_states": model_kwargs["encoder_hidden_states"], } def _add_special_tokens(self, dataset_items): special_tokens = [] for dataset_item in dataset_items:
special_token_file = get_file(
8
2023-11-15 00:40:08+00:00
8k
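
The __get_reg_head helper in the record above builds a DETR-style MLP that maps each decoder hidden state to 8 quad coordinates. A standalone sketch of the same construction, with hidden_dim, the layer count, and the tensor sizes chosen for illustration only:

import torch
import torch.nn as nn

hidden_dim, n_loc_head_hidden_layers = 256, 2

# n hidden Linear+ReLU blocks followed by a projection to 8 coordinates
layers = []
for _ in range(n_loc_head_hidden_layers):
    layers.extend([nn.Linear(hidden_dim, hidden_dim), nn.ReLU()])
layers.append(nn.Linear(hidden_dim, 8))  # (x1, y1, ..., x4, y4) per token
reg_head = nn.Sequential(*layers)

hidden_states = torch.randn(4, 100, hidden_dim)  # [batch, seq_len, hidden]
quads = reg_head(hidden_states)                  # [batch, seq_len, 8]
print(quads.shape)
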
speckai/speck
src/python/speck/connections/anthropic.py
[ { "identifier": "NOT_GIVEN", "path": "src/python/speck/chat/entities.py", "snippet": "NOT_GIVEN = None" }, { "identifier": "ChatConfig", "path": "src/python/speck/chat/entities.py", "snippet": "class ChatConfig:\n # Todo: add typed params here\n # Todo: Create conversions to other formats\n def __init__(\n self,\n *,\n provider: str = None,\n model: OpenAIModel,\n stream: bool = False,\n _log: bool = True,\n temperature: Union[Optional[float], NotGiven] = NOT_GIVEN,\n max_tokens: Union[Optional[int], NotGiven] = NOT_GIVEN,\n top_p: Union[Optional[float], NotGiven] = NOT_GIVEN,\n frequency_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,\n presence_penalty: Union[Optional[float], NotGiven] = NOT_GIVEN,\n **config_kwargs,\n ):\n if \"log_config\" in config_kwargs:\n del config_kwargs[\"log_config\"]\n\n self.provider = provider\n self.model = model\n self.stream = stream\n self._log = _log\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.top_p = top_p\n self.frequency_penalty = frequency_penalty\n self.presence_penalty = presence_penalty\n self.chat_args = config_kwargs\n # If this is modified, update to_dict too\n\n def to_dict(self):\n return {\n \"provider\": self.provider,\n \"model\": str(self.model), # Assuming model can be represented as a string\n \"stream\": self.stream,\n \"_log\": self._log,\n \"temperature\": self._convert_optional(self.temperature),\n \"max_tokens\": self._convert_optional(self.max_tokens),\n \"top_p\": self._convert_optional(self.top_p),\n \"frequency_penalty\": self._convert_optional(self.frequency_penalty),\n \"presence_penalty\": self._convert_optional(self.presence_penalty),\n \"chat_args\": self.chat_args,\n }\n\n def _convert_optional(self, value):\n return None if isinstance(value, NotGiven) else value\n\n @classmethod\n def create(cls, config: ChatConfigTypes, kwargs: dict = None) -> \"ChatConfig\":\n if isinstance(config, cls):\n if kwargs is not None:\n return cls(**{**config.__dict__, **kwargs})\n else:\n return config\n elif isinstance(config, dict):\n return cls(**config)\n elif kwargs:\n return cls(**kwargs)\n else:\n raise NotImplementedError\n\n def get(self, key: str, default: Any = None) -> Any:\n return getattr(self, key, default)\n\n def convert(self, provider: str = \"speck\") -> \"ChatConfig\":\n \"\"\"\n Convert to another config format\n \"\"\"\n if provider == \"openai\":\n return OpenAIChatConfig(\n model=self.model,\n stream=self.stream,\n _log=self._log,\n temperature=self.temperature,\n max_tokens=self.max_tokens,\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n **self._kwargs,\n )\n\n return self\n\n def log_chat(\n self,\n *,\n log_config: LogConfig,\n prompt: Prompt,\n response: Response,\n provider: str = \"speck\",\n ):\n config = self.convert()\n ChatLogger.log(\n log_config=log_config,\n provider=provider,\n model=str(config.model),\n prompt=prompt,\n response=response,\n **config.chat_args,\n )\n\n def encode(self, encoding: str = \"utf-8\"):\n return self.__str__().encode(encoding)\n\n def __str__(self):\n return f\"ChatConfig(provider={self.provider}, model={self.model}, stream={self.stream}, _log={self._log}, temperature={self.temperature}, max_tokens={self.max_tokens}, top_p={self.top_p}, frequency_penalty={self.frequency_penalty}, presence_penalty={self.presence_penalty}, _kwargs={self._kwargs})\"" }, { "identifier": "IChatClient", "path": "src/python/speck/chat/entities.py", "snippet": "class IChatClient(ABC):\n def debug_chat(\n 
self, prompt: \"Prompt\", config: \"ChatConfig\"\n ) -> (\"Prompt\", \"ChatConfig\"):\n data = run_debug_websocket(self._client, self, prompt, config)\n\n print(data)\n if data.get(\"prompt\") and data.get(\"config\"):\n prompt = Prompt(**data[\"prompt\"])\n config = ChatConfig(**data[\"config\"])\n\n return prompt, config\n\n @abstractmethod\n def chat(\n self,\n prompt: PromptTypes,\n config: Union[ChatConfig, NotGiven] = NOT_GIVEN,\n **config_kwargs,\n ) -> Union[Response, Stream]:\n pass\n\n @abstractmethod\n async def achat(\n self,\n prompt: PromptTypes,\n config: Union[ChatConfig, NotGiven] = NOT_GIVEN,\n **config_kwargs,\n ) -> Union[Response, Stream]:\n pass" }, { "identifier": "LogConfig", "path": "src/python/speck/chat/entities.py", "snippet": "class LogConfig(BaseModel):\n api_key: str\n endpoint: str = \"https://api.getspeck.ai\"\n\n class Config:\n extra = \"allow\"" }, { "identifier": "MessageChunk", "path": "src/python/speck/chat/entities.py", "snippet": "class MessageChunk(BaseModel):\n content: Union[str, None]\n\n def encode(self, encoding: str = \"utf-8\"):\n content = self.content or \"\"\n return content.encode(encoding)" }, { "identifier": "Prompt", "path": "src/python/speck/chat/entities.py", "snippet": "class Prompt(str):\n messages: list[Message]\n variables: Union[dict[str, str], None] = None\n\n def to_dict(self):\n return {\n \"messages\": self.messages,\n \"variables\": self.variables,\n }\n\n def __init__(\n self,\n messages: PromptTypes,\n variables: Union[dict[str, str], None] = None,\n **kwargs,\n ):\n if isinstance(messages, str):\n messages = [Message(role=\"user\", content=messages)]\n elif isinstance(messages, Message):\n messages = [messages]\n elif isinstance(messages, list):\n if all(isinstance(message, Message) for message in messages):\n pass\n elif all(isinstance(message, dict) for message in messages):\n messages = [\n Message(role=message[\"role\"], content=message[\"content\"])\n for message in messages\n ]\n else:\n raise ValueError(\n f\"Invalid type for messages: {type(messages)}\\n{messages}\"\n )\n\n self.messages = messages\n self.variables = variables\n super().__init__()\n\n @classmethod\n def create(\n cls, messages: PromptTypes, variables: dict[str, str] = None\n ) -> \"Prompt\":\n if isinstance(messages, cls):\n # Todo: clone object and add variables\n return messages\n return cls(messages=messages, variables=variables)\n\n @classmethod\n def _read(cls, lines: str) -> \"Prompt\":\n # Todo: add config parsing\n config = {}\n messages = []\n\n current_min_spaces = 0\n current_section = None\n current_message = []\n\n def add_message():\n nonlocal current_message, current_min_spaces\n if current_message:\n messages.append(\n Message(\n role=current_section,\n content=\"\\n\".join(\n [m[current_min_spaces:] for m in current_message]\n ),\n )\n )\n current_message = []\n current_min_spaces = 0\n\n for line in lines.split(\"\\n\"):\n line = line.rstrip(\"\\r\")\n if line.startswith(\"<\"):\n line = line.strip()\n add_message()\n current_section = line[1:-1].lower()\n elif current_section == \"config\" and \"=\" in line:\n key, value = line.split(\"=\", 1)\n config[key.strip()] = value.strip()\n elif current_section in [\"system\", \"user\", \"assistant\"]:\n min_spaces = len(line) - len(line.lstrip())\n if 0 < min_spaces < current_min_spaces or current_min_spaces == 0:\n current_min_spaces = min_spaces\n current_message.append(line)\n\n add_message()\n return cls(messages=messages)\n\n @classmethod\n def read(cls, path: str, name: 
Union[str, None] = None) -> \"Prompt\":\n with open(path, \"r\") as f:\n if name is not None:\n prompts = cls.read_all(path)\n return prompts[name]\n else:\n return cls._read(f.read())\n\n @classmethod\n def read_all(cls, path: str) -> dict[str, \"Prompt\"]:\n with open(path, \"r\") as f:\n prompts = {}\n lines = []\n current_prompt_name = None\n current_min_spaces = -1\n\n for line in f:\n line = line.rstrip(\"\\n\").rstrip(\"\\r\")\n if line.lstrip().startswith(\"<\"):\n min_spaces = len(line) - len(line.lstrip())\n stripped_line = line.strip()\n\n if stripped_line.startswith(\"<prompt\") and min_spaces == 0:\n if current_prompt_name:\n prompts[current_prompt_name] = cls._read(\n \"\\n\".join([m[current_min_spaces:] for m in lines])\n )\n current_prompt_name = stripped_line[8:-1].strip()\n current_min_spaces = -1\n lines = []\n elif stripped_line.startswith(\"</prompt>\") and min_spaces == 0:\n prompts[current_prompt_name] = cls._read(\n \"\\n\".join([m[current_min_spaces:] for m in lines])\n )\n current_prompt_name = None\n current_min_spaces = -1\n lines = []\n else:\n lines.append(line)\n if current_min_spaces == -1 or min_spaces < current_min_spaces:\n current_min_spaces = min_spaces\n else:\n lines.append(line)\n\n return prompts\n\n def _file(self):\n file = []\n for message in self.messages:\n file.append(f\"<{message.role.lower()}>\")\n for line in message.content.split(\"\\n\"):\n file.append(\" \" * 4 + line)\n return \"\\n\".join(file)\n\n @classmethod\n def write(cls, prompt: Union[\"Prompt\", dict[str, \"Prompt\"]], path: str):\n with open(path, \"w\") as f:\n if isinstance(prompt, dict):\n content = \"\"\n for name, prompt in prompt.items():\n content += f\"<prompt {name}>\\n\"\n content += \"\\n\".join(\n [\" \" * 4 + line for line in prompt._file().split(\"\\n\")]\n )\n content += \"\\n</prompt>\\n\\n\"\n f.write(content.strip())\n else:\n f.write(prompt._file())\n\n def __new__(\n cls,\n messages: PromptTypes,\n **kwargs,\n ):\n # Todo: Handle string, Message, and list[Message]\n instance = super(Prompt, cls).__new__(cls, str(messages))\n return instance\n\n @classmethod\n def from_openai(cls, messages: list[dict[str, str]]):\n return cls(\n messages=[\n Message(role=message[\"role\"], content=message[\"content\"])\n for message in messages\n ]\n )\n\n def to_list(self):\n return [\n {\n \"role\": message.role,\n \"content\": message.content.format_map(SafeDict(self.variables or {})),\n }\n for message in self.messages\n ]\n\n def to_dict(self):\n return {\n \"messages\": [\n {\"role\": message.role, \"content\": message.content}\n for message in self.messages\n ],\n \"variables\": self.variables or {},\n }\n\n @staticmethod\n def _apply_variables(\n messages: list[Message], variables: dict[str, str]\n ) -> list[Message]:\n return [\n Message(\n role=message.role,\n content=message.content.format_map(SafeDict(variables or {})),\n )\n for message in messages\n ]\n\n def _check_duplicate_keys(self, other_variables: dict[str, str]) -> dict[str, str]:\n duplicate_keys = set((self.variables or {}).keys()).intersection(\n set((other_variables or {}).keys())\n )\n return {\n key: self.variables[key]\n for key in duplicate_keys\n if self.variables[key] != other_variables[key]\n }\n\n def _remove_duplicate_keys_from_messages(\n self, other_variables: dict[str, str]\n ) -> list[Message]:\n messages = self.messages\n applied_variables = self._check_duplicate_keys(other_variables)\n if len(applied_variables) > 0:\n messages = self._apply_variables(self.messages, 
applied_variables)\n\n return messages\n\n def format(self, *args, **kwargs):\n # return self.__class__(\n # messages=[\n # Message(\n # role=message.role, content=message.content.format(*args, **kwargs)\n # )\n # for message in self.messages\n # ]\n # )\n\n messages = self._remove_duplicate_keys_from_messages(kwargs)\n return self.__class__(\n messages=[\n Message(role=message.role, content=message.content)\n for message in messages\n ],\n variables={**SafeDict(self.variables or {}), **kwargs},\n )\n\n def __add__(self, other):\n if isinstance(other, Message):\n return self.__class__(\n messages=self.messages + [other], variables={**(self.variables or {})}\n )\n elif isinstance(other, Prompt):\n # Check if there are duplicate keys\n messages = self._remove_duplicate_keys_from_messages(other.variables or {})\n\n return self.__class__(\n messages=messages + other.messages,\n variables={\n **SafeDict(self.variables or {}),\n **SafeDict(other.variables or {}),\n },\n )\n else:\n raise NotImplementedError\n\n def __str__(self):\n return (\n \"\\n\".join(\n [f\"{message.role}: {message.content}\" for message in self.messages]\n )\n + \"\\n\"\n + str(self.variables or {})\n )" }, { "identifier": "Response", "path": "src/python/speck/chat/entities.py", "snippet": "class Response(BaseModel):\n content: str\n prompt_tokens: Union[int, None] = None\n completion_tokens: Union[int, None] = None\n raw: Union[dict, None] = None\n\n def __init__(\n self,\n content: str,\n closed: bool = False,\n prompt_tokens: Union[int, None] = None,\n completion_tokens: Union[int, None] = None,\n raw: Union[dict, None] = None,\n **kwargs,\n ):\n super().__init__(\n content=content,\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n raw=raw,\n )\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n @classmethod\n def create(cls, response: ResponseTypes) -> \"Response\":\n if isinstance(response, cls):\n return response\n elif isinstance(response, str):\n return cls(content=response)\n else:\n raise NotImplementedError\n\n def __str__(self):\n return f\"Response({self.content}, raw={self.raw})\"" }, { "identifier": "Stream", "path": "src/python/speck/chat/entities.py", "snippet": "class Stream:\n # processor that has lambda which returns MessageDelta\n def __init__(\n self,\n client: \"Speck\",\n iterator: Iterator[Any],\n kwargs: dict,\n log_config: \"LogConfig\",\n processor: Callable[[Any], MessageChunk],\n ):\n self._client = client\n self.message: str = \"\"\n self.tokens: int = 0\n self._iterator = iterator\n self._kwargs = kwargs\n self._processor = processor\n self._has_logged = False\n self._closed = False\n self._log_config = log_config\n\n def _log(self):\n if not self._has_logged:\n self._has_logged = True\n\n kwargs = self._kwargs\n kwargs[\"prompt\"] = self._kwargs.get(\"prompt\", [])\n kwargs[\"temperature\"] = self._kwargs.get(\"temperature\", \"N/A\")\n kwargs[\"model\"] = self._kwargs.get(\"model\", \"N/A\")\n kwargs[\"response\"] = Response(\n content=self.message, raw={}, closed=True, completion_tokens=self.tokens\n )\n\n # Todo: add prompt_tokens using tiktoken\n ChatLogger.log(log_config=self._log_config, **kwargs)\n\n def _process(self, item) -> MessageChunk:\n return self._processor(item)\n\n def __next__(self) -> MessageChunk:\n try:\n if self._closed:\n raise StopIteration\n\n # next_item = None\n # while next_item is None:\n next_item = next(self._iterator)\n\n item: MessageChunk = self._process(next_item)\n if item.content:\n self.message += item.content\n 
self.tokens += 1\n return item\n except StopIteration:\n self._log()\n raise\n\n def __iter__(self) -> Iterator[MessageChunk]:\n return self\n\n def close(self):\n try:\n self._closed = True\n # todo: make this work for packages other than openai\n self._iterator.response.close()\n except AttributeError:\n pass" }, { "identifier": "IConnector", "path": "src/python/speck/connections/connector.py", "snippet": "class IConnector(ABC):\n _client: \"Speck\"\n\n def __init__(self, client: \"Speck\", provider: Providers):\n self._client = client\n self.provider = provider\n\n # @abstractmethod\n # def process_message(self, messages: Messages, model: str) -> str:\n # pass\n\n def _get_log_kwargs(self, prompt: Prompt, response: Response, **kwargs):\n return {\n \"provider\": self.provider,\n \"model\": kwargs.get(\"model\"),\n \"temperature\": kwargs.get(\"temperature\"),\n \"stream\": kwargs.get(\"stream\", False),\n \"prompt\": prompt,\n \"config\": kwargs,\n \"response\": response,\n }\n\n def log(\n self, *, log_config: LogConfig, prompt: Prompt, response: Response, **kwargs\n ):\n # Todo: refactor to use config.log_chat !!!\n ChatLogger.log(\n log_config=log_config,\n **self._get_log_kwargs(prompt, response, **kwargs),\n )\n\n def __str__(self):\n return f\"Client({self.provider.value})\"" }, { "identifier": "Providers", "path": "src/python/speck/connections/providers.py", "snippet": "class Providers(Enum):\n Anthropic = \"Anthropic\"\n AzureOpenAI = \"AzureOpenAI\"\n OpenAI = \"OpenAI\"\n CustomProvider = \"CustomProvider\"\n Replicate = \"Replicate\"" } ]
import json import httpx import requests from typing import Union from ..chat.entities import ( NOT_GIVEN, ChatConfig, IChatClient, LogConfig, MessageChunk, Prompt, Response, Stream, ) from .connector import IConnector from .providers import Providers
5422
""" Name: Anthropic URL: https://anthropic.ai/ Features: - Chat """ def _process_chunk(obj) -> MessageChunk: return MessageChunk(content=obj["completion"]) class AnthropicStream: def __init__(self, iterator): self.iterator = iterator self.closed = False def __iter__(self): return self def __next__(self): while True: if self.closed: raise StopIteration obj = next(self.iterator) line = obj.decode("utf-8") if line.startswith("data: "): data = json.loads(line[6:]) if data.get("stop_reason") is not None: self.closed = True if data.get("completion") is not None: return data else: continue class AnthropicResponse(Response): def __init__(self, obj): content = obj["completion"] super().__init__( content=content, prompt_tokens=None, completion_tokens=None, raw=obj, ) class AnthropicConnector(IConnector, IChatClient): def __init__(self, api_key: str = None, client: "Speck" = None): super().__init__(client=client, provider=Providers.Anthropic) if api_key is not None: self.api_key = api_key self.url = "https://api.anthropic.com/v1/complete" def _convert_messages_to_prompt(self, messages: Prompt) -> str: res = "" if messages.messages[0].role == "system": res = "System: " + messages.messages[0].content + "\n\n" for msg in messages.messages: if msg.role == "system": continue res += ( f"{'Human' if msg.role == 'user' else 'Assistant'}: " + msg.content + "\n\n" ) res += "Assistant:" return res def _process_kwargs(self, prompt: Prompt, config: ChatConfig, **config_kwargs): if config is NOT_GIVEN: config = ChatConfig(**config_kwargs) # Todo: convert to default config based on class param # Remove all None values all_kwargs = {k: v for k, v in vars(config).items() if v is not None} input = self._convert_messages_to_prompt(prompt) headers = { "anthropic-version": config.get("anthropic_version", "2023-06-01"), "content-type": "application/json", "x-api-key": self.api_key, } data = { "model": config.model, "prompt": input, "max_tokens_to_sample": config.max_tokens or 100, "stream": config.stream, "temperature": config.temperature, **config.chat_args, } blocked_kwargs = ["provider", "_log", "chat_args", "stream", "max_tokens"] for k, v in all_kwargs.items(): if k not in data and k not in blocked_kwargs: data[k] = v log_config: LogConfig = None if config_kwargs.get("_log"): if self._client.log_config: log_config = self._client.log_config elif not config_kwargs.get("log_config"): raise ValueError( "No log config found. Define the log config in the log or client." ) else: log_config = config_kwargs.get("log_config") return headers, data, all_kwargs, log_config def chat( self, prompt: Prompt, config: ChatConfig = NOT_GIVEN, **config_kwargs
""" Name: Anthropic URL: https://anthropic.ai/ Features: - Chat """ def _process_chunk(obj) -> MessageChunk: return MessageChunk(content=obj["completion"]) class AnthropicStream: def __init__(self, iterator): self.iterator = iterator self.closed = False def __iter__(self): return self def __next__(self): while True: if self.closed: raise StopIteration obj = next(self.iterator) line = obj.decode("utf-8") if line.startswith("data: "): data = json.loads(line[6:]) if data.get("stop_reason") is not None: self.closed = True if data.get("completion") is not None: return data else: continue class AnthropicResponse(Response): def __init__(self, obj): content = obj["completion"] super().__init__( content=content, prompt_tokens=None, completion_tokens=None, raw=obj, ) class AnthropicConnector(IConnector, IChatClient): def __init__(self, api_key: str = None, client: "Speck" = None): super().__init__(client=client, provider=Providers.Anthropic) if api_key is not None: self.api_key = api_key self.url = "https://api.anthropic.com/v1/complete" def _convert_messages_to_prompt(self, messages: Prompt) -> str: res = "" if messages.messages[0].role == "system": res = "System: " + messages.messages[0].content + "\n\n" for msg in messages.messages: if msg.role == "system": continue res += ( f"{'Human' if msg.role == 'user' else 'Assistant'}: " + msg.content + "\n\n" ) res += "Assistant:" return res def _process_kwargs(self, prompt: Prompt, config: ChatConfig, **config_kwargs): if config is NOT_GIVEN: config = ChatConfig(**config_kwargs) # Todo: convert to default config based on class param # Remove all None values all_kwargs = {k: v for k, v in vars(config).items() if v is not None} input = self._convert_messages_to_prompt(prompt) headers = { "anthropic-version": config.get("anthropic_version", "2023-06-01"), "content-type": "application/json", "x-api-key": self.api_key, } data = { "model": config.model, "prompt": input, "max_tokens_to_sample": config.max_tokens or 100, "stream": config.stream, "temperature": config.temperature, **config.chat_args, } blocked_kwargs = ["provider", "_log", "chat_args", "stream", "max_tokens"] for k, v in all_kwargs.items(): if k not in data and k not in blocked_kwargs: data[k] = v log_config: LogConfig = None if config_kwargs.get("_log"): if self._client.log_config: log_config = self._client.log_config elif not config_kwargs.get("log_config"): raise ValueError( "No log config found. Define the log config in the log or client." ) else: log_config = config_kwargs.get("log_config") return headers, data, all_kwargs, log_config def chat( self, prompt: Prompt, config: ChatConfig = NOT_GIVEN, **config_kwargs
) -> Union[AnthropicResponse, Stream]:
7
2023-11-15 05:46:05+00:00
8k
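The entry above exercises two recurring patterns: flattening a chat transcript into Anthropic's legacy Human:/Assistant: completion prompt (AnthropicConnector._convert_messages_to_prompt) and decoding "data: {...}" server-sent-event lines the way AnthropicStream does. Below is a minimal, self-contained sketch of both; the Message dataclass and the sample inputs are illustrative stand-ins, not part of the dataset entry.

import json
from dataclasses import dataclass


@dataclass
class Message:  # illustrative stand-in for speck's Message entity
    role: str
    content: str


def to_anthropic_prompt(messages):
    # An optional leading system message becomes a "System:" preamble.
    out = ""
    if messages and messages[0].role == "system":
        out = "System: " + messages[0].content + "\n\n"
    for m in messages:
        if m.role == "system":
            continue
        speaker = "Human" if m.role == "user" else "Assistant"
        out += f"{speaker}: {m.content}\n\n"
    return out + "Assistant:"  # the model completes from here


def parse_sse_line(raw):
    # Keep only lines shaped like: data: {"completion": ..., "stop_reason": ...}
    line = raw.decode("utf-8")
    if not line.startswith("data: "):
        return None
    return json.loads(line[6:])


print(to_anthropic_prompt([Message("user", "Hello")]))
print(parse_sse_line(b'data: {"completion": " Hi", "stop_reason": null}'))

The stop_reason field in the parsed payload is what the streaming wrapper uses to mark itself closed and raise StopIteration on the next pull.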
hahnyuan/ASVD4LLM
huggingface_repos/build_asvd_repo.py
[ { "identifier": "evaluate_model", "path": "evaluate.py", "snippet": "@torch.no_grad()\ndef evaluate_model(\n model,\n tokenizer,\n model_name,\n tasks,\n eval_ppl=\"\",\n num_fewshot=0,\n limit=-1,\n batch_size=1,\n):\n \"\"\"\n model: model name\n limit: number of test samples for debug, set to -1 is no limit\n tasks: str tasks are split by ,\n num_fewshot: Number of examples in few-shot context\n eval_ppl: str datasets are split by , such as 'wikitext2,ptb,c4'\n \"\"\"\n lm = EvalLM(model, tokenizer, batch_size=batch_size)\n results = {}\n if eval_ppl:\n for dataset in eval_ppl.split(\",\"):\n cache_testloader = (\n f\"/tmp/{dataset}_testloader_{model_name.replace('/', '_')}_all.cache\"\n )\n if os.path.exists(cache_testloader):\n testloader = torch.load(cache_testloader)\n # print(f\"load calibration from {cache_testloader}\")\n else:\n testloader = get_eval_loaders(dataset, tokenizer)\n torch.save(testloader, cache_testloader)\n # print(dataset)\n testenc = testloader.input_ids\n nsamples = testenc.numel() // lm.seqlen\n use_cache = lm.model.config.use_cache\n lm.model.config.use_cache = False\n lm.model.eval()\n nlls = []\n\n for i in tqdm(range(nsamples)):\n batch = testenc[:, (i * lm.seqlen) : ((i + 1) * lm.seqlen)].to(\n lm.device\n )\n outputs = lm.model.model(batch)\n hidden_states = outputs[0] # .to(lm.model.lm_head.weight.device)\n logits = lm.model.lm_head(hidden_states) # .contiguous()\n shift_logits = logits[:, :-1, :] # .contiguous()\n shift_labels = testenc[:, (i * lm.seqlen) : ((i + 1) * lm.seqlen)][\n :, 1:\n ].to(lm.device)\n loss_fct = nn.CrossEntropyLoss()\n loss = loss_fct(\n shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1),\n )\n neg_log_likelihood = loss.float() * lm.seqlen\n nlls.append(neg_log_likelihood)\n if i == limit:\n break\n # if i == 1:\n # print(\n # \"memory_allocated\",\n # i,\n # torch.cuda.memory_allocated() / 1024 / 1024,\n # \"max memory_allocated\",\n # torch.cuda.max_memory_allocated() / 1024**2,\n # )\n\n ppl = torch.exp(torch.stack(nlls).sum() / (len(nlls) * lm.seqlen))\n print(dataset, ppl.item())\n lm.model.config.use_cache = use_cache\n # pprint(model)\n results[dataset] = ppl.item()\n if tasks == \"mmlu\":\n tasks = 
\"hendrycksTest-abstract_algebra,hendrycksTest-anatomy,hendrycksTest-astronomy,hendrycksTest-business_ethics,hendrycksTest-clinical_knowledge,hendrycksTest-college_biology,hendrycksTest-college_chemistry,hendrycksTest-college_computer_science,hendrycksTest-college_mathematics,hendrycksTest-college_medicine,hendrycksTest-college_physics,hendrycksTest-computer_security,hendrycksTest-conceptual_physics,hendrycksTest-econometrics,hendrycksTest-electrical_engineering,hendrycksTest-elementary_mathematics,hendrycksTest-formal_logic,hendrycksTest-global_facts,hendrycksTest-high_school_biology,hendrycksTest-high_school_chemistry,hendrycksTest-high_school_computer_science,hendrycksTest-high_school_european_history,hendrycksTest-high_school_geography,hendrycksTest-high_school_government_and_politics,hendrycksTest-high_school_macroeconomics,hendrycksTest-high_school_mathematics,hendrycksTest-high_school_microeconomics,hendrycksTest-high_school_physics,hendrycksTest-high_school_psychology,hendrycksTest-high_school_statistics,hendrycksTest-high_school_us_history,hendrycksTest-high_school_world_history,hendrycksTest-human_aging,hendrycksTest-human_sexuality,hendrycksTest-international_law,hendrycksTest-jurisprudence,hendrycksTest-logical_fallacies,hendrycksTest-machine_learning,hendrycksTest-management,hendrycksTest-marketing,hendrycksTest-medical_genetics,hendrycksTest-miscellaneous,hendrycksTest-moral_disputes,hendrycksTest-moral_scenarios,hendrycksTest-nutrition,hendrycksTest-philosophy,hendrycksTest-prehistory,hendrycksTest-professional_accounting,hendrycksTest-professional_law,hendrycksTest-professional_medicine,hendrycksTest-professional_psychology,hendrycksTest-public_relations,hendrycksTest-security_studies,hendrycksTest-sociology,hendrycksTest-us_foreign_policy,hendrycksTest-virology,hendrycksTest-world_religions\"\n if tasks == \"llmqat\":\n # tasks = \"boolq,piqa,hellaswag,winogrande,arc_easy,arc_challenge,openbookqa\"\n tasks = \"lambada_openai,openbookqa\"\n if tasks != \"\":\n t_results = evaluator.simple_evaluate(\n lm,\n tasks=tasks.split(\",\"),\n batch_size=batch_size,\n num_fewshot=num_fewshot,\n limit=None if limit == -1 else limit,\n no_cache=True,\n )\n t_results = t_results[\"results\"]\n acc_list = [\n t_results[key][\"acc\"] for key in t_results.keys() if \"acc\" in t_results[key]\n ]\n t_results[\"mean\"] = sum(acc_list) / len(acc_list)\n results.update(t_results)\n print(results)\n # print mean\n print(f\"\\n\\n===== mean acc: {sum(acc_list)/len(acc_list)} =====\\n\\n\")\n\n return results" }, { "identifier": "get_calib_data", "path": "datautils.py", "snippet": "def get_calib_data(name, tokenizer, model_id, nsamples, seqlen=2048, seed=3):\n print(f\" get_ptq_calib_data {name}, nsamples={nsamples}, seqlen={seqlen}, {seed}\")\n cache_file = (\n f\"cache/{name}_{model_id.replace('/','_')}_{nsamples}_{seqlen}_{seed}.pt\"\n )\n if not os.path.exists(\"cache\"):\n os.makedirs(\"cache\")\n if os.path.exists(cache_file):\n traindataset = torch.load(cache_file)\n return traindataset\n if name == \"c4\":\n traindata = load_dataset(\n \"allenai/c4\",\n \"allenai--c4\",\n data_files={\"train\": \"en/c4-train.00000-of-01024.json.gz\"},\n split=\"train\",\n )\n tot_text = \"\\n\\n\".join(traindata[\"text\"])\n elif name == \"wikitext2\":\n traindata = load_dataset(\"wikitext\", \"wikitext-2-raw-v1\", split=\"train\")\n tot_text = \"\\n\\n\".join(traindata[\"text\"])\n else:\n raise NotImplementedError\n print(f\"tot_text={len(tot_text)}\")\n traindataset = []\n for _ in range(nsamples):\n i 
= random.randint(0, len(tot_text) - seqlen - 1)\n j = i + seqlen * 10\n trainenc = tokenizer(tot_text[i:j], return_tensors=\"pt\")\n inp = trainenc.input_ids[:, :seqlen]\n attention_mask = torch.ones_like(inp)\n traindataset.append({\"input_ids\": inp, \"attention_mask\": attention_mask})\n torch.save(traindataset, cache_file)\n return traindataset" }, { "identifier": "calib_input_distribution", "path": "act_aware_utils.py", "snippet": "@torch.no_grad()\ndef calib_input_distribution(model, calib_loader, method, use_cache=True):\n model_id = model.config._name_or_path\n cache_file = (\n f\"cache/{model_id.replace('/','_')}_calib_input_distribution_{method}.pt\"\n )\n if os.path.exists(cache_file) and use_cache:\n all_scaling_diag_matrix = torch.load(cache_file, map_location=\"cpu\")\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module.scaling_diag_matrix = all_scaling_diag_matrix[name].to(\n module.weight.device\n )\n return\n model.eval()\n # set hook for every Linear layers\n\n def hook(module, input, output):\n if \"abs_mean\" in method:\n abs_mean = input[0].abs().mean(dim=-2).detach().view(-1)\n module.scaling_diag_matrix += abs_mean\n elif \"abs_max\" in method:\n abs_max = input[0].abs().amax(dim=-2).detach().view(-1)\n module.scaling_diag_matrix = torch.where(\n abs_max > module.scaling_diag_matrix,\n abs_max,\n module.scaling_diag_matrix,\n )\n # abs_max = input[0].abs().amax(dim=-2).detach().view(-1)\n # module.scaling_diag_matrix += abs_max\n\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module.scaling_diag_matrix = 0\n module.register_forward_hook(hook)\n\n # get activation distribution\n for batch in tqdm(calib_loader):\n # print(batch)\n batch = {k: v.to(model.device) for k, v in batch.items()}\n model(**batch)\n\n # remove and save scaling_diag_matrix\n all_scaling_diag_matrix = {}\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module._forward_hooks.clear()\n all_scaling_diag_matrix[name] = module.scaling_diag_matrix\n torch.save(all_scaling_diag_matrix, cache_file)" }, { "identifier": "calib_fisher_info", "path": "act_aware_utils.py", "snippet": "def calib_fisher_info(model, calib_loader, use_cache=True):\n model_id = model.config._name_or_path\n cache_file = f\"cache/{model_id.replace('/','_')}_calib_fisher_info.pt\"\n if os.path.exists(cache_file) and use_cache:\n all_fisher_info = torch.load(cache_file, map_location=\"cpu\")\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module.fisher_info = all_fisher_info[name].to(module.weight.device)\n return\n model.eval()\n\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module.fisher_info = 0\n\n # get fisher info\n for batch in tqdm(calib_loader):\n input_ids = batch[\"input_ids\"][:, :-1].to(model.device)\n labels = batch[\"input_ids\"][:, 1:].to(model.device)\n out = model(input_ids=input_ids, labels=labels)\n out[0].backward()\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module.fisher_info += module.weight.grad.detach().pow(2).mean(0)\n model.zero_grad()\n\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module.fisher_info = module.fisher_info.div(len(calib_loader)).sqrt()\n\n # remove and save fisher_info\n all_fisher_info = {}\n for name, module in model.named_modules():\n if isinstance(module, nn.Linear):\n module._forward_hooks.clear()\n all_fisher_info[name] = 
module.fisher_info\n torch.save(all_fisher_info, cache_file)" }, { "identifier": "calib_sensitivity_ppl", "path": "sensitivity.py", "snippet": "@torch.no_grad()\ndef calib_sensitivity_ppl(model, calib_loader, args, use_cache=True):\n model_id = model.config._name_or_path\n cache_file = f\"cache/{model_id.replace('/','_')}_sensitivity_{args.scaling_method}_{args.alpha}_{args.n_calib_samples}_{args.calib_dataset}.pt\"\n if os.path.exists(cache_file) and use_cache:\n sensitivity_dict = torch.load(cache_file, map_location=\"cpu\")\n return sensitivity_dict\n model.eval()\n\n full_name_dict = {module: name for name, module in model.named_modules()}\n linear_info = {}\n modules = [model]\n while len(modules) > 0:\n submodule = modules.pop()\n for name, raw_linear in submodule.named_children():\n if isinstance(raw_linear, nn.Linear):\n full_name = full_name_dict[raw_linear]\n linear_info[raw_linear] = {\n \"father\": submodule,\n \"name\": name,\n \"full_name\": full_name,\n }\n else:\n modules.append(raw_linear)\n\n sensitivity_dict = {}\n param_ratio_candidates = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n input_ids = torch.cat([_[\"input_ids\"] for _ in calib_loader], 0)\n print(f\"input_ids.shape={input_ids.shape}\")\n pbar = tqdm(total=len(linear_info) * len(param_ratio_candidates))\n for raw_linear, info in linear_info.items():\n sensitivity_dict[info[\"full_name\"]] = {}\n for param_ratio in param_ratio_candidates:\n svd_linear = SVDLinear.from_linear(\n raw_linear,\n param_ratio=param_ratio,\n alpha=args.alpha,\n act_aware=True,\n )\n setattr(info[\"father\"], info[\"name\"], svd_linear)\n\n ppl = evaluate_perplexity(model, input_ids, args.n_calib_samples)\n sensitivity_dict[info[\"full_name\"]][param_ratio] = ppl\n print(f\"{info['full_name']} {param_ratio} {ppl}\")\n pbar.update(1)\n setattr(info[\"father\"], info[\"name\"], raw_linear)\n torch.save(sensitivity_dict, cache_file)\n return sensitivity_dict" }, { "identifier": "calib_sensitivity_stable_rank", "path": "sensitivity.py", "snippet": "@torch.no_grad()\ndef calib_sensitivity_stable_rank(model, calib_loader, args, use_cache=True):\n model_id = model.config._name_or_path\n cache_file = f\"cache/{model_id.replace('/','_')}_sensitivity_stable_rank_{args.scaling_method}_{args.alpha}_{args.n_calib_samples}_{args.calib_dataset}.pt\"\n if os.path.exists(cache_file) and use_cache:\n sensitivity_dict = torch.load(cache_file, map_location=\"cpu\")\n return sensitivity_dict\n model.eval()\n\n full_name_dict = {module: name for name, module in model.named_modules()}\n linear_info = {}\n modules = [model]\n while len(modules) > 0:\n submodule = modules.pop()\n for name, raw_linear in submodule.named_children():\n if isinstance(raw_linear, nn.Linear):\n full_name = full_name_dict[raw_linear]\n linear_info[raw_linear] = {\n \"father\": submodule,\n \"name\": name,\n \"full_name\": full_name,\n }\n else:\n modules.append(raw_linear)\n\n sensitivity_dict = {}\n param_ratio_candidates = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n input_ids = torch.cat([_[\"input_ids\"] for _ in calib_loader], 0)\n print(f\"input_ids.shape={input_ids.shape}\")\n pbar = tqdm(total=len(linear_info) * len(param_ratio_candidates))\n for raw_linear, info in linear_info.items():\n sensitivity_dict[info[\"full_name\"]] = {}\n\n # stable rank is defined to be the ratio between squared Frobenius norm and the squared spectral norm of a matrix\n w=raw_linear.weight\n w=w#*raw_linear.scaling_diag_matrix.view(1,-1)**args.alpha\n w_fro=torch.norm(w, p='fro')**2\n 
_,singular_values,_=torch.svd(w.float(),compute_uv=False)\n spectral_norm=torch.max(singular_values)\n w_spec=spectral_norm**2\n sr=(w_fro/w_spec)**0.5\n\n for param_ratio in param_ratio_candidates:\n sensitivity_dict[info[\"full_name\"]][param_ratio] = -sr*param_ratio**0.1\n pbar.update(1)\n torch.save(sensitivity_dict, cache_file)\n return sensitivity_dict" }, { "identifier": "rtn_quant_sequential", "path": "quantization.py", "snippet": "@torch.no_grad()\ndef rtn_quant_sequential(model, wbits):\n print(\"Starting ...\")\n\n if \"opt\" in model.config._name_or_path:\n layers = model.model.decoder.layers\n elif \"llama\" in model.config._name_or_path:\n layers = model.model.layers\n for i in range(len(layers)):\n layer = layers[i].to(model.device)\n subset = find_layers(layer)\n for name in subset:\n quantizer = Quantizer()\n quantizer.configure(\n wbits, perchannel=True, sym=False, mse=False\n )\n quantizer.find_params(subset[name].weight.data.float(), weight=True)\n wq=quantizer.quantize(subset[name].weight.data.float())\n subset[name].weight.data = wq.to(subset[name].weight.data.dtype)\n print(f\"Quantizing {name} finished\")\n del layer\n torch.cuda.empty_cache()" }, { "identifier": "binary_search_truncation_rank", "path": "binary_search.py", "snippet": "def binary_search_truncation_rank(model, sensitivity_dict, calib_loader, args):\n module_dict = {name: module for name, module in model.named_modules()}\n full_name_dict = {module: name for name, module in model.named_modules()}\n linear_info = {}\n modules = [model]\n while len(modules) > 0:\n submodule = modules.pop()\n for name, raw_linear in submodule.named_children():\n if isinstance(raw_linear, nn.Linear):\n full_name = full_name_dict[raw_linear]\n linear_info[raw_linear] = {\n \"father\": submodule,\n \"name\": name,\n \"full_name\": full_name,\n }\n else:\n modules.append(raw_linear)\n\n sensitivity_list = []\n for layername, v in sensitivity_dict.items():\n for ratio, ppl in v.items():\n sensitivity_list.append((layername, ratio, ppl))\n sorted_sensitive_list = sorted(sensitivity_list, key=lambda x: -x[2])\n\n # binary search\n high = len(sorted_sensitive_list) - 1\n low = 0\n assert args.ppl_target > 0 or args.param_ratio_target > 0\n\n input_ids = torch.cat([_[\"input_ids\"] for _ in calib_loader], 0)\n while low < high:\n mid = (low + high) // 2\n layers_min_ratio = {layername: 1 for layername in sensitivity_dict.keys()}\n for layername, ratio, ppl in sorted_sensitive_list[mid:]:\n layers_min_ratio[layername] = min(layers_min_ratio[layername], ratio)\n tot_params = 0\n compress_params = 0\n if args.ppl_target > 0:\n for layername, ratio in layers_min_ratio.items():\n raw_linear = module_dict[layername]\n info = linear_info[raw_linear]\n svd_linear = SVDLinear.from_linear(\n raw_linear,\n param_ratio=ratio,\n alpha=args.alpha,\n act_aware=args.act_aware,\n sigma_fuse=args.sigma_fuse,\n )\n setattr(info[\"father\"], info[\"name\"], svd_linear)\n tot_params += raw_linear.weight.numel()\n compress_params += raw_linear.weight.numel() * ratio\n ppl = evaluate_perplexity(model, input_ids, args.n_calib_samples)\n param_ratio = compress_params / tot_params\n msg = f\"low={low} mid={mid}, high={high}, ppl={ppl}, param_ratio={param_ratio}\"\n print(msg)\n if ppl < args.ppl_target:\n high = mid\n else:\n low = mid + 1\n else:\n for layername, ratio in layers_min_ratio.items():\n raw_linear = module_dict[layername]\n tot_params += raw_linear.weight.numel()\n compress_params += raw_linear.weight.numel() * ratio\n param_ratio = 
compress_params / tot_params\n msg = f\"low={low} mid={mid}, high={high}, param_ratio={param_ratio}({compress_params}/{tot_params})\"\n print(msg)\n if param_ratio > args.param_ratio_target:\n high = mid\n else:\n low = mid + 1\n\n print(f\"Searching finished, decomposing layers...\")\n layers_min_ratio = {layername: 1 for layername in sensitivity_dict.keys()}\n for layername, ratio, ppl in sorted_sensitive_list[mid:]:\n layers_min_ratio[layername] = min(layers_min_ratio[layername], ratio)\n for layername, ratio in tqdm(layers_min_ratio.items()):\n # set ratio\n raw_linear = module_dict[layername]\n info = linear_info[raw_linear]\n svd_linear = SVDLinear.from_linear(\n raw_linear,\n param_ratio=ratio,\n alpha=args.alpha,\n act_aware=args.act_aware,\n sigma_fuse=args.sigma_fuse,\n )\n setattr(info[\"father\"], info[\"name\"], svd_linear)" }, { "identifier": "SVDLinear", "path": "modules/svd_linear.py", "snippet": "class SVDLinear(nn.Module):\n def __init__(self, U, S, V, bias=None,sigma_fuse='UV') -> None:\n super().__init__()\n self.ALinear = nn.Linear(U.size(1), U.size(0), bias=bias is not None)\n \n if bias is not None:\n self.ALinear.bias.data = bias\n self.BLinear = nn.Linear(V.size(1), V.size(0), bias=False)\n self.truncation_rank=S.size(0)\n if sigma_fuse == 'UV':\n self.ALinear.weight.data = U.mul(S.sqrt()).contiguous()\n self.BLinear.weight.data = V.t().mul(S.sqrt().view(-1, 1)).contiguous()\n elif sigma_fuse == 'U':\n self.ALinear.weight.data = U.mul(S).contiguous()\n self.BLinear.weight.data = V.t().contiguous()\n elif sigma_fuse == 'V':\n self.ALinear.weight.data = U.contiguous()\n self.BLinear.weight.data = V.t().mul(S.view(-1, 1)).contiguous()\n \n\n @staticmethod\n def from_linear(\n linear: nn.Linear,\n param_ratio: float,\n act_aware=False,\n ic_split=1,\n oc_split=1,\n alpha=1,\n sigma_fuse=\"UV\"\n ):\n if param_ratio >= 1:\n return linear\n n_params = linear.weight.numel()\n compressed_params = int(n_params * param_ratio)\n assert ic_split == 1 or oc_split == 1\n rank = compressed_params // (linear.in_features + linear.out_features)\n # print(\"rank\", rank)\n w = linear.weight.data.float()\n if act_aware:\n scaling_diag_matrix = 1 # avoid zero division\n if hasattr(linear, \"scaling_diag_matrix\"):\n # print(\"WARNING: scaling_diag_matrix is used\")\n scaling_diag_matrix *= linear.scaling_diag_matrix**alpha\n # scaling_diag_matrix *= linear.scaling_diag_matrix**0.5\n if hasattr(linear, \"fisher_info\"):\n scaling_diag_matrix *= linear.fisher_info**alpha\n # scaling_diag_matrix *= linear.fisher_info**1\n # if not (scaling_diag_matrix == scaling_diag_matrix).all():\n # breakpoint()\n scaling_diag_matrix += 1e-6 # avoid zero division\n w = w * scaling_diag_matrix.view(1, -1)\n Us = []\n Ss = []\n Vs = []\n try:\n U, S, V = torch.svd_lowrank(w, q=rank)\n except:\n print(f\"svd failed for {linear}, disable act_aware\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n .to(linear.weight.device)\n )\n if act_aware:\n V = V / scaling_diag_matrix.view(-1, 1)\n Us = [U]\n Ss = [S]\n Vs = [V]\n\n if linear.bias is not None:\n bias = linear.bias.data\n else:\n bias = None\n\n # nan or inf check\n for S in Ss:\n if (S!=S).any():\n print(\"nan in S\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n .to(linear.weight.device)\n )\n for U in Us:\n if (U!=U).any():\n print(\"nan in U\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n 
.to(linear.weight.device)\n )\n for V in Vs:\n if (V!=V).any():\n print(\"nan in V\")\n return (\n nn.Linear(linear.in_features, linear.out_features)\n .to(linear.weight.dtype)\n .to(linear.weight.device)\n )\n\n assert len(Us) == len(Ss) == len(Vs) == 1\n new_linear = SVDLinear(Us[0], Ss[0], Vs[0], bias,sigma_fuse)\n return new_linear.to(linear.weight.dtype)\n\n def forward(self, inp):\n # compute USV^Tx + b\n y = self.BLinear(inp)\n y = self.ALinear(y)\n return y" } ]
import sys import argparse import torch import os import json from transformers import AutoModelForCausalLM, AutoTokenizer, OPTForCausalLM from transformers.models.opt.configuration_opt import OPTConfig from evaluate import evaluate_model from datautils import get_calib_data from act_aware_utils import calib_input_distribution, calib_fisher_info from sensitivity import calib_sensitivity_ppl, calib_sensitivity_stable_rank from quantization import rtn_quant_sequential from binary_search import binary_search_truncation_rank from modules.svd_linear import SVDLinear
6422
sys.path.append(".") def main(args): model_id = args.model_id # Load model tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True ) # sensitivity calibration calib_loader = get_calib_data(args.calib_dataset, tokenizer, model_id, 256) if "fisher" in args.scaling_method:
sys.path.append(".") def main(args): model_id = args.model_id # Load model tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="auto", torch_dtype=torch.float16, trust_remote_code=True ) # sensitivity calibration calib_loader = get_calib_data(args.calib_dataset, tokenizer, model_id, 256) if "fisher" in args.scaling_method:
calib_fisher_info(model, calib_loader, args.use_cache)
3
2023-11-10 02:18:36+00:00
8k
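The core operation in the entry above is SVDLinear.from_linear: a rank-truncated, optionally activation-aware factorization of an nn.Linear. Below is a compact sketch of that idea with the caching, sensitivity-search, and NaN-guard machinery stripped away; the function name and the random scaling vector are illustrative choices, and sqrt(S) is split across both factors as in the snippet's sigma_fuse="UV" path.

import torch
import torch.nn as nn


@torch.no_grad()
def svd_compress_linear(linear, param_ratio, scaling=None):
    # Pick the rank so the two factors together hold roughly param_ratio of
    # the original parameter count: rank * (in + out) ~= ratio * in * out.
    rank = int(linear.weight.numel() * param_ratio) // (
        linear.in_features + linear.out_features
    )
    w = linear.weight.data.float()
    if scaling is not None:          # activation-aware: emphasize input
        w = w * scaling.view(1, -1)  # channels with large activation stats
    U, S, V = torch.svd_lowrank(w, q=rank)
    if scaling is not None:
        V = V / scaling.view(-1, 1)  # fold the scaling back out of V
    B = nn.Linear(linear.in_features, rank, bias=False)
    A = nn.Linear(rank, linear.out_features, bias=linear.bias is not None)
    B.weight.data = (V * S.sqrt()).t().contiguous()
    A.weight.data = (U * S.sqrt()).contiguous()
    if linear.bias is not None:
        A.bias.data = linear.bias.data
    return nn.Sequential(B, A)


layer = nn.Linear(256, 256)
compressed = svd_compress_linear(layer, 0.5, scaling=torch.rand(256) + 1e-6)
print(compressed(torch.randn(4, 256)).shape)  # torch.Size([4, 256])

At param_ratio=0.5 the factorized layer stores about half the weights; the sensitivity calibration and binary search in the entry exist only to choose a per-layer ratio under a perplexity or parameter budget.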
chaiNNer-org/spandrel
src/spandrel/architectures/GFPGAN/arch/gfpganv1_arch.py
[ { "identifier": "FusedLeakyReLU", "path": "src/spandrel/architectures/GFPGAN/arch/fused_act.py", "snippet": "class FusedLeakyReLU(nn.Module):\n def __init__(self, channel, negative_slope=0.2, scale=2**0.5):\n super().__init__()\n\n self.bias = nn.Parameter(torch.zeros(channel))\n self.negative_slope = negative_slope\n self.scale = scale\n\n def forward(self, input):\n return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)" }, { "identifier": "ConvLayer", "path": "src/spandrel/architectures/GFPGAN/arch/stylegan2_arch.py", "snippet": "class ConvLayer(nn.Sequential):\n \"\"\"Conv Layer used in StyleGAN2 Discriminator.\n\n Args:\n in_channels (int): Channel number of the input.\n out_channels (int): Channel number of the output.\n kernel_size (int): Kernel size.\n downsample (bool): Whether downsample by a factor of 2.\n Default: False.\n resample_kernel (list[int]): A list indicating the 1D resample\n kernel magnitude. A cross production will be applied to\n extent 1D resample kernel to 2D resample kernel.\n Default: (1, 3, 3, 1).\n bias (bool): Whether with bias. Default: True.\n activate (bool): Whether use activateion. Default: True.\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size,\n downsample=False,\n resample_kernel=(1, 3, 3, 1),\n bias=True,\n activate=True,\n ):\n layers = []\n # downsample\n if downsample:\n layers.append(\n UpFirDnSmooth(\n resample_kernel,\n upsample_factor=1,\n downsample_factor=2,\n kernel_size=kernel_size,\n )\n )\n stride = 2\n self.padding = 0\n else:\n stride = 1\n self.padding = kernel_size // 2\n # conv\n layers.append(\n EqualConv2d(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=self.padding,\n bias=bias and not activate,\n )\n )\n # activation\n if activate:\n if bias:\n layers.append(FusedLeakyReLU(out_channels))\n else:\n layers.append(ScaledLeakyReLU(0.2))\n\n super().__init__(*layers)" }, { "identifier": "EqualConv2d", "path": "src/spandrel/architectures/GFPGAN/arch/stylegan2_arch.py", "snippet": "class EqualConv2d(nn.Module):\n \"\"\"Equalized Linear as StyleGAN2.\n\n Args:\n in_channels (int): Channel number of the input.\n out_channels (int): Channel number of the output.\n kernel_size (int): Size of the convolving kernel.\n stride (int): Stride of the convolution. Default: 1\n padding (int): Zero-padding added to both sides of the input.\n Default: 0.\n bias (bool): If ``True``, adds a learnable bias to the output.\n Default: ``True``.\n bias_init_val (float): Bias initialized value. 
Default: 0.\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n bias=True,\n bias_init_val=0,\n ):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.scale = 1 / math.sqrt(in_channels * kernel_size**2)\n\n self.weight = nn.Parameter(\n torch.randn(out_channels, in_channels, kernel_size, kernel_size)\n )\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))\n else:\n self.register_parameter(\"bias\", None)\n\n def forward(self, x):\n out = F.conv2d(\n x,\n self.weight * self.scale,\n bias=self.bias,\n stride=self.stride,\n padding=self.padding,\n )\n\n return out\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(in_channels={self.in_channels}, \"\n f\"out_channels={self.out_channels}, \"\n f\"kernel_size={self.kernel_size},\"\n f\" stride={self.stride}, padding={self.padding}, \"\n f\"bias={self.bias is not None})\"\n )" }, { "identifier": "EqualLinear", "path": "src/spandrel/architectures/GFPGAN/arch/stylegan2_arch.py", "snippet": "class EqualLinear(nn.Module):\n \"\"\"Equalized Linear as StyleGAN2.\n\n Args:\n in_channels (int): Size of each sample.\n out_channels (int): Size of each output sample.\n bias (bool): If set to ``False``, the layer will not learn an additive\n bias. Default: ``True``.\n bias_init_val (float): Bias initialized value. Default: 0.\n lr_mul (float): Learning rate multiplier. Default: 1.\n activation (None | str): The activation after ``linear`` operation.\n Supported: 'fused_lrelu', None. Default: None.\n \"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n bias=True,\n bias_init_val=0,\n lr_mul=1,\n activation=None,\n ):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.lr_mul = lr_mul\n self.activation = activation\n if self.activation not in [\"fused_lrelu\", None]:\n raise ValueError(\n f\"Wrong activation value in EqualLinear: {activation}\"\n \"Supported ones are: ['fused_lrelu', None].\"\n )\n self.scale = (1 / math.sqrt(in_channels)) * lr_mul\n\n self.weight = nn.Parameter(torch.randn(out_channels, in_channels).div_(lr_mul))\n if bias:\n self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val))\n else:\n self.register_parameter(\"bias\", None)\n\n def forward(self, x):\n if self.bias is None:\n bias = None\n else:\n bias = self.bias * self.lr_mul\n if self.activation == \"fused_lrelu\":\n out = F.linear(x, self.weight * self.scale)\n out = fused_leaky_relu(out, bias)\n else:\n out = F.linear(x, self.weight * self.scale, bias=bias)\n return out\n\n def __repr__(self):\n return (\n f\"{self.__class__.__name__}(in_channels={self.in_channels}, \"\n f\"out_channels={self.out_channels}, bias={self.bias is not None})\"\n )" }, { "identifier": "ResBlock", "path": "src/spandrel/architectures/GFPGAN/arch/stylegan2_arch.py", "snippet": "class ResBlock(nn.Module):\n \"\"\"Residual block used in StyleGAN2 Discriminator.\n\n Args:\n in_channels (int): Channel number of the input.\n out_channels (int): Channel number of the output.\n resample_kernel (list[int]): A list indicating the 1D resample\n kernel magnitude. 
A cross production will be applied to\n extent 1D resample kernel to 2D resample kernel.\n Default: (1, 3, 3, 1).\n \"\"\"\n\n def __init__(self, in_channels, out_channels, resample_kernel=(1, 3, 3, 1)):\n super().__init__()\n\n self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True)\n self.conv2 = ConvLayer(\n in_channels,\n out_channels,\n 3,\n downsample=True,\n resample_kernel=resample_kernel,\n bias=True,\n activate=True,\n )\n self.skip = ConvLayer(\n in_channels,\n out_channels,\n 1,\n downsample=True,\n resample_kernel=resample_kernel,\n bias=False,\n activate=False,\n )\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.conv2(out)\n skip = self.skip(x)\n out = (out + skip) / math.sqrt(2)\n return out" }, { "identifier": "ScaledLeakyReLU", "path": "src/spandrel/architectures/GFPGAN/arch/stylegan2_arch.py", "snippet": "class ScaledLeakyReLU(nn.Module):\n \"\"\"Scaled LeakyReLU.\n\n Args:\n negative_slope (float): Negative slope. Default: 0.2.\n \"\"\"\n\n def __init__(self, negative_slope=0.2):\n super().__init__()\n self.negative_slope = negative_slope\n\n def forward(self, x):\n out = F.leaky_relu(x, negative_slope=self.negative_slope)\n return out * math.sqrt(2)" }, { "identifier": "StyleGAN2Generator", "path": "src/spandrel/architectures/GFPGAN/arch/stylegan2_arch.py", "snippet": "class StyleGAN2Generator(nn.Module):\n \"\"\"StyleGAN2 Generator.\n\n Args:\n out_size (int): The spatial size of outputs.\n num_style_feat (int): Channel number of style features. Default: 512.\n num_mlp (int): Layer number of MLP style layers. Default: 8.\n channel_multiplier (int): Channel multiplier for large networks of\n StyleGAN2. Default: 2.\n resample_kernel (list[int]): A list indicating the 1D resample kernel\n magnitude. A cross production will be applied to extent 1D resample\n kernel to 2D resample kernel. Default: (1, 3, 3, 1).\n lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01.\n narrow (float): Narrow ratio for channels. 
Default: 1.0.\n \"\"\"\n\n def __init__(\n self,\n out_size,\n num_style_feat=512,\n num_mlp=8,\n channel_multiplier=2,\n resample_kernel=(1, 3, 3, 1),\n lr_mlp=0.01,\n narrow=1,\n ):\n super().__init__()\n # Style MLP layers\n self.num_style_feat = num_style_feat\n style_mlp_layers = [NormStyleCode()]\n for i in range(num_mlp):\n style_mlp_layers.append(\n EqualLinear(\n num_style_feat,\n num_style_feat,\n bias=True,\n bias_init_val=0,\n lr_mul=lr_mlp,\n activation=\"fused_lrelu\",\n )\n )\n self.style_mlp = nn.Sequential(*style_mlp_layers)\n\n channels = {\n \"4\": int(512 * narrow),\n \"8\": int(512 * narrow),\n \"16\": int(512 * narrow),\n \"32\": int(512 * narrow),\n \"64\": int(256 * channel_multiplier * narrow),\n \"128\": int(128 * channel_multiplier * narrow),\n \"256\": int(64 * channel_multiplier * narrow),\n \"512\": int(32 * channel_multiplier * narrow),\n \"1024\": int(16 * channel_multiplier * narrow),\n }\n self.channels = channels\n\n self.constant_input = ConstantInput(channels[\"4\"], size=4)\n self.style_conv1 = StyleConv(\n channels[\"4\"],\n channels[\"4\"],\n kernel_size=3,\n num_style_feat=num_style_feat,\n demodulate=True,\n sample_mode=None,\n resample_kernel=resample_kernel,\n )\n self.to_rgb1 = ToRGB(\n channels[\"4\"],\n num_style_feat,\n upsample=False,\n resample_kernel=resample_kernel,\n )\n\n self.log_size = int(math.log(out_size, 2))\n self.num_layers = (self.log_size - 2) * 2 + 1\n self.num_latent = self.log_size * 2 - 2\n\n self.style_convs = nn.ModuleList()\n self.to_rgbs = nn.ModuleList()\n self.noises = nn.Module()\n\n in_channels = channels[\"4\"]\n # noise\n for layer_idx in range(self.num_layers):\n resolution = 2 ** ((layer_idx + 5) // 2)\n shape = [1, 1, resolution, resolution]\n self.noises.register_buffer(f\"noise{layer_idx}\", torch.randn(*shape))\n # style convs and to_rgbs\n for i in range(3, self.log_size + 1):\n out_channels = channels[f\"{2**i}\"]\n self.style_convs.append(\n StyleConv(\n in_channels,\n out_channels,\n kernel_size=3,\n num_style_feat=num_style_feat,\n demodulate=True,\n sample_mode=\"upsample\",\n resample_kernel=resample_kernel,\n )\n )\n self.style_convs.append(\n StyleConv(\n out_channels,\n out_channels,\n kernel_size=3,\n num_style_feat=num_style_feat,\n demodulate=True,\n sample_mode=None,\n resample_kernel=resample_kernel,\n )\n )\n self.to_rgbs.append(\n ToRGB(\n out_channels,\n num_style_feat,\n upsample=True,\n resample_kernel=resample_kernel,\n )\n )\n in_channels = out_channels\n\n def make_noise(self):\n \"\"\"Make noise for noise injection.\"\"\"\n device = self.constant_input.weight.device\n noises = [torch.randn(1, 1, 4, 4, device=device)]\n\n for i in range(3, self.log_size + 1):\n for _ in range(2):\n noises.append(torch.randn(1, 1, 2**i, 2**i, device=device))\n\n return noises\n\n def get_latent(self, x):\n return self.style_mlp(x)\n\n def mean_latent(self, num_latent):\n latent_in = torch.randn(\n num_latent, self.num_style_feat, device=self.constant_input.weight.device\n )\n latent = self.style_mlp(latent_in).mean(0, keepdim=True)\n return latent\n\n def forward(\n self,\n styles,\n input_is_latent=False,\n noise=None,\n randomize_noise=True,\n truncation=1,\n truncation_latent=None,\n inject_index=None,\n return_latents=False,\n ):\n \"\"\"Forward function for StyleGAN2Generator.\n\n Args:\n styles (list[Tensor]): Sample codes of styles.\n input_is_latent (bool): Whether input is latent style.\n Default: False.\n noise (Tensor | None): Input noise or None. 
Default: None.\n randomize_noise (bool): Randomize noise, used when 'noise' is\n False. Default: True.\n truncation (float): TODO. Default: 1.\n truncation_latent (Tensor | None): TODO. Default: None.\n inject_index (int | None): The injection index for mixing noise.\n Default: None.\n return_latents (bool): Whether to return style latents.\n Default: False.\n \"\"\"\n # style codes -> latents with Style MLP layer\n if not input_is_latent:\n styles = [self.style_mlp(s) for s in styles]\n # noises\n if noise is None:\n if randomize_noise:\n noise = [None] * self.num_layers # for each style conv layer\n else: # use the stored noise\n noise = [\n getattr(self.noises, f\"noise{i}\") for i in range(self.num_layers)\n ]\n # style truncation\n if truncation < 1:\n style_truncation = []\n for style in styles:\n style_truncation.append(\n truncation_latent + truncation * (style - truncation_latent)\n )\n styles = style_truncation\n # get style latent with injection\n if len(styles) == 1:\n inject_index = self.num_latent\n\n if styles[0].ndim < 3:\n # repeat latent code for all the layers\n latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n else: # used for encoder with different latent code for each layer\n latent = styles[0]\n elif len(styles) == 2: # mixing noises\n if inject_index is None:\n inject_index = random.randint(1, self.num_latent - 1)\n latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n latent2 = (\n styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1)\n )\n latent = torch.cat([latent1, latent2], 1)\n\n # main generation\n out = self.constant_input(latent.shape[0])\n out = self.style_conv1(out, latent[:, 0], noise=noise[0])\n skip = self.to_rgb1(out, latent[:, 1])\n\n i = 1\n for conv1, conv2, noise1, noise2, to_rgb in zip(\n self.style_convs[::2],\n self.style_convs[1::2],\n noise[1::2],\n noise[2::2],\n self.to_rgbs,\n ):\n out = conv1(out, latent[:, i], noise=noise1)\n out = conv2(out, latent[:, i + 1], noise=noise2)\n skip = to_rgb(out, latent[:, i + 2], skip)\n i += 2\n\n image = skip\n\n if return_latents:\n return image, latent\n else:\n return image, None" } ]
import math import random import torch from torch import nn from torch.nn import functional as F from .fused_act import FusedLeakyReLU from .stylegan2_arch import ( ConvLayer, EqualConv2d, EqualLinear, ResBlock, ScaledLeakyReLU, StyleGAN2Generator, )
6247
latent2 = ( styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) ) latent = torch.cat([latent1, latent2], 1) # main generation out = self.constant_input(latent.shape[0]) out = self.style_conv1(out, latent[:, 0], noise=noise[0]) skip = self.to_rgb1(out, latent[:, 1]) i = 1 for conv1, conv2, noise1, noise2, to_rgb in zip( self.style_convs[::2], self.style_convs[1::2], noise[1::2], noise[2::2], self.to_rgbs, ): out = conv1(out, latent[:, i], noise=noise1) # the conditions may have fewer levels if i < len(conditions): # SFT part to combine the conditions if self.sft_half: # only apply SFT to half of the channels out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1) out_sft = out_sft * conditions[i - 1] + conditions[i] out = torch.cat([out_same, out_sft], dim=1) else: # apply SFT to all the channels out = out * conditions[i - 1] + conditions[i] out = conv2(out, latent[:, i + 1], noise=noise2) skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space i += 2 image = skip if return_latents: return image, latent else: return image, None class ConvUpLayer(nn.Module): """Convolutional upsampling layer. It uses bilinear upsampler + Conv. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Size of the convolving kernel. stride (int): Stride of the convolution. Default: 1 padding (int): Zero-padding added to both sides of the input. Default: 0. bias (bool): If ``True``, adds a learnable bias to the output. Default: ``True``. bias_init_val (float): Bias initialized value. Default: 0. activate (bool): Whether use activateion. Default: True. """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0, activate=True, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding # self.scale is used to scale the convolution weights, which is related to the common initializations. self.scale = 1 / math.sqrt(in_channels * kernel_size**2) self.weight = nn.Parameter( torch.randn(out_channels, in_channels, kernel_size, kernel_size) ) if bias and not activate: self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val)) else: self.register_parameter("bias", None) # activation if activate: if bias: self.activation = FusedLeakyReLU(out_channels) else: self.activation = ScaledLeakyReLU(0.2) else: self.activation = None def forward(self, x): # bilinear upsample out = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False) # conv out = F.conv2d( out, self.weight * self.scale, bias=self.bias, stride=self.stride, padding=self.padding, ) # activation if self.activation is not None: out = self.activation(out) return out class ResUpBlock(nn.Module): """Residual block with upsampling. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. """ def __init__(self, in_channels, out_channels): super().__init__()
# type: ignore class StyleGAN2GeneratorSFT(StyleGAN2Generator): """StyleGAN2 Generator with SFT modulation (Spatial Feature Transform). Args: out_size (int): The spatial size of outputs. num_style_feat (int): Channel number of style features. Default: 512. num_mlp (int): Layer number of MLP style layers. Default: 8. channel_multiplier (int): Channel multiplier for large networks of StyleGAN2. Default: 2. resample_kernel (list[int]): A list indicating the 1D resample kernel magnitude. A cross production will be applied to extent 1D resample kernel to 2D resample kernel. Default: (1, 3, 3, 1). lr_mlp (float): Learning rate multiplier for mlp layers. Default: 0.01. narrow (float): The narrow ratio for channels. Default: 1. sft_half (bool): Whether to apply SFT on half of the input channels. Default: False. """ def __init__( self, out_size, num_style_feat=512, num_mlp=8, channel_multiplier=2, resample_kernel=(1, 3, 3, 1), lr_mlp=0.01, narrow=1, sft_half=False, ): super().__init__( out_size, num_style_feat=num_style_feat, num_mlp=num_mlp, channel_multiplier=channel_multiplier, resample_kernel=resample_kernel, lr_mlp=lr_mlp, narrow=narrow, ) self.sft_half = sft_half def forward( self, styles, conditions, input_is_latent=False, noise=None, randomize_noise=True, truncation=1, truncation_latent=None, inject_index=None, return_latents=False, ): """Forward function for StyleGAN2GeneratorSFT. Args: styles (list[Tensor]): Sample codes of styles. conditions (list[Tensor]): SFT conditions to generators. input_is_latent (bool): Whether input is latent style. Default: False. noise (Tensor | None): Input noise or None. Default: None. randomize_noise (bool): Randomize noise, used when 'noise' is False. Default: True. truncation (float): The truncation ratio. Default: 1. truncation_latent (Tensor | None): The truncation latent tensor. Default: None. inject_index (int | None): The injection index for mixing noise. Default: None. return_latents (bool): Whether to return style latents. Default: False. 
""" # style codes -> latents with Style MLP layer if not input_is_latent: styles = [self.style_mlp(s) for s in styles] # noises if noise is None: if randomize_noise: noise = [None] * self.num_layers # for each style conv layer else: # use the stored noise noise = [ getattr(self.noises, f"noise{i}") for i in range(self.num_layers) ] # style truncation if truncation < 1: style_truncation = [] for style in styles: style_truncation.append( truncation_latent + truncation * (style - truncation_latent) ) styles = style_truncation # get style latents with injection if len(styles) == 1: inject_index = self.num_latent if styles[0].ndim < 3: # repeat latent code for all the layers latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) else: # used for encoder with different latent code for each layer latent = styles[0] elif len(styles) == 2: # mixing noises if inject_index is None: inject_index = random.randint(1, self.num_latent - 1) latent1 = styles[0].unsqueeze(1).repeat(1, inject_index, 1) latent2 = ( styles[1].unsqueeze(1).repeat(1, self.num_latent - inject_index, 1) ) latent = torch.cat([latent1, latent2], 1) # main generation out = self.constant_input(latent.shape[0]) out = self.style_conv1(out, latent[:, 0], noise=noise[0]) skip = self.to_rgb1(out, latent[:, 1]) i = 1 for conv1, conv2, noise1, noise2, to_rgb in zip( self.style_convs[::2], self.style_convs[1::2], noise[1::2], noise[2::2], self.to_rgbs, ): out = conv1(out, latent[:, i], noise=noise1) # the conditions may have fewer levels if i < len(conditions): # SFT part to combine the conditions if self.sft_half: # only apply SFT to half of the channels out_same, out_sft = torch.split(out, int(out.size(1) // 2), dim=1) out_sft = out_sft * conditions[i - 1] + conditions[i] out = torch.cat([out_same, out_sft], dim=1) else: # apply SFT to all the channels out = out * conditions[i - 1] + conditions[i] out = conv2(out, latent[:, i + 1], noise=noise2) skip = to_rgb(out, latent[:, i + 2], skip) # feature back to the rgb space i += 2 image = skip if return_latents: return image, latent else: return image, None class ConvUpLayer(nn.Module): """Convolutional upsampling layer. It uses bilinear upsampler + Conv. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. kernel_size (int): Size of the convolving kernel. stride (int): Stride of the convolution. Default: 1 padding (int): Zero-padding added to both sides of the input. Default: 0. bias (bool): If ``True``, adds a learnable bias to the output. Default: ``True``. bias_init_val (float): Bias initialized value. Default: 0. activate (bool): Whether use activateion. Default: True. """ def __init__( self, in_channels, out_channels, kernel_size, stride=1, padding=0, bias=True, bias_init_val=0, activate=True, ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding # self.scale is used to scale the convolution weights, which is related to the common initializations. 
self.scale = 1 / math.sqrt(in_channels * kernel_size**2) self.weight = nn.Parameter( torch.randn(out_channels, in_channels, kernel_size, kernel_size) ) if bias and not activate: self.bias = nn.Parameter(torch.zeros(out_channels).fill_(bias_init_val)) else: self.register_parameter("bias", None) # activation if activate: if bias: self.activation = FusedLeakyReLU(out_channels) else: self.activation = ScaledLeakyReLU(0.2) else: self.activation = None def forward(self, x): # bilinear upsample out = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=False) # conv out = F.conv2d( out, self.weight * self.scale, bias=self.bias, stride=self.stride, padding=self.padding, ) # activation if self.activation is not None: out = self.activation(out) return out class ResUpBlock(nn.Module): """Residual block with upsampling. Args: in_channels (int): Channel number of the input. out_channels (int): Channel number of the output. """ def __init__(self, in_channels, out_channels): super().__init__()
self.conv1 = ConvLayer(in_channels, in_channels, 3, bias=True, activate=True)
1
2023-11-17 01:11:47+00:00
8k
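The distinguishing step in the StyleGAN2GeneratorSFT entry above is the spatial feature transform: decoder conditions modulate a feature map as out * scale + shift, optionally touching only half of the channels. Isolated from the generator, the operation is a few lines; the tensor shapes below are illustrative.

import torch


def apply_sft(out, scale, shift, sft_half=True):
    # Spatial Feature Transform: elementwise affine modulation of features.
    if sft_half:
        # Modulate only the second half of the channels (sft_half=True path);
        # the first half passes through unchanged.
        out_same, out_sft = torch.split(out, out.size(1) // 2, dim=1)
        out_sft = out_sft * scale + shift
        return torch.cat([out_same, out_sft], dim=1)
    return out * scale + shift


feat = torch.randn(1, 8, 16, 16)
scale = torch.randn(1, 4, 16, 16)  # conditions sized for the modulated half
shift = torch.randn(1, 4, 16, 16)
print(apply_sft(feat, scale, shift).shape)  # torch.Size([1, 8, 16, 16])

Because conditions may cover fewer resolution levels than the style convolutions, the generator guards each application with i < len(conditions) before mixing them in.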
Mj23978/OpenServer
openserver/core/llm_models/llm_model_factory.py
[ { "identifier": "ChatTogetherModel", "path": "openserver/core/llm_models/together.py", "snippet": "class ChatTogetherModel(BaseChat):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = ChatTogetherLLM(\n together_api_key=input.api_key,\n model=input.model_name if input.model_name else \"Open-Orca/Mistral-7B-OpenOrca\",\n top_p=input.top_p,\n top_k=input.top_k,\n temperature=input.temperature,\n max_tokens=input.max_tokens,\n stop=input.stop,\n cache=input.cache,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n messages=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = await self.client.agenerate(messages=prompts, callbacks=callbacks, metadata=metadata)\n return result" }, { "identifier": "TogetherModel", "path": "openserver/core/llm_models/together.py", "snippet": "class TogetherModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = TogetherLLM(\n together_api_key=input.api_key,\n model=input.model_name if input.model_name else \"Open-Orca/Mistral-7B-OpenOrca\",\n top_p=input.top_p,\n top_k=input.top_k,\n temperature=input.temperature,\n max_tokens=input.max_tokens,\n stop=input.stop,\n cache=input.cache,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n prompts=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "AI21Model", "path": "openserver/core/llm_models/ai21.py", "snippet": "class AI21Model(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = AI21(\n ai21_api_key=input.api_key,\n model=input.model_name if input.model_name else \"j2-jumbo-instruct\",\n temperature=input.temperature,\n maxTokens=input.max_tokens,\n topP=input.top_p,\n stop=input.stop,\n cache=input.cache,\n verbose=input.verbose,\n callback_manager=cast(BaseCallbackManager, input.callback_manager),\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks | List[Callbacks] = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(prompts=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks | List[Callbacks] = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "ChatFireworksModel", "path": "openserver/core/llm_models/fireworks.py", "snippet": "class ChatFireworksModel(BaseChat):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = ChatFireworks(\n fireworks_api_key=input.api_key,\n model=input.model_name 
if input.model_name else \"accounts/fireworks/models/llama-v2-13b-code-instruct\",\n max_retries=input.max_retries,\n cache=input.cache,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n model_kwargs={\"temperature\": input.temperature,\n \"max_tokens\": input.max_tokens, \"top_p\": input.top_p}\n ) # type: ignore\n\n def compelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n messages=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = await self.client.agenerate(messages=prompts, callbacks=callbacks, metadata=metadata)\n return result" }, { "identifier": "FireworksModel", "path": "openserver/core/llm_models/fireworks.py", "snippet": "class FireworksModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = Fireworks(\n fireworks_api_key=input.api_key,\n model=input.model_name if input.model_name else \"accounts/cresta-ai/models/openorca-7b-fast\",\n max_retries=input.max_retries,\n cache=input.cache,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n model_kwargs={\"temperature\": input.temperature,\n \"max_tokens\": input.max_tokens, \"top_p\": input.top_p}\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n prompts=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "ChatGooglePalmModel", "path": "openserver/core/llm_models/palm.py", "snippet": "class ChatGooglePalmModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = ChatGooglePalm(\n google_api_key=input.api_key,\n model_name=input.model_name if input.model_name else \"models/chat-bison-001\",\n top_p=input.top_p,\n top_k=input.top_k,\n temperature=input.temperature,\n # max_output_tokens=input.max_tokens, \n cache=input.cache, \n verbose=input.verbose,\n callbacks=cast(BaseCallbackManager, input.callbacks),\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n messages=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = await self.client.agenerate(messages=prompts, callbacks=callbacks, metadata=metadata)\n return result" }, { "identifier": "GooglePalmModel", "path": "openserver/core/llm_models/palm.py", "snippet": "class GooglePalmModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = GooglePalm(\n google_api_key=input.api_key,\n model_name=input.model_name if input.model_name else \"models/text-bison-001\",\n top_p=input.top_p,\n top_k=input.top_k,\n temperature=input.temperature,\n # 
max_output_tokens=input.max_tokens, \n cache=input.cache, \n verbose=input.verbose,\n callbacks=cast(BaseCallbackManager, input.callbacks),\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(prompts=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "LLmInputInterface", "path": "openserver/core/llm_models/base.py", "snippet": "class LLmInputInterface:\n def __init__(self, model: str, api_key: str | None = None, stop: List[str] = [\"### Humen:\", \"### Instruction:\", \"### Assistant:\", \"\\nQuestion:\"], max_tokens=4196, repeat_penalty=0.2,\n responses: List[str] | None = None, top_k=30, top_p=0.95, streaming: bool = False, temperature=0.2, cache=True, verbose=True, max_retries=10, n_ctx: int = 2048, f16_kv=True, \n n_gpu_layers: int = 50, n_threads=4, metadata: Dict[str, Any] | None = None, callbacks: Callbacks | None = None, grammer: str | LlamaGrammar | None = None, \n grammer_path: str | Path | None = None, model_kwargs={}, base_url: str | None = None):\n self.api_key: str | None = api_key\n self.model_name: str = model\n self.model_kwargs: Dict[str, Any] = model_kwargs\n self.stop: List[str] = stop\n self.max_tokens: int = max_tokens\n self.repeat_penalty: float = repeat_penalty\n self.top_k: int = top_k\n self.top_p: float = top_p\n self.temperature: float = temperature\n self.cache: bool = cache\n self.verbose: bool = verbose\n self.max_retries: int = max_retries\n self.responses: List[str] | None = responses\n self.stream: bool = streaming\n self.n_ctx: int = n_ctx\n self.f16_kv: bool = f16_kv\n self.n_gpu_layers: int = n_gpu_layers\n self.n_threads: int = n_threads\n self.grammer: str | LlamaGrammar | None = grammer\n self.grammer_path: str | Path | None = grammer_path\n self.callbacks: Callbacks = callbacks\n self.metadata = metadata\n self.base_url = base_url" }, { "identifier": "LLMType", "path": "openserver/core/llm_models/base.py", "snippet": "class LLMType(Enum):\n AI21 = \"ai21\"\n COHERE = \"cohere\"\n FAKE = \"fake\"\n FIREWORKS = \"fireworks\"\n FREE = \"free\"\n LLAMACPP = \"llamacpp\"\n OPENAI = \"openai\"\n PALM = \"palm\"\n TOGETHER = \"together\"\n\n @classmethod\n def get_type(cls, type: str):\n type_enum_value = None\n for enum_value in LLMType:\n if type == enum_value.value:\n type_enum_value = enum_value\n break\n return type_enum_value or cls.FREE" }, { "identifier": "ChatCohereModel", "path": "openserver/core/llm_models/cohere.py", "snippet": "class ChatCohereModel(BaseChat):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = ChatCohere(\n cohere_api_key=input.api_key,\n model=input.model_name if input.model_name else \"command\",\n temperature=input.temperature,\n stop=input.stop,\n cache=input.cache,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n messages=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: 
List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = await self.client.agenerate(messages=prompts, callbacks=callbacks, metadata=metadata)\n return result" }, { "identifier": "CohereModel", "path": "openserver/core/llm_models/cohere.py", "snippet": "class CohereModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = Cohere(\n cohere_api_key=input.api_key,\n model=input.model_name if input.model_name else \"command\",\n k=input.top_k,\n temperature=input.temperature,\n max_tokens=input.max_tokens,\n frequency_penalty=input.repeat_penalty,\n max_retries=input.max_retries,\n stop=input.stop,\n cache=input.cache,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n prompts=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "ChatOpenAIModel", "path": "openserver/core/llm_models/openai.py", "snippet": "class ChatOpenAIModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = ChatOpenAI(\n api_key=input.api_key,\n base_url=input.base_url or get_config(\"OPENAI_BASE_URL\"),\n model=input.model_name if input.model_name else \"gpt-3.5-turbo\",\n temperature=input.temperature,\n max_tokens=input.max_tokens,\n max_retries=input.max_retries,\n cache=input.cache,\n streaming=input.stream,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n messages=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = await self.client.agenerate(messages=prompts, callbacks=callbacks, metadata=metadata)\n return result" }, { "identifier": "OpenAIModel", "path": "openserver/core/llm_models/openai.py", "snippet": "class OpenAIModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = OpenAI(\n api_key=input.api_key,\n base_url=input.base_url or get_config(\"OPENAI_BASE_URL\"),\n model=input.model_name if input.model_name else \"gpt-3.5-turbo\",\n batch_size=input.top_k,\n top_p=input.top_p,\n temperature=input.temperature,\n max_tokens=input.max_tokens,\n frequency_penalty=input.repeat_penalty,\n max_retries=input.max_retries,\n cache=input.cache,\n streaming=input.stream,\n verbose=input.verbose,\n callbacks=input.callbacks,\n metadata=input.metadata,\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n prompts=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None):\n result = await 
self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "LlamaCppModel", "path": "openserver/core/llm_models/llama_cpp.py", "snippet": "class LlamaCppModel(BaseLlmModel):\n def __init__(self, inp: LLmInputInterface) -> None:\n LlamaCpp.update_forward_refs()\n if inp.grammer is not None:\n inp.f16_kv=True\n self.client = LlamaCpp(\n model_path=inp.model_name,\n top_k=inp.top_k,\n grammar=inp.grammer,\n grammar_path=inp.grammer_path,\n model_kwargs=inp.model_kwargs,\n top_p=inp.top_p,\n n_ctx=inp.n_ctx,\n f16_kv=inp.f16_kv,\n temperature=inp.temperature,\n n_gpu_layers=inp.n_gpu_layers,\n max_tokens=inp.max_tokens,\n stop=inp.stop,\n cache=inp.cache,\n streaming=inp.stream,\n verbose=True,\n callbacks=inp.callbacks,\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "ChatG4FModel", "path": "openserver/core/llm_models/gf4.py", "snippet": "class ChatG4FModel(BaseChat):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = ChatG4FLLM(\n model=input.model_name if input.model_name else \"gpt-3.5-turbo\",\n max_retries=input.max_retries,\n cache=input.cache,\n verbose=input.verbose,\n streaming=input.stream,\n metadata=input.metadata,\n callbacks=input.callbacks,\n ) # type: ignore\n\n def compelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n messages=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = await self.client.agenerate(messages=prompts, callbacks=callbacks, metadata=metadata)\n return result" }, { "identifier": "G4FModel", "path": "openserver/core/llm_models/gf4.py", "snippet": "class G4FModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = G4FLLM(\n model=input.model_name if input.model_name else \"gpt-3.5-turbo\",\n max_retries=input.max_retries,\n cache=input.cache,\n verbose=input.verbose,\n streaming=input.stream,\n metadata=input.metadata,\n callbacks=input.callbacks,\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result = self.client.generate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" }, { "identifier": "FakeChatModel", "path": "openserver/core/llm_models/fake.py", "snippet": "class FakeChatModel(BaseChat):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = FakeListChatModel(\n responses=input.responses if input.responses else [],\n cache=input.cache,\n verbose=input.verbose,\n callbacks=CallbackManager(\n 
handlers=[StreamingStdOutCallbackHandler()]),\n ) # type: ignore\n\n def compelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(\n messages=prompts, callbacks=callbacks, metadata=metadata)\n return result\n\n async def acompelete(self, prompts: List[List[BaseMessage]], callbacks: Callbacks = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = await self.client.agenerate(messages=prompts, callbacks=callbacks, metadata=metadata)\n return result" }, { "identifier": "FakeModel", "path": "openserver/core/llm_models/fake.py", "snippet": "class FakeModel(BaseLlmModel):\n def __init__(self, input: LLmInputInterface) -> None:\n self.client = FakeListLLM(\n responses=input.responses if input.responses else [],\n cache=input.cache,\n verbose=input.verbose,\n callbacks=CallbackManager(\n handlers=[StreamingStdOutCallbackHandler()]),\n ) # type: ignore\n\n def compelete(self, prompts: List[str], callbacks: Callbacks | List[Callbacks] = None, metadata: Dict[str, Any] | None = None) -> LLMResult:\n result: LLMResult = self.client.generate(prompts=prompts)\n return result\n\n async def acompelete(self, prompts: List[str], callbacks: Callbacks | List[Callbacks] = None, metadata: Dict[str, Any] | None = None):\n result = await self.client.agenerate(prompts=prompts, metadata=metadata, callbacks=callbacks)\n return result" } ]
from .together import ChatTogetherModel, TogetherModel from .ai21 import AI21Model from .fireworks import ChatFireworksModel, FireworksModel from .palm import ChatGooglePalmModel, GooglePalmModel from .base import LLmInputInterface, LLMType from .cohere import ChatCohereModel, CohereModel from .openai import ChatOpenAIModel, OpenAIModel from .llama_cpp import LlamaCppModel from .gf4 import ChatG4FModel, G4FModel from .fake import FakeChatModel, FakeModel
5,503
class LLMFactory:
    @classmethod
    def get_model(cls, input: LLmInputInterface, provider_name: LLMType | str = 'free'):
        if isinstance(provider_name, str):
            provider_name = LLMType.get_type(provider_name.lower())

        if provider_name == LLMType.OPENAI:
class LLMFactory:
    @classmethod
    def get_model(cls, input: LLmInputInterface, provider_name: LLMType | str = 'free'):
        if isinstance(provider_name, str):
            provider_name = LLMType.get_type(provider_name.lower())

        if provider_name == LLMType.OPENAI:
return OpenAIModel(input)
12
2023-11-11 00:32:31+00:00
8k
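
A minimal usage sketch (editorial addition, not a field of the record above) showing how the pieces captured in this row fit together: an LLmInputInterface is passed to LLMFactory.get_model, which per next_line resolves "openai" to OpenAIModel. The factory's import path, the model name, and the API key are assumed placeholders; only the class names, the get_model signature, and the OpenAI branch come from the record, and `compelete` is the repository's own spelling.

from openserver.core.llm_models.base import LLmInputInterface
from openserver.core.llm_models.llm_model_factory import LLMFactory  # hypothetical module path, not given in the record

# Placeholder model name and key; unknown provider strings fall back to LLMType.FREE via get_type.
params = LLmInputInterface(model="gpt-3.5-turbo", api_key="<YOUR_API_KEY>")
llm = LLMFactory.get_model(params, provider_name="openai")  # returns OpenAIModel(input) per next_line
result = llm.compelete(prompts=["Hello!"])  # LLMResult, as in OpenAIModel.compelete above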
motexture/VSeq2VSeq
models/unet_blocks.py
[ { "identifier": "TemporalConvLayer", "path": "models/resnet.py", "snippet": "class TemporalConvLayer(nn.Module):\n def __init__(self, in_dim, out_dim=None, dropout=0.0):\n super().__init__()\n\n out_dim = out_dim or in_dim\n self.in_dim = in_dim\n self.out_dim = out_dim\n\n self.conv1 = nn.Sequential(\n nn.GroupNorm(32, in_dim), \n nn.SiLU(), \n nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0))\n )\n self.conv2 = nn.Sequential(\n nn.GroupNorm(32, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n self.conv3 = nn.Sequential(\n nn.GroupNorm(32, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n self.conv4 = nn.Sequential(\n nn.GroupNorm(32, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n\n nn.init.zeros_(self.conv4[-1].weight)\n nn.init.zeros_(self.conv4[-1].bias)\n\n def forward(self, hidden_states, num_frames=1):\n hidden_states = (\n hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4)\n )\n identity = hidden_states\n\n hidden_states = self.conv1(hidden_states)\n hidden_states = self.conv2(hidden_states)\n hidden_states = self.conv3(hidden_states)\n hidden_states = self.conv4(hidden_states)\n\n hidden_states = identity + hidden_states\n\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(\n (hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:]\n ) \n return hidden_states" }, { "identifier": "Downsample2D", "path": "models/resnet.py", "snippet": "class Downsample2D(nn.Module):\n def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name=\"conv\"):\n super().__init__()\n\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.padding = padding\n stride = 2\n self.name = name\n\n self.conv = Conditioner(self.channels, self.out_channels, 3, stride=stride, padding=padding)\n\n def forward(self, hidden_states, conditioning_hidden_states):\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv and self.padding == 0:\n pad = (0, 1, 0, 1)\n hidden_states = F.pad(hidden_states, pad, mode=\"constant\", value=0)\n\n assert hidden_states.shape[1] == self.channels\n \n hidden_states, conditioning_hidden_states = self.conv(hidden_states, conditioning_hidden_states=conditioning_hidden_states)\n\n return hidden_states, conditioning_hidden_states " }, { "identifier": "ResnetBlock2D", "path": "models/resnet.py", "snippet": "class ResnetBlock2D(nn.Module):\n def __init__(\n self,\n *,\n in_channels,\n out_channels=None,\n conv_shortcut=False,\n dropout=0.0,\n temb_channels=512,\n groups=32,\n pre_norm=True,\n eps=1e-6,\n output_scale_factor=1.0,\n up=False,\n down=False,\n conv_shortcut_bias: bool = True\n ):\n super().__init__()\n\n self.pre_norm = pre_norm\n self.pre_norm = True\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n self.use_conv_shortcut = conv_shortcut\n self.up = up\n self.down = down\n self.output_scale_factor = output_scale_factor\n\n self.hidden_norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n self.hidden_silu1 = nn.SiLU()\n\n self.conditioning_norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n self.conditioning_silu1 = nn.SiLU()\n\n self.conv1 = 
Conditioner(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n \n self.hidden_time_emb_silu = nn.SiLU()\n self.hidden_time_emb_proj = torch.nn.Linear(temb_channels, out_channels)\n\n self.conditioning_time_emb_silu = nn.SiLU()\n self.conditioning_time_emb_proj = torch.nn.Linear(temb_channels, out_channels)\n \n self.hidden_dropout = torch.nn.Dropout(dropout)\n self.hidden_norm2 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True)\n self.hidden_silu2 = nn.SiLU()\n\n self.conditioning_dropout = torch.nn.Dropout(dropout)\n self.conditioning_norm2 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True)\n self.conditioning_silu2 = nn.SiLU()\n\n self.conv2 = Conditioner(out_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n self.upsample = self.downsample = None\n\n if self.up:\n self.upsample = Upsample2D(in_channels, use_conv=False)\n elif self.down:\n self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name=\"op\")\n\n self.conv_shortcut = None\n if self.in_channels != self.out_channels:\n self.conv_shortcut = Conditioner(\n in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=conv_shortcut_bias\n )\n\n def forward(self, input_tensor, conditioning_input_tensor, h_emb, c_emb):\n hidden_states = input_tensor\n conditioning_hidden_states = conditioning_input_tensor\n\n hidden_states = self.hidden_norm1(hidden_states)\n hidden_states = self.hidden_silu1(hidden_states)\n\n conditioning_hidden_states = self.conditioning_norm1(conditioning_hidden_states)\n conditioning_hidden_states = self.conditioning_silu1(conditioning_hidden_states)\n\n if self.upsample is not None:\n if hidden_states.shape[0] >= 64:\n input_tensor = input_tensor.contiguous()\n hidden_states = hidden_states.contiguous()\n \n conditioning_input_tensor = conditioning_input_tensor.contiguous()\n conditioning_hidden_states = conditioning_hidden_states.contiguous()\n\n input_tensor, conditioning_input_tensor = self.upsample(input_tensor, conditioning_hidden_states=conditioning_input_tensor)\n hidden_states, conditioning_hidden_states = self.upsample(hidden_states, conditioning_hidden_states=conditioning_hidden_states)\n elif self.downsample is not None:\n input_tensor, conditioning_input_tensor = self.downsample(input_tensor, conditioning_hidden_states=conditioning_input_tensor)\n hidden_states, conditioning_hidden_states = self.downsample(hidden_states, conditioning_hidden_states=conditioning_hidden_states)\n\n hidden_states, conditioning_hidden_states = self.conv1(hidden_states, conditioning_hidden_states=conditioning_hidden_states)\n\n h_emb = self.hidden_time_emb_silu(h_emb)\n h_emb = self.hidden_time_emb_proj(h_emb)[:, :, None, None]\n hidden_states = hidden_states + h_emb\n\n c_emb = self.conditioning_time_emb_silu(c_emb)\n c_emb = torch.sigmoid(self.conditioning_time_emb_proj(c_emb)[:, :, None, None])\n conditioning_hidden_states = conditioning_hidden_states * c_emb\n\n hidden_states = self.hidden_norm2(hidden_states)\n hidden_states = self.hidden_silu2(hidden_states)\n\n conditioning_hidden_states = self.conditioning_norm2(conditioning_hidden_states)\n conditioning_hidden_states = self.conditioning_silu2(conditioning_hidden_states)\n\n hidden_states = self.hidden_dropout(hidden_states)\n conditioning_hidden_states = self.conditioning_dropout(conditioning_hidden_states)\n\n hidden_states, conditioning_hidden_states = self.conv2(hidden_states, conditioning_hidden_states=conditioning_hidden_states)\n\n 
if self.conv_shortcut is not None:\n input_tensor, conditioning_input_tensor = self.conv_shortcut(input_tensor, conditioning_hidden_states=conditioning_input_tensor)\n\n output_tensor = (input_tensor + hidden_states) / self.output_scale_factor\n output_conditioning_hidden_states = (conditioning_input_tensor + conditioning_hidden_states) / self.output_scale_factor\n\n return output_tensor, output_conditioning_hidden_states" }, { "identifier": "Upsample2D", "path": "models/resnet.py", "snippet": "class Upsample2D(nn.Module):\n def __init__(self, channels, use_conv=False, out_channels=None, name=\"conv\"):\n super().__init__()\n\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.name = name\n\n self.conv = Conditioner(self.channels, self.out_channels, 3, padding=1)\n\n def forward(self, hidden_states, conditioning_hidden_states, output_size=None):\n assert hidden_states.shape[1] == self.channels\n\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n conditioning_hidden_states = conditioning_hidden_states.to(torch.float32)\n\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n conditioning_hidden_states = conditioning_hidden_states.contiguous()\n\n if output_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode=\"nearest\")\n conditioning_hidden_states = F.interpolate(conditioning_hidden_states, scale_factor=2.0, mode=\"nearest\")\n else:\n hidden_states = F.interpolate(hidden_states, size=output_size, mode=\"nearest\")\n conditioning_hidden_states = F.interpolate(conditioning_hidden_states, size=output_size, mode=\"nearest\")\n\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n conditioning_hidden_states = conditioning_hidden_states.to(dtype)\n\n hidden_states, conditioning_hidden_states = self.conv(hidden_states, conditioning_hidden_states=conditioning_hidden_states)\n\n return hidden_states, conditioning_hidden_states" }, { "identifier": "Transformer2DModel", "path": "models/transformers.py", "snippet": "class Transformer2DModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n only_cross_attention: bool = False,\n upcast_attention: bool = False,\n norm_type: str = \"layer_norm\",\n norm_elementwise_affine: bool = True,\n ):\n super().__init__()\n\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n if norm_type == \"layer_norm\" and num_embeds_ada_norm is not None:\n deprecation_message = (\n f\"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or\"\n \" incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config.\"\n \" Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect\"\n \" results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it\"\n \" would be very nice if you could open a Pull request for the `transformer/config.json` file\"\n )\n deprecate(\"norm_type!=num_embeds_ada_norm\", \"1.0.0\", deprecation_message, standard_warn=False)\n norm_type = \"ada_norm\"\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n norm_type=norm_type,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. Define output layers\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n class_labels: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ):\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.\n # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.\n # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)\n if attention_mask is not None and attention_mask.ndim == 2:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:\n encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # 1. Input\n batch, _, height, width = hidden_states.shape\n\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n \n inner_dim = hidden_states.shape[1]\n\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. 
Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return Transformer2DModelOutput(sample=output)" }, { "identifier": "TransformerTemporalModel", "path": "models/transformers.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n ):\n super().__init__()\n\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states,\n num_frames=1,\n encoder_hidden_states=None,\n timestep=None,\n class_labels=None,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. 
Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, channel, num_frames)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)" }, { "identifier": "TransformerTemporalConditioningModel", "path": "models/transformers.py", "snippet": "class TransformerTemporalConditioningModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n only_cross_attention: bool = False,\n upcast_attention: bool = False,\n norm_type: str = \"layer_norm\",\n norm_elementwise_affine: bool = True,\n ):\n super().__init__()\n\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n if norm_type == \"layer_norm\" and num_embeds_ada_norm is not None:\n deprecation_message = (\n f\"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or\"\n \" incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config.\"\n \" Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect\"\n \" results in future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it\"\n \" would be very nice if you could open a Pull request for the `transformer/config.json` file\"\n )\n norm_type = \"ada_norm\"\n \n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.cond_norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.cond_proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=inner_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n norm_type=norm_type,\n norm_elementwise_affine=norm_elementwise_affine,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n conditioning_hidden_states: Optional[torch.Tensor] = None,\n num_frames: int = 16,\n return_dict: bool = True\n ):\n batch_frames_h, _, height_h, width_h = hidden_states.shape\n batch_frames_c, _, _, _ = conditioning_hidden_states.shape\n\n batch_h = batch_frames_h // num_frames\n hidden_states = rearrange(hidden_states, '(b f) c h w -> b c f h w', b=batch_h, f=num_frames)\n\n num_frames_c = batch_frames_c // batch_h\n conditioning_hidden_states = rearrange(conditioning_hidden_states, '(b f) c h w -> b c f h w', b=batch_h, f=num_frames_c)\n\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n conditioning_hidden_states = self.cond_norm(conditioning_hidden_states)\n\n hidden_states = rearrange(hidden_states, 'b c f h w -> (b h w) f c')\n conditioning_hidden_states = rearrange(conditioning_hidden_states, 'b c f h w -> (b h w) f c')\n\n hidden_states = self.proj_in(hidden_states)\n conditioning_hidden_states = self.cond_proj_in(conditioning_hidden_states)\n\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=conditioning_hidden_states,\n encoder_attention_mask=None\n )\n\n hidden_states = self.proj_out(hidden_states) \n hidden_states = rearrange(hidden_states, \"(b h w) f c -> b c f h w\", b=batch_h, h=height_h, w=width_h)\n\n hidden_states += residual\n hidden_states = rearrange(hidden_states, \"b c f h w -> (b f) c h w\")\n\n output = hidden_states\n\n if not return_dict:\n return (output, )\n\n return TransformerTemporalConditioningOutput(sample=output)" } ]
import torch from torch import nn from itertools import zip_longest from .resnet import TemporalConvLayer, Downsample2D, ResnetBlock2D, Upsample2D from .transformers import Transformer2DModel, TransformerTemporalModel, TransformerTemporalConditioningModel
6,994
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    only_cross_attention=False,
    upcast_attention=False
):
    if down_block_type == "DownBlock3D":
        return DownBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding
        )
    elif down_block_type == "CrossAttnDownBlock3D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
        return CrossAttnDownBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention
        )
    raise ValueError(f"{down_block_type} does not exist.")


def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    only_cross_attention=False,
    upcast_attention=False
):
    if up_block_type == "UpBlock3D":
        return UpBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups
        )
    elif up_block_type == "CrossAttnUpBlock3D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
        return CrossAttnUpBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention
        )
    raise ValueError(f"{up_block_type} does not exist.")


class UNetMidBlock3DCrossAttn(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        upcast_attention=False,
    ):
        super().__init__()

        self.gradient_checkpointing = False
        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        resnets = [
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    only_cross_attention=False,
    upcast_attention=False
):
    if down_block_type == "DownBlock3D":
        return DownBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding
        )
    elif down_block_type == "CrossAttnDownBlock3D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D")
        return CrossAttnDownBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention
        )
    raise ValueError(f"{down_block_type} does not exist.")


def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    only_cross_attention=False,
    upcast_attention=False
):
    if up_block_type == "UpBlock3D":
        return UpBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups
        )
    elif up_block_type == "CrossAttnUpBlock3D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D")
        return CrossAttnUpBlock3D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention
        )
    raise ValueError(f"{up_block_type} does not exist.")


class UNetMidBlock3DCrossAttn(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        upcast_attention=False,
    ):
        super().__init__()

        self.gradient_checkpointing = False
        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        resnets = [
ResnetBlock2D(
2
2023-11-14 09:09:09+00:00
8k
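
A short usage sketch (editorial addition, not part of the row): constructing the cross-attention down block through the get_down_block factory shown in cropped_code/all_code above. The channel widths and head count are illustrative placeholders; the argument names follow the signature in the record, and cross_attention_dim must be provided or the factory raises ValueError.

from models.unet_blocks import get_down_block  # module path taken from the record's file_path

down_block = get_down_block(
    "CrossAttnDownBlock3D",
    num_layers=2,
    in_channels=320,           # placeholder widths, not taken from the record
    out_channels=640,
    temb_channels=1280,
    add_downsample=True,
    resnet_eps=1e-6,
    attn_num_head_channels=8,
    resnet_groups=32,
    cross_attention_dim=1024,  # required for the CrossAttnDownBlock3D branch
)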
AI-sandbox/HyperFast
hyperfast/hyperfast.py
[ { "identifier": "config", "path": "hyperfast/config.py", "snippet": "" }, { "identifier": "seed_everything", "path": "hyperfast/utils.py", "snippet": "def seed_everything(seed: int):\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n torch.set_num_threads(1)" }, { "identifier": "transform_data_for_main_network", "path": "hyperfast/utils.py", "snippet": "def transform_data_for_main_network(X, cfg, rf, pca):\n with torch.no_grad():\n X = rf(X)\n if cfg.torch_pca:\n X = pca.transform(X)\n else:\n X = torch.from_numpy(pca.transform(X.cpu().numpy())).to(cfg.device)\n X = torch.clamp(X, -cfg.clip_data_value, cfg.clip_data_value)\n return X" }, { "identifier": "forward_main_network", "path": "hyperfast/utils.py", "snippet": "def forward_main_network(x, main_network):\n for n, layer in enumerate(main_network):\n if n % 2 == 0:\n residual_connection = x\n matrix, bias = layer\n x = torch.mm(x, matrix) + bias\n if n % 2 == 1 and n != len(main_network) - 1:\n x = x + residual_connection\n\n if n != len(main_network) - 1:\n x = F.relu(x)\n if n == len(main_network) - 2:\n intermediate_activations = x\n return x, intermediate_activations" }, { "identifier": "nn_bias_logits", "path": "hyperfast/utils.py", "snippet": "def nn_bias_logits(\n test_logits, test_samples, train_samples, train_labels, bias_param, n_classes\n):\n with torch.no_grad():\n nn = NN(train_samples, train_labels)\n preds = nn.predict(test_samples)\n preds_onehot = F.one_hot(preds, n_classes)\n test_logits[preds_onehot.bool()] += bias_param\n return test_logits" }, { "identifier": "fine_tune_main_network", "path": "hyperfast/utils.py", "snippet": "def fine_tune_main_network(\n cfg,\n X,\n y,\n n_classes,\n rf,\n pca,\n main_network_layers,\n nn_bias,\n device,\n optimize_steps,\n batch_size,\n):\n main_model = MainNetworkTrainable(\n cfg, n_classes, rf, pca, main_network_layers, nn_bias\n ).to(device)\n dataset = TensorDataset(X, y)\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.AdamW(main_model.parameters(), lr=cfg.lr)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, mode=\"min\", factor=0.1, patience=10, verbose=True\n )\n\n for step in range(optimize_steps):\n for inputs, targets in dataloader:\n optimizer.zero_grad()\n outputs = main_model(inputs, targets)\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n # print(f\"Step: [{step+1}/{optimize_steps}], Loss: {loss.item()}\")\n\n if scheduler is not None:\n if isinstance(scheduler, optim.lr_scheduler.ReduceLROnPlateau):\n scheduler.step(loss.item())\n else:\n scheduler.step()\n return main_model.get_main_network_parts()" }, { "identifier": "HyperFast", "path": "hyperfast/model.py", "snippet": "class HyperFast(nn.Module):\n def __init__(self, cfg):\n super().__init__()\n self.n_dims = cfg.n_dims\n self.max_categories = cfg.max_categories\n self.rf_size = cfg.rf_size\n self.torch_pca = cfg.torch_pca\n self.clip_data_value = cfg.clip_data_value\n self.hn_n_layers = cfg.hn_n_layers\n self.hn_hidden_size = cfg.hn_hidden_size\n self.main_n_layers = cfg.main_n_layers\n\n middle_layers = []\n for n in range(self.hn_n_layers - 2):\n middle_layers.append(nn.Linear(self.hn_hidden_size, self.hn_hidden_size))\n middle_layers.append(nn.ReLU())\n self.num_input_features_hn = self.n_dims + 
self.max_categories\n\n self.hypernetworks = nn.ModuleList()\n self.hn_emb_to_weights = nn.ModuleList()\n\n for n in range(self.main_n_layers - 1):\n if n > 0:\n self.num_input_features_hn = self.n_dims * 2 + self.max_categories\n num_input_features_hn = self.num_input_features_hn + self.n_dims * 2\n\n hn_layers = []\n hn_layers.append(nn.Linear(num_input_features_hn, self.hn_hidden_size))\n hn_layers.append(nn.ReLU())\n hn_layers = hn_layers + middle_layers\n\n self.hypernetworks.append(nn.Sequential(*hn_layers))\n self.output_size_hn = (self.n_dims + 1) * self.n_dims\n self.hn_emb_to_weights.append(\n nn.Linear(self.hn_hidden_size, self.output_size_hn)\n )\n\n hn_layers = []\n last_hn_output_size = self.n_dims + 1\n self.num_input_features_hn += self.n_dims * 2\n\n hn_layers.append(nn.Linear(self.num_input_features_hn, self.hn_hidden_size))\n hn_layers.append(nn.ReLU())\n hn_layers = hn_layers + middle_layers\n hn_layers.append(nn.Linear(self.hn_hidden_size, last_hn_output_size))\n self.hypernetworks.append(nn.Sequential(*hn_layers))\n self.nn_bias = nn.Parameter(torch.ones(2))\n\n def forward(self, X, y, n_classes):\n X = X.flatten(start_dim=1)\n rf_linear = nn.Linear(X.shape[1], self.rf_size, bias=False)\n nn.init.kaiming_normal_(rf_linear.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n rf_linear.weight.requires_grad = False\n rf = nn.Sequential(rf_linear, nn.ReLU()).to(X.device)\n with torch.no_grad():\n X = rf(X)\n if self.torch_pca:\n self.pca = TorchPCA(n_components=self.n_dims)\n else:\n self.pca = PCA(n_components=self.n_dims)\n if self.torch_pca:\n X = self.pca.fit_transform(X)\n else:\n X = torch.from_numpy(self.pca.fit_transform(X.cpu().numpy())).to(X.device)\n X = torch.clamp(X, -self.clip_data_value, self.clip_data_value)\n\n out = X\n pca_global_mean = torch.mean(out, axis=0)\n pca_perclass_mean = []\n for lab in range(n_classes):\n if torch.sum((y == lab)) > 0:\n class_mean = torch.mean(out[y == lab], dim=0, keepdim=True)\n else:\n class_mean = torch.mean(out, dim=0, keepdim=True)\n pca_perclass_mean.append(class_mean)\n pca_perclass_mean = torch.cat(pca_perclass_mean)\n\n pca_concat = []\n for ii, lab in enumerate(y):\n if pca_perclass_mean.ndim == 1:\n pca_perclass_mean = pca_perclass_mean.unsqueeze(0)\n if out.ndim == 1:\n out = out.unsqueeze(0)\n\n lab_index = lab.item() if torch.is_tensor(lab) else lab\n lab_index = min(lab_index, pca_perclass_mean.size(0) - 1)\n\n row = torch.cat((out[ii], pca_global_mean, pca_perclass_mean[lab_index]))\n pca_concat.append(row)\n pca_output = torch.vstack(pca_concat)\n y_onehot = F.one_hot(y, self.max_categories)\n\n main_network = []\n for n in range(self.main_n_layers - 1):\n if n > 0:\n data = torch.cat((out, pca_output, y_onehot), dim=1)\n else:\n data = torch.cat((pca_output, y_onehot), dim=1)\n if n % 2 == 0:\n residual_connection = out\n\n weights = get_main_weights(\n data, self.hypernetworks[n], self.hn_emb_to_weights[n]\n )\n out, main_linear_layer = forward_linear_layer(out, weights, self.n_dims)\n if n % 2 == 0:\n out = F.relu(out)\n else:\n out = out + residual_connection\n out = F.relu(out)\n main_network.append(main_linear_layer)\n data = torch.cat((out, pca_output, y_onehot), dim=1)\n weights_per_sample = get_main_weights(data, self.hypernetworks[-1])\n\n weights = []\n last_input_mean = []\n for lab in range(n_classes):\n if torch.sum((y == lab)) > 0:\n w = torch.mean(weights_per_sample[y == lab], dim=0, keepdim=True)\n input_mean = torch.mean(out[y == lab], dim=0, keepdim=True)\n else:\n w = 
torch.mean(weights_per_sample, dim=0, keepdim=True)\n input_mean = torch.mean(out, dim=0, keepdim=True)\n weights.append(w)\n last_input_mean.append(input_mean)\n weights = torch.cat(weights)\n last_input_mean = torch.cat(last_input_mean)\n weights[:, :-1] = weights[:, :-1] + last_input_mean\n weights = weights.T\n out, last_linear_layer = forward_linear_layer(out, weights, n_classes)\n main_network.append(last_linear_layer)\n\n return rf, self.pca, main_network" } ]
import os import math import torch import requests import numpy as np import pandas as pd import torch.nn.functional as F from torch import Tensor from types import SimpleNamespace from .config import config from sklearn.base import BaseEstimator from sklearn.utils.validation import check_X_y, check_array, check_is_fitted from sklearn.impute import SimpleImputer from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder, StandardScaler from .utils import ( seed_everything, transform_data_for_main_network, forward_main_network, nn_bias_logits, fine_tune_main_network, ) from .model import HyperFast
4,569
        x_test = self._scaler.transform(x_test)
        return x_test

    def _initialize_fit_attributes(self):
        self._rfs = []
        self._pcas = []
        self._main_networks = []
        self._X_preds = []
        self._y_preds = []

    def _sample_data(self, X, y):
        indices = torch.randperm(len(X))[: self.batch_size]
        X_pred, y_pred = X[indices].flatten(start_dim=1), y[indices]
        if X_pred.shape[0] < self._cfg.n_dims:
            n_repeats = math.ceil(self._cfg.n_dims / X_pred.shape[0])
            X_pred = torch.repeat_interleave(X_pred, n_repeats, axis=0)
            y_pred = torch.repeat_interleave(y_pred, n_repeats, axis=0)
        return X_pred, y_pred

    def _store_network(self, rf, pca, main_network, X_pred, y_pred):
        self._rfs.append(rf)
        self._pcas.append(pca)
        self._main_networks.append(main_network)
        self._X_preds.append(X_pred)
        self._y_preds.append(y_pred)

    def fit(self, X, y, cat_features=[]):
        """
        Generates a main model for the given data.

        Args:
            X (array-like): Input features.
            y (array-like): Target values.
            cat_features (list, optional): List of categorical features. Defaults to an empty list.
        """
        seed_everything(self.seed)
        X, y = check_X_y(X, y)
        self._cat_features = cat_features
        self.n_features_in_ = X.shape[1]
        self.classes_ = np.unique(y)
        X, y = self._preprocess_fitting_data(X, y)
        self._initialize_fit_attributes()

        for n in range(self.n_ensemble):
            X_pred, y_pred = self._sample_data(X, y)
            self.n_classes_ = len(torch.unique(y_pred).cpu().numpy())
            rf, pca, main_network = self._model(X_pred, y_pred, self.n_classes_)

            if self.optimization == "ensemble_optimize":
                rf, pca, main_network, self._model.nn_bias = fine_tune_main_network(
                    self._cfg,
                    X_pred,
                    y_pred,
                    self.n_classes_,
                    rf,
                    pca,
                    main_network,
                    self._model.nn_bias,
                    self.device,
                    self.optimize_steps,
                    self.batch_size,
                )
            self._store_network(rf, pca, main_network, X_pred, y_pred)

        if self.optimization == "optimize" and self.optimize_steps > 0:
            assert len(self._main_networks) == 1
            (
                self._rfs[0],
                self._pcas[0],
                self._main_networks[0],
                self._model.nn_bias,
            ) = fine_tune_main_network(
                self._cfg,
                X,
                y,
                self.n_classes_,
                self._rfs[0],
                self._pcas[0],
                self._main_networks[0],
                self._model.nn_bias,
                self.device,
                self.optimize_steps,
                self.batch_size,
            )
        return self

    def predict_proba(self, X):
        check_is_fitted(self)
        X = check_array(X)
        X = self._preprocess_test_data(X)
        with torch.no_grad():
            X = torch.Tensor(X).to(self.device)
            orig_X = X
            yhats = []
            for jj in range(len(self._main_networks)):
                main_network = self._main_networks[jj]
                rf = self._rfs[jj]
                pca = self._pcas[jj]
                X_pred = self._X_preds[jj]
                y_pred = self._y_preds[jj]
                X_transformed = transform_data_for_main_network(
                    X=X, cfg=self._cfg, rf=rf, pca=pca
                )
                outputs, intermediate_activations = forward_main_network(
                    X_transformed, main_network
                )

                if self.nn_bias:
                    X_pred_ = transform_data_for_main_network(
                        X=X_pred, cfg=self._cfg, rf=rf, pca=pca
                    )
                    outputs_pred, intermediate_activations_pred = forward_main_network(
                        X_pred_, main_network
                    )
                    for bb, bias in enumerate(self._model.nn_bias):
                        if bb == 0:
class HyperFastClassifier(BaseEstimator):
    """
    A scikit-learn-like interface for the HyperFast model.

    Attributes:
        device (str): Device to run the model on.
        n_ensemble (int): Number of ensemble models to use.
        batch_size (int): Size of the batch for weight prediction and ensembling.
        nn_bias (bool): Whether to use nearest neighbor bias.
        optimization (str): Strategy for optimization, can be None, 'optimize', or 'ensemble_optimize'.
        optimize_steps (int): Number of optimization steps.
        torch_pca (bool): Whether to use PyTorch-based PCA optimized for GPU (fast) or scikit-learn PCA (slower).
        seed (int): Random seed for reproducibility.
    """

    def __init__(
        self,
        device="cuda:0",
        n_ensemble=16,
        batch_size=2048,
        nn_bias=False,
        optimization="ensemble_optimize",
        optimize_steps=64,
        torch_pca=True,
        seed=3,
    ):
        self.device = device
        self.n_ensemble = n_ensemble
        self.batch_size = batch_size
        self.nn_bias = nn_bias
        self.optimization = optimization
        self.optimize_steps = optimize_steps
        self.torch_pca = torch_pca
        self.seed = seed
        seed_everything(self.seed)
        self._cfg = self._load_config(config, self.device, self.torch_pca, self.nn_bias)
        self._model = self._initialize_model(self._cfg)

    def _load_config(self, config, device, torch_pca, nn_bias):
        cfg = SimpleNamespace(**config)
        cfg.device = device
        cfg.torch_pca = torch_pca
        cfg.nn_bias = nn_bias
        return cfg

    def _initialize_model(self, cfg):
        model = HyperFast(cfg).to(cfg.device)
        if not os.path.exists(cfg.model_path):
            self._download_model(cfg.model_url, cfg.model_path)
        try:
            print(f"Loading model from {cfg.model_path}...", flush=True)
            model.load_state_dict(
                torch.load(cfg.model_path, map_location=torch.device(cfg.device))
            )
            print(f"Model loaded from {cfg.model_path}", flush=True)
        except FileNotFoundError as e:
            raise FileNotFoundError(f"Model file not found at {cfg.model_path}") from e
        model.eval()
        return model

    def _download_model(self, url, local_path):
        print(
            f"Downloading model from {url}, since no model was found at {local_path}",
            flush=True,
        )
        response = requests.get(url)
        if response.status_code == 200:
            with open(local_path, "wb") as f:
                f.write(response.content)
            print(f"Model downloaded and saved to {local_path}")
        else:
            raise ConnectionError(f"Failed to download the model from {url}")

    def _preprocess_fitting_data(self, x: np.ndarray, y: np.ndarray) -> np.ndarray:
        x = np.array(x, dtype=np.float32).copy()
        y = np.array(y, dtype=np.int64).copy()

        # Impute missing values for numerical features with the mean
        self._num_imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
        self._all_feature_idxs = np.arange(x.shape[1])
        self._numerical_feature_idxs = np.setdiff1d(
            self._all_feature_idxs, self._cat_features
        )
        if len(self._numerical_feature_idxs) > 0:
            self._num_imputer.fit(x[:, self._numerical_feature_idxs])
            x[:, self._numerical_feature_idxs] = self._num_imputer.transform(
                x[:, self._numerical_feature_idxs]
            )

        if len(self._cat_features) > 0:
            # Impute missing values for categorical features with the most frequent category
            self.cat_imputer = SimpleImputer(
                missing_values=np.nan, strategy="most_frequent"
            )
            self.cat_imputer.fit(x[:, self._cat_features])
            x[:, self._cat_features] = self.cat_imputer.transform(
                x[:, self._cat_features]
            )

            # One-hot encode categorical features
            x = pd.DataFrame(x)
            self.one_hot_encoder = ColumnTransformer(
                transformers=[
                    (
                        "cat",
                        OneHotEncoder(sparse=False, handle_unknown="ignore"),
                        self._cat_features,
                    )
                ],
                remainder="passthrough",
            )
            self.one_hot_encoder.fit(x)
            x = self.one_hot_encoder.transform(x)

        # Standardize data
        self._scaler = StandardScaler()
        self._scaler.fit(x)
        x = self._scaler.transform(x)
        return torch.tensor(x, dtype=torch.float).to(self.device), torch.tensor(
            y, dtype=torch.long
        ).to(self.device)

    def _preprocess_test_data(self, x_test):
        x_test = np.array(x_test, dtype=np.float32).copy()

        # Impute missing values for numerical features with the mean
        if len(self._numerical_feature_idxs) > 0:
            x_test[:, self._numerical_feature_idxs] = self._num_imputer.transform(
                x_test[:, self._numerical_feature_idxs]
            )

        if len(self._cat_features) > 0:
            # Impute missing values for categorical features with the most frequent category
            x_test[:, self._cat_features] = self.cat_imputer.transform(
                x_test[:, self._cat_features]
            )

            # One-hot encode categorical features
            x_test = pd.DataFrame(x_test)
            x_test = self.one_hot_encoder.transform(x_test)

        # Standardize data
        x_test = self._scaler.transform(x_test)
        return x_test

    def _initialize_fit_attributes(self):
        self._rfs = []
        self._pcas = []
        self._main_networks = []
        self._X_preds = []
        self._y_preds = []

    def _sample_data(self, X, y):
        indices = torch.randperm(len(X))[: self.batch_size]
        X_pred, y_pred = X[indices].flatten(start_dim=1), y[indices]
        if X_pred.shape[0] < self._cfg.n_dims:
            n_repeats = math.ceil(self._cfg.n_dims / X_pred.shape[0])
            X_pred = torch.repeat_interleave(X_pred, n_repeats, axis=0)
            y_pred = torch.repeat_interleave(y_pred, n_repeats, axis=0)
        return X_pred, y_pred

    def _store_network(self, rf, pca, main_network, X_pred, y_pred):
        self._rfs.append(rf)
        self._pcas.append(pca)
        self._main_networks.append(main_network)
        self._X_preds.append(X_pred)
        self._y_preds.append(y_pred)

    def fit(self, X, y, cat_features=[]):
        """
        Generates a main model for the given data.

        Args:
            X (array-like): Input features.
            y (array-like): Target values.
            cat_features (list, optional): List of categorical features. Defaults to an empty list.
        """
        seed_everything(self.seed)
        X, y = check_X_y(X, y)
        self._cat_features = cat_features
        self.n_features_in_ = X.shape[1]
        self.classes_ = np.unique(y)
        X, y = self._preprocess_fitting_data(X, y)
        self._initialize_fit_attributes()

        for n in range(self.n_ensemble):
            X_pred, y_pred = self._sample_data(X, y)
            self.n_classes_ = len(torch.unique(y_pred).cpu().numpy())
            rf, pca, main_network = self._model(X_pred, y_pred, self.n_classes_)

            if self.optimization == "ensemble_optimize":
                rf, pca, main_network, self._model.nn_bias = fine_tune_main_network(
                    self._cfg,
                    X_pred,
                    y_pred,
                    self.n_classes_,
                    rf,
                    pca,
                    main_network,
                    self._model.nn_bias,
                    self.device,
                    self.optimize_steps,
                    self.batch_size,
                )
            self._store_network(rf, pca, main_network, X_pred, y_pred)

        if self.optimization == "optimize" and self.optimize_steps > 0:
            assert len(self._main_networks) == 1
            (
                self._rfs[0],
                self._pcas[0],
                self._main_networks[0],
                self._model.nn_bias,
            ) = fine_tune_main_network(
                self._cfg,
                X,
                y,
                self.n_classes_,
                self._rfs[0],
                self._pcas[0],
                self._main_networks[0],
                self._model.nn_bias,
                self.device,
                self.optimize_steps,
                self.batch_size,
            )
        return self

    def predict_proba(self, X):
        check_is_fitted(self)
        X = check_array(X)
        X = self._preprocess_test_data(X)
        with torch.no_grad():
            X = torch.Tensor(X).to(self.device)
            orig_X = X
            yhats = []
            for jj in range(len(self._main_networks)):
                main_network = self._main_networks[jj]
                rf = self._rfs[jj]
                pca = self._pcas[jj]
                X_pred = self._X_preds[jj]
                y_pred = self._y_preds[jj]
                X_transformed = transform_data_for_main_network(
                    X=X, cfg=self._cfg, rf=rf, pca=pca
                )
                outputs, intermediate_activations = forward_main_network(
                    X_transformed, main_network
                )

                if self.nn_bias:
                    X_pred_ = transform_data_for_main_network(
                        X=X_pred, cfg=self._cfg, rf=rf, pca=pca
                    )
                    outputs_pred, intermediate_activations_pred = forward_main_network(
                        X_pred_, main_network
                    )
                    for bb, bias in enumerate(self._model.nn_bias):
                        if bb == 0:
outputs = nn_bias_logits(
4
2023-11-14 05:56:47+00:00
8k
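The record above cuts off mid-way through predict_proba, with the next_line field supplying the continuation. The class itself exposes a scikit-learn-style fit / predict_proba API, so a minimal usage sketch looks like the following; the hyperfast import path, the synthetic data, and CPU execution are assumptions, and constructing the classifier will attempt to download the pretrained checkpoint named in its config.

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split

from hyperfast import HyperFastClassifier  # assumed export path

X, y = make_classification(n_samples=512, n_features=20, n_informative=8,
                           n_classes=3, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# cat_features stays empty: make_classification yields numeric features only.
clf = HyperFastClassifier(device="cpu", n_ensemble=4, optimization=None)
clf.fit(X_train, y_train, cat_features=[])
probs = clf.predict_proba(X_test)  # expected shape: (len(X_test), 3)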
TCLResearchEurope/torch-dag
torch_dag/patterns_v2/transform.py
[ { "identifier": "DagModule", "path": "torch_dag/core/dag_module.py", "snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVertex] = None,\n ):\n super().__init__()\n self.name = name\n self.vertices = vertices if vertices is not None else []\n self.output_vertex = output_vertex\n self.forward_dict = None\n self.inputs_dict = None\n self.cache_forward_dict = False\n self._inner_modules = None\n self.forward_scaffold = {}\n self.output_index = None\n self.compiled = False\n self.update_inner_modules()\n\n def compile(self, inputs: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None):\n \"\"\"\n In general `forward` method of DagModule is not `torch.compile` friendly. To overcome that\n we need to use a modified implementation of the forward pass, with no cacheing of intermediate tensors.\n Additionally, some modules may require a compile-type step for `torch.compile` usage.\n :param inputs: optional input (a dummy tensor for a single forward pass)\n \"\"\"\n if inputs is not None:\n is_training = self.training\n if is_training:\n self.eval()\n _ = self(inputs)\n if is_training:\n self.train()\n\n self.forward_scaffold, self.output_index = self.get_forward_scaffold()\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n v.module.compile()\n self.compiled = True\n\n @property\n def inner_modules(self) -> torch.nn.ModuleList:\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n return self._inner_modules\n\n @property\n def input_vertices(self) -> List[InputVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InputVertex)]\n\n @property\n def inner_vertices(self) -> List[InnerVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InnerVertex)]\n\n def update_inner_modules(self):\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n for iv in self.inner_vertices:\n if isinstance(iv.module, DagModule):\n iv.module.update_inner_modules()\n\n def get_vertex_by_name(self, name: str) -> Union[InnerVertex, InputVertex]:\n result = [vertex for vertex in self.vertices if vertex.name == name]\n if len(result) == 1:\n return result[0]\n elif len(result) > 1:\n raise AssertionError(f'Multiple vertices found with name: {name} -> {result}')\n else:\n return\n\n def get_forward_scaffold(self):\n # a mapping between vertices index and its predecessors indices\n forward_scaffold = {}\n for k, vertex in enumerate(self.vertices):\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n predecessors_indices = [\n self.vertices.index(pd) for pd in predecessors\n ]\n forward_scaffold[k] = predecessors_indices\n\n output_index = self.vertices.index(self.output_vertex)\n\n return forward_scaffold, output_index\n\n def compiled_forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n\n assert self.compiled\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_list = [None for _ in range(len(self.vertices))]\n\n for k, input in enumerate(inputs):\n forward_list[k] = input\n\n num_inputs = len(inputs)\n\n for k in range(len(self.vertices)):\n if k < num_inputs:\n pass\n else:\n\n pd_indices = self.forward_scaffold[k]\n 
module_inputs = [forward_list[pd_index] for pd_index in pd_indices]\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n try:\n result = self.vertices[k].module(module_inputs)\n except (TypeError, AttributeError):\n result = self.vertices[k].module(*module_inputs)\n result = _postprocess_module_output(result)\n\n forward_list[k] = result\n\n return forward_list[self.output_index]\n\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n # this is for `torch.compile` usage\n if self.compiled:\n return self.compiled_forward(inputs)\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_dict = {}\n for k, v in enumerate(self.input_vertices):\n forward_dict[v] = inputs[k]\n\n # forward_dict = {vertex: tensor for vertex, tensor in zip(self.input_vertices, inputs)}\n inputs_dict = {}\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n module_inputs = [forward_dict[pd] for pd in predecessors]\n inputs_dict[vertex] = module_inputs\n\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n\n try:\n result = vertex.module(module_inputs)\n except (TypeError, AttributeError):\n result = vertex.module(*module_inputs)\n # if isinstance(result, Tuple):\n result = _postprocess_module_output(result)\n\n forward_dict[vertex] = result\n if self.cache_forward_dict:\n self.forward_dict = forward_dict\n self.inputs_dict = inputs_dict\n return forward_dict[self.output_vertex]\n\n def traverse(\n self,\n processor: VertexProcessor = None,\n ):\n if processor is None:\n inner_vertices = []\n for inner_vertex in self.inner_vertices:\n if isinstance(inner_vertex.module, DagModule):\n inner_vertices.extend(inner_vertex.module.traverse())\n inner_vertices.append(inner_vertex)\n return inner_vertices\n else:\n for inner_vertex in self.traverse():\n processor(inner_vertex)\n # TODO: Remove after validation\n # self._update_inner_modules()\n\n def _check_if_name_unique(self, name: str):\n if name in [v.name for v in self.vertices]:\n raise ValueError(\n f'{self.name} already has an Vertex with name {name}. 
Please use different name.'\n )\n\n def add_input_vertex(self, name: str) -> InputVertex:\n self._check_if_name_unique(name)\n input_vertex = InputVertex(name)\n self.vertices.append(input_vertex)\n return input_vertex\n\n def add_vertex(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ) -> InnerVertex:\n self._check_if_name_unique(name)\n assert isinstance(module, torch.nn.Module)\n\n inner_vertex = InnerVertex(\n name=name,\n module=module,\n predecessors=predecessors,\n )\n for predecessor in predecessors:\n if predecessor not in self.vertices:\n raise ValueError(f'The predecessor: {predecessor} of InnerVertex: {InnerVertex} is not in '\n f'the DagModule: {self.name}')\n self.vertices.append(inner_vertex)\n self.inner_modules.append(module)\n inner_vertex.dag_module = self\n return inner_vertex\n\n def __repr__(self):\n representation = f'{self.__class__.__name__}[{self.name}]'\n if len(self.vertices) == 0:\n return representation\n for inner_vertex in self.inner_vertices:\n inner_vertex.MAX_LEN_REPR = self.MAX_LEN_REPR\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n representation += f'\\n << {vertex.name} '\n else:\n index = self.inner_vertices.index(vertex)\n prefix = '>>' if vertex == self.output_vertex else '*'\n if isinstance(vertex.module, DagModule):\n prefix += '#'\n representation += f\"\\n{prefix} {index}: {vertex} \" \\\n f\"--> predecessors: {vertex.predecessors}, \" \\\n f\"successors: {vertex.successors}\"\n representation += f' {self.add_custom_module_info(vertex)}'\n for vertex in self.inner_vertices:\n vertex.MAX_LEN_REPR = None\n return representation\n\n def add_custom_module_info(self, vertex: InnerVertex):\n m = vertex.module\n if isinstance(m, torch.nn.Conv2d):\n return f'Conv2d(in={m.in_channels}, out={m.out_channels}, ks={m.kernel_size}, padding={m.padding})'\n if isinstance(m, torch.nn.Linear):\n return f'Linear(in={m.in_features}, out={m.out_features})'\n return ''\n\n def mark_current_top_vertex_as_output(self):\n if not self.inner_vertices:\n raise ValueError(f'One cannot mark top node in an empty {self}')\n if self.output_vertex is not None:\n logger.warning(f'{self} already has an output vertex. 
Replacing...')\n self.output_vertex = self.inner_vertices[-1]\n\n @property\n def module_classes(self) -> Set:\n return set([m.__class__ for m in self.inner_modules])\n\n def unroll_inner_modules(self) -> List[torch.nn.Module]:\n result = []\n for m in self.inner_modules:\n if not isinstance(m, DagModule):\n result.append(m)\n else:\n result.extend(m.unroll_inner_modules())\n return result\n\n def save(self, path: str):\n # TODO: Remove after validation\n # self._update_inner_modules()\n self.enforce_names_uniqueness()\n os.makedirs(path, exist_ok=True)\n atomic_modules = self.unroll_inner_modules()\n self.clear_custom_buffers()\n torch.save(torch.nn.ModuleList(atomic_modules), os.path.join(path, 'modules.pt'))\n with open(os.path.join(path, 'config.dict.json'), 'w') as f:\n json.dump(self.config_dict(), f)\n\n def clear_custom_buffers(self):\n for module in self.unroll_inner_modules():\n if hasattr(module, 'clear_custom_buffers'):\n module._buffers.clear()\n\n @classmethod\n def load(\n cls,\n path: str,\n map_location='cpu',\n custom_module_classes: Tuple[Type[torch.nn.Module]] = (),\n ) -> \"DagModule\":\n \"\"\"\n\n :param path: directory from which model should be loaded\n :param map_location: defaults to `cpu`\n :param custom_module_classes: custom torch module classes needed for loading a `DagModule` that was built\n using these modules\n \"\"\"\n with open(os.path.join(path, 'config.dict.json'), 'r') as f:\n config_dict = json.load(f)\n m = torch.load(os.path.join(path, 'modules.pt'), map_location=map_location)\n return cls.load_from_config_dict_and_atomic_modules(\n config_dict=config_dict,\n atomic_modules=m\n )\n\n @classmethod\n def load_from_config_dict_and_atomic_modules(\n cls,\n config_dict: Dict,\n atomic_modules: List[torch.nn.Module]\n ) -> \"DagModule\":\n output_index = config_dict.pop('output_index')\n name = config_dict.pop('name')\n if 'class' in config_dict:\n class_name = config_dict.pop('class')\n else:\n class_name = cls.__name__\n dag = None\n if class_name == cls.__name__:\n dag = cls(name)\n for subclass in cls.__subclasses__():\n if subclass.__name__ == class_name:\n dag = subclass(name)\n\n if dag is None:\n raise NotImplementedError(f'There is no subclass with name: {class_name}.')\n\n for k, (key, config) in enumerate(config_dict.items()):\n if config['type'] == 'input':\n dag.add_input_vertex(name=config['name'])\n else:\n predecessors = [dag.vertices[index] for index in config['predecessor_indices']]\n if config['is_atomic']:\n module = atomic_modules[config['module_index']]\n else:\n module = cls.load_from_config_dict_and_atomic_modules(\n config_dict=config['module_dict'],\n atomic_modules=atomic_modules,\n )\n vertex = dag.add_vertex(\n name=config['name'],\n module=module,\n predecessors=predecessors,\n )\n orbit = config.get('orbit')\n if orbit:\n vertex.orbit = orbit\n if k == output_index:\n dag.output_vertex = vertex\n\n return dag\n\n def config_dict(self, atomic_modules: List[torch.nn.Module] = None) -> Dict:\n if atomic_modules is None:\n atomic_modules = self.unroll_inner_modules()\n config_dict = {}\n for k, vertex in enumerate(self.vertices):\n _config = vertex.config_dict(atomic_modules)\n config_dict[k] = _config\n\n config_dict['name'] = self.name\n config_dict['class'] = self.__class__.__name__\n config_dict['output_index'] = self.vertices.index(self.output_vertex)\n return config_dict\n\n def _get_inner_vertex_predecessor_indices(self, inner_vertex: InnerVertex) -> List[int]:\n result = [\n self.vertices.index(predecessor)\n for 
predecessor in inner_vertex.predecessors\n ]\n return result\n\n @property\n def flat(self) -> bool:\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n return False\n return True\n\n def flatten(self, input_shape_for_verification: Optional[Tuple[int, ...]] = None) -> \"DagModule\":\n \"\"\"\n This method will switch the `dag` to `eval` mode if `input_shape_for_verification` is provided.\n :param input_shape_for_verification:\n :return:\n \"\"\"\n dag_copy = deepcopy(self)\n if self.flat:\n return dag_copy\n\n if input_shape_for_verification:\n dag_copy.eval()\n x = torch.normal(mean=torch.zeros(size=input_shape_for_verification))\n reference_output = dag_copy(x)\n\n # builds a new cell (not in place flatten)\n dag = self.__class__(name=dag_copy.name, vertices=dag_copy.input_vertices)\n for v in dag_copy.inner_vertices:\n if not isinstance(v.module, DagModule):\n dag.vertices.append(v)\n v.dag_module = dag\n if v == dag_copy.output_vertex:\n dag.output_vertex = v\n else:\n inner_dag_predecessors = v.predecessors\n inner_dag_successors = v.successors\n inner_dag = v.module.flatten()\n for iv in inner_dag.inner_vertices:\n for pd in iv.predecessors: # remap predecessors where needed\n if isinstance(pd, InputVertex):\n pd_index_in_inner_dag = inner_dag.input_vertices.index(pd)\n index = iv.predecessors.index(pd)\n iv.predecessors[index] = inner_dag_predecessors[pd_index_in_inner_dag]\n if inner_dag.output_vertex == iv: # remap output of inner dag\n for suc in inner_dag_successors:\n index = suc.predecessors.index(v)\n suc.predecessors[index] = iv\n iv.dag_module = dag\n dag.vertices.append(iv)\n if v == dag_copy.output_vertex:\n dag.output_vertex = iv\n assert all([e in dag.vertices for e in iv.predecessors])\n\n if input_shape_for_verification:\n dag.eval()\n new_output = dag(x)\n assert torch.abs(reference_output - new_output).sum() == 0.0\n\n # TODO: Remove after validation\n # self._update_inner_modules()\n dag.enforce_names_uniqueness()\n\n return dag\n\n def enforce_names_uniqueness(self):\n names = [v.name for v in self.vertices]\n while len(names) != len(set(names)):\n names_counter = Counter()\n for v in self.vertices:\n name = v.name\n names_counter[name] += 1\n if names_counter[name] > 1:\n new_name = f'{name}_{names_counter[name] - 1}'\n logger.debug(f'Renaming: {name} -> {new_name}')\n v.name = new_name\n names = [v.name for v in self.vertices]\n\n def clear_tensor_dicts(self):\n self.forward_dict = None\n self.inputs_dict = None\n\n @property\n def device(self):\n # https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180/10\n # useful, but may be dangerous\n self.update_inner_modules()\n device_ = next(iter(self.parameters())).device\n if not all([p.device == device_ for p in self.parameters()]):\n raise AssertionError(f'Not all parameters of {self.name} are on the same device')\n return device_" }, { "identifier": "InnerVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InnerVertex(Vertex):\n def __init__(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ):\n super().__init__(name=name)\n self._module = module\n self._predecessors = list(predecessors)\n self.dag_module: \"DagModule\" = None\n self.orbit = None\n\n @property\n def successors(self) -> List['InnerVertex']:\n if self.dag_module is None:\n logger.error(f'Trying to get successors of an InnerVertex that has not been assigned to any DagModule.')\n return [vertex for vertex in self.dag_module.inner_vertices if self in vertex.predecessors]\n\n 
@property\n def predecessors(self) -> List[Vertex]:\n return self._predecessors\n\n @property\n def predecessor_indices(self) -> List[Vertex]:\n return [self.dag_module.vertices.index(pd) for pd in self.predecessors]\n\n @predecessors.setter\n def predecessors(self, new_predecessors: List[Vertex]):\n if not isinstance(new_predecessors, list):\n logger.error(f'Predecessors is expected to be a list. Got {type(new_predecessors)} except.')\n self._predecessors = new_predecessors\n\n @property\n def module(self) -> torch.nn.Module:\n return self._module\n\n @module.setter\n def module(self, module: torch.nn.Module):\n self._module = module\n # TODO: Remove after validation\n self.dag_module.update_inner_modules()\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n is_atomic = not isinstance(self.module, DagModule)\n result = {\n 'name': self.name,\n 'predecessor_indices': self.predecessor_indices,\n 'is_atomic': is_atomic,\n 'type': 'inner',\n 'orbit': self.orbit,\n }\n if not is_atomic:\n result['module_dict'] = self.module.config_dict(atomic_modules)\n else:\n result['module_index'] = atomic_modules.index(self.module)\n return result" }, { "identifier": "InputVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InputVertex(Vertex):\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n return {\n 'name': self.name,\n 'type': 'input',\n }" }, { "identifier": "Pattern", "path": "torch_dag/patterns_v2/pattern.py", "snippet": "class Pattern(abc.ABC):\n def __iter__(self) -> Iterator[PatternNode]:\n yield from self.pattern_nodes\n\n @abc.abstractmethod\n def search_at_node(self, start_search_at: DagModule) -> List[Match]:\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def pattern_nodes(self) -> List[PatternNode]:\n raise NotImplementedError\n\n @property\n def pattern_node_names(self) -> List[str]:\n return [pn.name for pn in self.pattern_nodes]\n\n def search(self, inner_vertexes: InnerVertex) -> List[Match]:\n matches = []\n for iv in inner_vertexes:\n match = self.search_at_node(start_search_at=iv)\n if match:\n logger.info(f'[+] Found match {match} at {iv}')\n matches += [match]\n\n return matches" }, { "identifier": "Match", "path": "torch_dag/patterns_v2/match.py", "snippet": "class Match(abc.ABC):\n def __init__(self, names: List[str] = None):\n self._names = [] if not names else names\n\n def __iter__(self) -> Iterator[InnerVertex]:\n yield from self.matched_ivs\n\n def __len__(self):\n return len(self.matched_ivs)\n\n def __eq__(self, other: 'Match'):\n return len(self) == len(other) and all([other_icn in self.matched_ivs for other_icn in other])\n\n def __hash__(self):\n return hash(tuple(self.matched_ivs))\n\n def get_op_by_pattern_node_name(self, pattern_node_name: str) -> torch.nn.Module:\n for iv, pn_name in zip(self.matched_ivs, self.names):\n if pn_name == pattern_node_name:\n return iv.module\n\n @property\n def names(self) -> List[str]:\n return self._names\n\n @property\n @abc.abstractmethod\n def matched_ivs(self) -> List[InnerVertex]:\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def start(self) -> List[InnerVertex]:\n raise NotImplementedError\n\n @property\n @abc.abstractmethod\n def end(self) -> List[InnerVertex]:\n raise NotImplementedError\n\n @property\n def can_be_rolled(self) -> bool:\n allowed_successors = [*self.end[0].successors, *self.matched_ivs]\n for iv in self:\n if any([successor not in allowed_successors for successor in iv.successors]):\n return False\n\n if len(self.start) >= 
2:\n return False\n\n return True" } ]
import sys
import enum
import copy
import logging
import numpy as np
import torch
from typing import Any, Type, Union
from typing import Dict
from typing import List
from typing import Tuple
from typing import Callable
from torch_dag.core.dag_module import DagModule
from torch_dag.core.dag_module import InnerVertex, InputVertex
from torch_dag.patterns_v2.pattern import Pattern
from torch_dag.patterns_v2.match import Match
5,641
logger = logging.getLogger(__name__)


class SelectionType(enum.Enum):
    FIRST = 'first'
    LAST = 'last'
    ALL = 'all'
    ALL_BUT_FIRST = 'all_but_first'
    ALL_BUT_LAST = 'all_but_last'


class Transform:
    """
    For more information on patterns please refer to
    https://gitlab.com/tcl-research/auto-ml/blog/-/blob/main/2022-06-16-patterns/README.md
    or
    https://gitlab.com/tcl-research/auto-ml/node-api/-/blob/master/tutorials/advanced/pattens.ipynb
    """
    PRESERVE_PARAMS_FROM = 'preserve_old_params_from'

    @staticmethod
    def _assert_forward_pass_equal(
logger = logging.getLogger(__name__)


class SelectionType(enum.Enum):
    FIRST = 'first'
    LAST = 'last'
    ALL = 'all'
    ALL_BUT_FIRST = 'all_but_first'
    ALL_BUT_LAST = 'all_but_last'


class Transform:
    """
    For more information on patterns please refer to
    https://gitlab.com/tcl-research/auto-ml/blog/-/blob/main/2022-06-16-patterns/README.md
    or
    https://gitlab.com/tcl-research/auto-ml/node-api/-/blob/master/tutorials/advanced/pattens.ipynb
    """
    PRESERVE_PARAMS_FROM = 'preserve_old_params_from'

    @staticmethod
    def _assert_forward_pass_equal(
cell1: DagModule,
0
2023-11-17 15:36:44+00:00
8k
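The DagModule snippet quoted in this record's context documents a small graph-building API: add_input_vertex, add_vertex with explicit predecessors, and an output_vertex that the forward pass returns. A minimal sketch, assuming the package behaves as those docstrings describe:

import torch
from torch_dag.core.dag_module import DagModule

dag = DagModule(name="toy")
x = dag.add_input_vertex(name="x")
conv = dag.add_vertex(name="conv",
                      module=torch.nn.Conv2d(3, 8, kernel_size=3, padding=1),
                      predecessors=[x])
relu = dag.add_vertex(name="relu", module=torch.nn.ReLU(), predecessors=[conv])
dag.output_vertex = relu  # could also call mark_current_top_vertex_as_output()

out = dag(torch.randn(1, 3, 16, 16))
print(out.shape)  # torch.Size([1, 8, 16, 16])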
timlrx/simple-ai-agents
tests/test_chat_session.py
[ { "identifier": "ChatLLMSession", "path": "simple_ai_agents/chat_session.py", "snippet": "class ChatLLMSession(ChatSession):\n system: str = \"You are a helpful assistant.\"\n llm_options: Optional[LLMOptions] = {\"model\": \"gpt-3.5-turbo\"}\n\n def prepare_request(\n self,\n prompt: str,\n system: Optional[str] = None,\n response_model: Optional[Type[BaseModel]] = None,\n llm_options: Optional[LLMOptions] = None,\n ) -> tuple[\n str,\n dict[str, Any],\n list[dict[str, Any]],\n ChatMessage,\n Optional[Type[BaseModel]],\n ]:\n \"\"\"\n Prepare a request to send to liteLLM.\n\n Args:\n prompt (str): User prompt\n system (str, optional): System prompt. Defaults to None.\n response_model (BaseModel), optional:\n The response model to use for parsing the response.\n Defaults to None.\n llm_options (LLMOptions, optional): LiteLLM options.\n See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for details.\n Defaults to None.\n\n Returns:\n Tuple: (model, kwargs, history, user_message, response_model)\n \"\"\"\n # Just use user prompt, no system prompt required\n if response_model:\n history = [\n {\"role\": \"user\", \"content\": prompt},\n ]\n # If saved messages exist, append it to prompt\n elif self.messages:\n history = [{\"role\": \"system\", \"content\": system or self.system}]\n for msg in self.messages:\n history.append({\"role\": msg.role, \"content\": msg.content})\n history.append({\"role\": \"user\", \"content\": prompt})\n else:\n history = [\n {\"role\": \"system\", \"content\": system or self.system},\n {\"role\": \"user\", \"content\": prompt},\n ]\n user_message = ChatMessage(\n role=\"user\",\n content=prompt,\n )\n\n if llm_options:\n litellm_options: LLMOptions = llm_options\n elif self.llm_options:\n litellm_options: LLMOptions = self.llm_options # type: ignore\n else:\n raise ValueError(\"No LLM options provided.\")\n\n model = litellm_options.get(\"model\")\n if not model:\n raise ValueError(\"No LLM model provided.\")\n kwargs = {k: v for k, v in litellm_options.items() if k != \"model\"}\n\n if response_model:\n response_model, fn_kwargs = handle_response_model(\n response_model=response_model, kwargs=kwargs, mode=Mode.FUNCTIONS\n )\n # Add functions and function_call to kwargs\n kwargs.update(fn_kwargs)\n return model, kwargs, history, user_message, response_model\n\n def gen(\n self,\n prompt: str,\n system: Optional[str] = None,\n save_messages: Optional[bool] = None,\n llm_options: Optional[LLMOptions] = None,\n ) -> str:\n \"\"\"\n Generate a chat response from the LLM.\n\n Args:\n prompt (str): User prompt\n system (str, optional): System prompt. 
Defaults to None.\n save_messages (bool, optional): Whether to save the messages.\n Defaults to None.\n llm_options (LLMOptions, optional): LiteLLM options.\n See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for details.\n Defaults to None.\n \"\"\"\n model, kwargs, history, user_message, _ = self.prepare_request(\n prompt,\n system=system,\n llm_options=llm_options,\n )\n response = completion(model=model, messages=history, **kwargs) # type: ignore\n try:\n content: str = response.choices[0][\"message\"][\"content\"]\n assistant_message = ChatMessage(\n role=\"assistant\",\n content=content,\n finish_reason=response.choices[0][\"finish_reason\"],\n prompt_length=response[\"usage\"][\"prompt_tokens\"],\n completion_length=response[\"usage\"][\"completion_tokens\"],\n total_length=response[\"usage\"][\"total_tokens\"],\n )\n self.add_messages(user_message, assistant_message, save_messages)\n self.total_prompt_length += response[\"usage\"][\"prompt_tokens\"]\n self.total_completion_length += response[\"usage\"][\"completion_tokens\"]\n self.total_length += response[\"usage\"][\"total_tokens\"]\n except KeyError:\n raise KeyError(f\"No AI generation: {response}\")\n\n return content\n\n async def gen_async(\n self,\n prompt: str,\n system: Optional[str] = None,\n save_messages: Optional[bool] = None,\n llm_options: Optional[LLMOptions] = None,\n ) -> str:\n \"\"\"\n Generate a chat response from the LLM.\n\n Args:\n prompt (str): User prompt\n system (str, optional): System prompt. Defaults to None.\n save_messages (bool, optional): Whether to save the messages.\n Defaults to None.\n llm_options (LLMOptions, optional): LiteLLM options.\n See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for details.\n Defaults to None.\n \"\"\"\n model, kwargs, history, user_message, _ = self.prepare_request(\n prompt, system=system, llm_options=llm_options\n )\n response: ModelResponse = await acompletion(\n model=model, messages=history, **kwargs\n ) # type: ignore\n try:\n content: str = response.choices[0][\"message\"][\"content\"]\n assistant_message = ChatMessage(\n role=\"assistant\",\n content=content,\n finish_reason=response.choices[0][\"finish_reason\"],\n prompt_length=response[\"usage\"][\"prompt_tokens\"],\n completion_length=response[\"usage\"][\"completion_tokens\"],\n total_length=response[\"usage\"][\"total_tokens\"],\n )\n self.add_messages(user_message, assistant_message, save_messages)\n self.total_prompt_length += response[\"usage\"][\"prompt_tokens\"]\n self.total_completion_length += response[\"usage\"][\"completion_tokens\"]\n self.total_length += response[\"usage\"][\"total_tokens\"]\n except KeyError:\n raise KeyError(f\"No AI generation: {response}\")\n\n return content\n\n async def gen_model_async(\n self,\n prompt: str,\n response_model: Type[T],\n system: Optional[str] = None,\n llm_options: Optional[LLMOptions] = None,\n validation_retries: int = 1,\n strict: Optional[bool] = None,\n ) -> Type[T]: # type: ignore\n \"\"\"\n Generate a response from the AI and parse it into a response model.\n\n Args:\n prompt (str): User prompt\n system (str, optional): System prompt. 
Defaults to None.\n llm_options (LLMOptions, optional): LiteLLM options.\n See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for details.\n Defaults to None.\n validation_retries (int, optional):\n Number of times to retry generating a valid response model.\n response_model (BaseModel): The response model to use for parsing the response\n strict (bool, optional): Whether to use strict json parsing. Defaults to None.\n\n Returns:\n Type[T]: An instance of the response model\n \"\"\"\n if not response_model:\n raise ValueError(\"No response model provided.\")\n (\n model,\n kwargs,\n history,\n user_message,\n response_model,\n ) = self.prepare_request(\n prompt,\n system=system,\n response_model=response_model,\n llm_options=llm_options,\n ) # type: ignore\n retries = 0\n while retries <= validation_retries:\n # Excepts ValidationError, and JSONDecodeError\n try:\n response: ModelResponse = await acompletion(\n model=model, messages=history, **kwargs\n ) # type: ignore\n model: Type[T] = process_response(\n response,\n response_model=response_model,\n stream=False,\n strict=strict,\n mode=Mode.FUNCTIONS,\n ) # type: ignore\n self.total_prompt_length += response[\"usage\"][\"prompt_tokens\"]\n self.total_completion_length += response[\"usage\"][\"completion_tokens\"]\n self.total_length += response[\"usage\"][\"total_tokens\"]\n except (ValidationError, JSONDecodeError) as e:\n history.append(response.choices[0].message.model_dump()) # type: ignore\n history.append(\n {\n \"role\": \"user\",\n \"content\": f\"Recall the function correctly, exceptions found\\n{e}\",\n }\n )\n retries += 1\n if retries > validation_retries:\n raise e\n return model\n\n def gen_model(\n self,\n prompt: str,\n response_model: Type[T],\n system: Optional[str] = None,\n llm_options: Optional[LLMOptions] = None,\n validation_retries: int = 1,\n strict: Optional[bool] = None,\n ) -> Type[T]: # type: ignore\n \"\"\"\n Generate a response from the AI and parse it into a response model.\n\n Args:\n prompt (str): User prompt\n system (str, optional): System prompt. Defaults to None.\n llm_options (LLMOptions, optional): LiteLLM options.\n See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for details.\n Defaults to None.\n validation_retries (int, optional):\n Number of times to retry generating a valid response model.\n response_model (BaseModel): The response model to use for parsing the response\n strict (bool, optional): Whether to use strict json parsing. 
Defaults to None.\n\n Returns:\n Type[T]: An instance of the response model\n \"\"\"\n if not response_model:\n raise ValueError(\"No response model provided.\")\n (\n model,\n kwargs,\n history,\n user_message,\n response_model,\n ) = self.prepare_request(\n prompt,\n system=system,\n response_model=response_model,\n llm_options=llm_options,\n ) # type: ignore\n retries = 0\n while retries <= validation_retries:\n # Excepts ValidationError, and JSONDecodeError\n try:\n response = completion(model=model, messages=history, **kwargs) # type: ignore\n model: Type[T] = process_response(\n response,\n response_model=response_model,\n stream=False,\n strict=strict,\n mode=Mode.FUNCTIONS,\n ) # type: ignore\n self.total_prompt_length += response[\"usage\"][\"prompt_tokens\"]\n self.total_completion_length += response[\"usage\"][\"completion_tokens\"]\n self.total_length += response[\"usage\"][\"total_tokens\"]\n except (ValidationError, JSONDecodeError) as e:\n history.append(response.choices[0].message.model_dump()) # type: ignore\n history.append(\n {\n \"role\": \"user\",\n \"content\": f\"Recall the function correctly, exceptions found\\n{e}\",\n }\n )\n retries += 1\n if retries > validation_retries:\n raise e\n return model\n\n def stream(\n self,\n prompt: str,\n system: Optional[str] = None,\n save_messages: Optional[bool] = None,\n llm_options: Optional[LLMOptions] = None,\n ) -> Generator[dict[str, str], None, None]:\n \"\"\"\n Generate a streaming response from the LLM.\n Stream response contains \"delta\" and \"response\" keys.\n - `delta` - latest response from the LLM model.\n - `response` - contains the entire conversation history up to that point.\n\n Args:\n prompt (str): User prompt\n system (str, optional): System prompt. Defaults to None.\n save_messages (bool, optional): Whether to save the messages.\n Defaults to None.\n llm_options (LLMOptions, optional): LiteLLM options.\n See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for details.\n Defaults to None.\n\n Yields:\n Generator[dict[str, str], None, None]\n \"\"\"\n model, kwargs, history, user_message, _ = self.prepare_request(\n prompt, system=system, llm_options=llm_options\n )\n\n response = completion(model=model, messages=history, stream=True, **kwargs)\n content_chunks = []\n for chunk in response:\n delta: str = chunk[\"choices\"][0][\"delta\"].get(\"content\") # type: ignore\n if delta:\n content_chunks.append(delta)\n yield {\"delta\": delta, \"response\": \"\".join(content_chunks)}\n\n content = \"\".join(content_chunks)\n # streaming does not currently return token counts\n assistant_message = ChatMessage(\n role=\"assistant\",\n content=content,\n )\n self.add_messages(user_message, assistant_message, save_messages)\n\n async def stream_async(\n self,\n prompt: str,\n system: Optional[str] = None,\n save_messages: Optional[bool] = None,\n llm_options: Optional[LLMOptions] = None,\n ) -> AsyncGenerator[dict[str, str], None]:\n \"\"\"\n Generate a streaming response from the LLM.\n Stream response contains \"delta\" and \"response\" keys.\n - `delta` - latest response from the LLM model.\n - `response` - contains the entire conversation history up to that point.\n\n Args:\n prompt (str): User prompt\n system (str, optional): System prompt. 
Defaults to None.\n save_messages (bool, optional): Whether to save the messages.\n Defaults to None.\n llm_options (LLMOptions, optional): LiteLLM options.\n See [LiteLLM Providers](https://docs.litellm.ai/docs/providers) for details.\n Defaults to None.\n\n Yields:\n AsyncGenerator[dict[str, str], None]\n \"\"\"\n model, kwargs, history, user_message, _ = self.prepare_request(\n prompt, system=system, llm_options=llm_options\n )\n\n response: ModelResponse = await acompletion(\n model=model, messages=history, stream=True, **kwargs\n ) # type: ignore\n content_chunks = []\n async for chunk in response: # type: ignore\n delta: str = chunk[\"choices\"][0][\"delta\"].get(\"content\")\n if delta:\n content_chunks.append(delta)\n yield {\"delta\": delta, \"response\": \"\".join(content_chunks)}\n\n content = \"\".join(content_chunks)\n # streaming does not currently return token counts\n assistant_message = ChatMessage(\n role=\"assistant\",\n content=content,\n )\n self.add_messages(user_message, assistant_message, save_messages)" }, { "identifier": "LLMOptions", "path": "simple_ai_agents/models.py", "snippet": "class LLMOptions(TypedDict, total=False):\n model: str\n functions: List\n function_call: str\n temperature: float\n top_p: float\n n: int\n stream: bool\n stop: str\n max_tokens: float\n presence_penalty: float\n frequency_penalty: float\n logit_bias: dict\n user: str\n deployment_id: str\n request_timeout: int\n api_base: str\n api_version: str\n api_key: str\n model_list: list" } ]
import pytest
from dotenv import load_dotenv
from pydantic import BaseModel

from simple_ai_agents.chat_session import ChatLLMSession
from simple_ai_agents.models import LLMOptions
3,816
load_dotenv()


class UserDetail(BaseModel):
    name: str
    age: int


def test_prepare_request():
    sess = ChatLLMSession()
    prompt = "Hello, how can I help you?"
    system = "Test system"
load_dotenv()


class UserDetail(BaseModel):
    name: str
    age: int


def test_prepare_request():
    sess = ChatLLMSession()
    prompt = "Hello, how can I help you?"
    system = "Test system"
llm_options: LLMOptions = {"model": "test-model", "temperature": 0.5}
1
2023-11-10 06:01:25+00:00
8k
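Beyond prepare_request, the context documents gen_model, which retries until the completion parses into the given Pydantic model. A sketch of a structured-output test in the same style; the model name and the OPENAI_API_KEY expected in .env are assumptions, and asserting exact field values from a live model is inherently brittle:

def test_gen_model():
    sess = ChatLLMSession(llm_options={"model": "gpt-3.5-turbo"})
    user = sess.gen_model(
        "John Doe is 42 years old.",
        response_model=UserDetail,
    )
    assert isinstance(user, UserDetail)
    assert user.age == 42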
DIAGNijmegen/HoVer-UNet
run_train.py
[ { "identifier": "apply_postprocessing", "path": "train/apply_postprocessing.py", "snippet": "def apply_postprocessing(path_weights, path_test, model):\n model.load_state_dict(torch.load(path_weights)['model_state_dict'])\n model.eval()\n data_infer = DatasetPannuke(path_test, mode='infer')\n dataloader = DataLoader(data_infer, shuffle=False, pin_memory=True, num_workers=0, batch_size=64)\n predictions = []\n with tqdm(total=len(dataloader)) as progress:\n for bn, (im, info) in enumerate(dataloader):\n t0 = time()\n im = im.to('cuda')\n im = torch.permute(im, (0, 3, 1, 2)).contiguous()\n with torch.no_grad():\n pred = model(im)\n pred_np = F.softmax(pred[:, :2, ...], dim=1)[:, 1, ...].to('cpu')\n pred_h, pred_v = pred[:, 2, ...].to('cpu'), pred[:, 3, ...].to('cpu')\n\n pred_tp = torch.argmax(F.softmax(pred[:, 4:, ...], dim=1), dim=1).to('cpu')\n pred_map = np.concatenate((pred_tp[..., None], pred_np[..., None], pred_h[..., None], pred_v[..., None]),\n axis=-1)\n im = torch.permute(im, (0, 2, 3, 1)).contiguous().to('cpu').numpy()\n progress.set_postfix(time=time() - t0)\n predictions.extend(zip(list(im), list(pred_map), list(info)))\n progress.update(1)\n progress.close()\n\n progress = tqdm(total=len(predictions))\n results = []\n for prediction in predictions:\n results.append(_postprocess(prediction))\n progress.update(1)\n progress.close()\n\n return results" }, { "identifier": "DatasetPannuke", "path": "data/pannuke_distillation_dataset.py", "snippet": "class DatasetPannuke(Dataset):\n \"\"\"\n Distillaton pannuke dataset\n \"\"\"\n\n def __init__(self, path: str, mode: str = 'train', true_labels: bool = False,\n hovernet_predictions: bool = True):\n \"\"\"\n :param path: path of processed pannuke dataset, h5 file\n :param mode: train or infer\n :param true_labels: load ground truth\n :param hovernet_predictions: load hovernet predictions\n \"\"\"\n assert isinstance(path, str), \"path have be instance of string\"\n assert isinstance(mode, str) and mode in ['train', 'infer'], \"mode must be either train or infer\"\n assert isinstance(hovernet_predictions, bool) and isinstance(true_labels, bool) and (\n hovernet_predictions or true_labels), \\\n \"hovernet_predictions and true_labels must be boolean, and at least one must be true\"\n\n self.path = path\n self.input_shape = (256, 256)\n self.output_shape = (256, 256)\n self.nr_types = 6\n self.mode = mode\n self.true_labels = true_labels\n self.hovernet_predictions = hovernet_predictions\n data = h5py.File(path, 'r')\n self.images = data['images']\n if mode == 'infer':\n self.types = data['types']\n if true_labels:\n self.labels = data['true_labels']\n if hovernet_predictions:\n self.hovernet = data['hovernet_predictions']\n\n def __len__(self):\n return len(self.images)\n\n def __getitem__(self, idx: int):\n outputs = ((self.images[idx] / 255).astype('float32'),)\n if self.mode == 'train':\n if self.true_labels:\n outputs += (self.labels[idx].astype('float32'),)\n if self.hovernet_predictions:\n outputs += (self.hovernet[idx].astype('float32'),)\n if len(outputs) == 3:\n outputs = (outputs[0], np.concatenate(outputs[1:], axis=-1))\n elif self.mode == 'infer':\n outputs += ('%s_%s' % (idx, self.types[idx].decode('utf8')),)\n\n return outputs" }, { "identifier": "loss_fcn", "path": "losses/losses.py", "snippet": "def loss_fcn(pred_student, pred_teacher, true, alpha, T):\n student_np, student_hv, student_tp = pred_student[:, :2, ...], pred_student[:, 2:4, ...], pred_student[:, 4:, ...]\n student_hv = torch.permute(student_hv, (0, 2, 3, 
1))\n loss_distill, loss_student = None, None\n if pred_teacher is not None:\n teacher_np, teacher_hv, teacher_tp = pred_teacher[:, :2, ...], pred_teacher[:, 2:4, ...], pred_teacher[:, 4:,\n ...]\n teacher_hv = torch.permute(teacher_hv, (0, 2, 3, 1))\n loss_distill = _distill_loss((student_np, student_hv, student_tp),\n (teacher_np, teacher_hv, teacher_tp), T)\n\n if true is not None:\n true_np, true_hv, true_tp = true[:, 0, ...].type(torch.long), true[:, 1:3, ...], true[:, 3:, ...]\n true_hv = torch.permute(true_hv, (0, 2, 3, 1))\n loss_student = _student_loss((student_np, student_hv, student_tp),\n (true_np, true_hv, true_tp))\n\n if pred_teacher is not None and true is not None:\n loss = alpha * loss_distill + (1 - alpha) * loss_student\n elif pred_teacher is None and true is not None:\n loss = loss_student\n elif pred_teacher is not None and true is None:\n loss = loss_distill\n else:\n raise Exception()\n\n return loss" }, { "identifier": "train", "path": "train/trainer.py", "snippet": "def train(model: torch.nn.Module, train_set: torch.utils.data.DataLoader, val_set: torch.utils.data.DataLoader,\n epochs: int, optimizer: torch.optim.Optimizer, grad_scaler: torch.cuda.amp.GradScaler, scheduler,\n criterion, metrics, checkpoint_dir=None, experiment_dir=None, path_example=None,\n early_stopping=None,start_epoch=None):\n\n statistics_training = dict()\n best_loss = None\n if early_stopping:\n early_stopper = EarlyStopping(patience=10)\n else:\n early_stopper = None\n\n if start_epoch is None:\n start_epoch = 0\n best_epoch = start_epoch\n nr_types = 10\n augmentation = get_augmentation_gpu().cuda()\n\n for epoch in range(start_epoch, epochs):\n statistics_epoch = {}\n print(\"\\nEpoch %s/%s - best epoch %s\" % (epoch + 1, epochs, best_epoch))\n with tqdm.tqdm(total=len(train_set), unit='step',\n ncols=100 + (50 * len(metrics)),\n bar_format='{desc}{n_fmt}/{total_fmt}|{bar}|ETA:{remaining} '\n '- {elapsed} {rate_inv_fmt}{postfix}') as progress_bar:\n train_loss, train_metrics = train_step(model, train_set, optimizer, grad_scaler, criterion, metrics,\n progress_bar, nr_types=nr_types, gpu_augmentation=augmentation)\n val_loss, val_metrics, example = val_step(model, val_set, criterion, metrics, nr_types=nr_types)\n\n statistics_epoch['val_loss'] = val_loss\n statistics_epoch['train_loss'] = train_loss\n\n progress_bar.set_postfix(loss='{:05.3f}'.format(train_loss), loss_val='{:05.3f}'.format(val_loss),\n **val_metrics, **train_metrics, lr=get_lr(optimizer))\n\n if scheduler:\n scheduler.step(val_loss)\n\n if best_loss is None:\n best_loss = val_loss\n best_epoch = epoch + 1\n else:\n if best_loss > val_loss:\n best_loss = val_loss\n best_epoch = epoch + 1\n\n if best_epoch == epoch + 1:\n if checkpoint_dir:\n Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)\n torch.save({ # Save our checkpoint loc\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'val_loss': val_loss,\n 'train_loss': train_loss,\n },\n str(checkpoint_dir / 'checkpoint_epoch_{}.pth'.format(epoch + 1)))\n if path_example:\n save_example(example, os.path.join(path_example, 'ex_epoch_{}.png'.format(epoch + 1)))\n\n statistics_training[epoch] = {**statistics_epoch, **train_metrics, **val_metrics}\n if early_stopping:\n early_stopper(val_loss, model)\n\n with open(os.path.join(experiment_dir, 'statistics.json'), 'w') as file:\n file.write(json.dumps(statistics_training))\n return best_epoch" } ]
import argparse
import json
import os

import numpy as np
import segmentation_models_pytorch as smp
import torch
from multiprocessing import cpu_count
from pathlib import Path
from torch.utils.data import DataLoader

from train.apply_postprocessing import apply_postprocessing
from data.pannuke_distillation_dataset import DatasetPannuke
from losses.losses import loss_fcn
from train.trainer import train
4,149
        use_true_labels = False

    # Generate all path
    project_dir = os.path.join(base_project_dir, project_name)
    experiment_name = f'experiment_{experiment_group}_{experiment_id}'
    experiment_dir = os.path.join(project_dir, experiment_name)
    centroids_path = os.path.join(experiment_dir, 'centroids')
    checkpoint_dir = os.path.join(experiment_dir, 'checkpoints')
    path_example = os.path.join(experiment_dir, 'examples')
    instance_map_path = os.path.join(experiment_dir, 'instance_maps')

    ######### DIRECTORIES SETTING ##################
    if not os.path.exists(project_dir):
        os.mkdir(project_dir)

    train_state = 0  # 0 train, 1 infer, 2 stats
    best_epoch = None
    if not os.path.exists(experiment_dir):
        os.mkdir(experiment_dir)
        os.mkdir(path_example)
        os.makedirs(centroids_path)
        os.makedirs(instance_map_path)
    else:
        exists_checkpoint = os.path.exists(checkpoint_dir)
        exists_statistics = os.path.exists(os.path.join(experiment_dir, 'statistics.json'))
        exists_tissue = os.path.exists(os.path.join(experiment_dir, 'tissue_stats.csv'))
        exists_pred_map = os.path.exists(os.path.join(experiment_dir, 'pred_masks.npy'))
        if exists_checkpoint and not exists_statistics:
            checkpoints = sorted([int(x.split('_')[-1].split('.')[0]) for x in os.listdir(checkpoint_dir)])
            checkpoint_id = checkpoints[-1]
            path_weights = os.path.join(checkpoint_dir, f'checkpoint_epoch_{checkpoint_id}.pth')
        else:
            if not exists_statistics:
                pass
            elif exists_statistics and not exists_pred_map:
                train_state = 1
                checkpoints = sorted([int(x.split('_')[-1].split('.')[0]) for x in os.listdir(checkpoint_dir)])
                best_epoch = checkpoints[-1]
            elif exists_pred_map and not exists_tissue:
                train_state = 2
            else:
                print("No operation")
                exit(-1)
    print("train state %s" % train_state)
    print("""
    Base directory: %s
    Project directory: %s
    Experiment directory: %s
    Checkpoint directory: %s
    Examples directory: %s
    Centroids directory: %s
    Instance map directory: %s
    """ % (
        base_project_dir, project_dir, experiment_dir, checkpoint_dir, path_example, centroids_path,
        instance_map_path))
    ###########################################################################
    with open(os.path.join(experiment_dir, 'metadata.json'), 'w') as mf:
        mf.write(json.dumps(args.__dict__))

    ##################### DATASET SETTING #####################################
    print("""
    Input shape: %s
    Output shape: %s
    Types numbers: %s
    Path train: %s
    Path validation: %s
    Path test: %s
    Pannuke path: %s
    Batch size: %s
    Epochs: %s
    Encoder: %s
    """ % (
        input_shape, output_shape, nr_types, path_train, path_val, path_test, pannuke_path, batch_size, nr_epochs,
        encoder_name))

    train_set = DatasetPannuke(path_train, mode='train', hovernet_predictions=use_hovernet_predictions,
                               true_labels=use_true_labels)
    val_set = DatasetPannuke(path_val, mode='train', hovernet_predictions=use_hovernet_predictions,
                             true_labels=use_true_labels)
    num_workers = cpu_count() // 2
    if num_workers > 8:
        num_workers = 8
    num_workers = 0
    print(f'Num workers per dataloader: {num_workers}')
    dataloader_train = DataLoader(train_set, batch_size=batch_size, shuffle=True, pin_memory=True,
                                  num_workers=num_workers, )  # prefetch_factor=2)
    dataloader_val = DataLoader(val_set, batch_size=batch_size, shuffle=False, pin_memory=True,
                                num_workers=num_workers)  # , prefetch_factor=2)
    ###################################################################################

    ############################## NETWORK AND TRAINING SETTING #####################################
    model = smp.Unet(classes=nr_types, encoder_name=encoder_name, ).to('cuda', memory_format=torch.channels_last)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
    start_epoch = 0
    if path_weights:
        checkpoint_dict = torch.load(path_weights)
        model.load_state_dict(checkpoint_dict['model_state_dict'])
        optimizer.load_state_dict(checkpoint_dict['optimizer_state_dict'])
        start_epoch = checkpoint_dict['epoch']
        if 'train_mode' in checkpoint_dict:
            train_mode = checkpoint_dict['train_mode']
        print(
            f"Resume model info: epoch: {checkpoint_dict['epoch']}, train_loss: {checkpoint_dict['train_loss']}, val_loss: {checkpoint_dict['val_loss']}")
    grad_scaler = torch.cuda.amp.GradScaler()
    metrics = dict()
    #########################################################################################
    with open(os.path.join(experiment_dir, 'metadata.json'), 'w') as mf:
        mf.write(json.dumps(args.__dict__))
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', min_lr=1e-7,
                                                              patience=5, threshold=1e-3)

    # LAUNCH TRAINING
    if train_state == 0:
        best_epoch = train(model, dataloader_train, dataloader_val, nr_epochs, optimizer, grad_scaler,
                           scheduler=lr_scheduler,
if __name__ == '__main__':
    torch.cuda.set_device(0)
    parser = argparse.ArgumentParser(prog="Train network for nuclei segmentation")
    parser.add_argument_group("Info")
    parser.add_argument('--base_project_dir', type=str, required=True)
    parser.add_argument('--project_name', type=str, required=True)
    parser.add_argument('--experiment_group', default=0, type=int, required=True)
    parser.add_argument('--experiment_id', default=0, type=int, required=True)
    parser.add_argument('--path_train', type=str, required=True)
    parser.add_argument('--path_val', type=str, required=True)
    parser.add_argument('--path_test', type=str, required=True)
    parser.add_argument('--batch_size', default=64, type=int, choices={4, 8, 16, 32, 64, 128, 256})
    parser.add_argument('--nr_epochs', default=240, type=int)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--pannuke_path', type=str, required=True)
    parser.add_argument('--encoder', default='mit_b2', help='see https://smp.readthedocs.io/en/latest/encoders.html')
    parser.add_argument('--use_true_labels', default=1, type=int, choices={0, 1})
    parser.add_argument('--use_hovernet_predictions', default=1, type=int, choices={0, 1})
    parser.add_argument('--loss_t', default=3, type=int, choices={1, 3, 5, 10, 15, 30})
    parser.add_argument('--loss_alpha', default=0.5, type=float)

    args = parser.parse_args()
    base_project_dir = args.base_project_dir
    project_name = args.project_name
    experiment_group = args.experiment_group
    experiment_id = args.experiment_id
    path_weights = None
    input_shape = (256, 256)
    output_shape = (256, 256)
    nr_types = 10
    path_train = args.path_train
    path_val = args.path_val
    path_test = args.path_test
    foldid = path_test.split('.')[0][-1]
    pannuke_path = args.pannuke_path
    true_path = f'{pannuke_path}/Fold{foldid}/masks/fold{foldid}'
    batch_size = args.batch_size
    nr_epochs = args.nr_epochs
    encoder_name = args.encoder
    lr = args.lr
    loss_alpha = args.loss_alpha
    loss_t = args.loss_t
    early_stopping = True
    use_true_labels = True
    if args.use_true_labels == 0:
        use_true_labels = False
    use_hovernet_predictions = True
    if args.use_hovernet_predictions == 0:
        use_hovernet_predictions = False

    if use_hovernet_predictions and not use_true_labels:
        loss_alpha = 1
        print("Warning: student_loss_type will be ignored")
        print("Warning: loss_alpha will be ignored")
    if use_true_labels and not use_hovernet_predictions:
        loss_alpha = 0
        loss_t = 0
        print("Warning: distill_loss_type will be ignored")
        print("Warning: loss_alpha will be ignored")
        print("Warning: loss_t will be ignored")
    if use_true_labels and use_hovernet_predictions:
        if loss_alpha == 0:
            use_hovernet_predictions = False
        if loss_alpha == 1:
            use_true_labels = False

    # Generate all path
    project_dir = os.path.join(base_project_dir, project_name)
    experiment_name = f'experiment_{experiment_group}_{experiment_id}'
    experiment_dir = os.path.join(project_dir, experiment_name)
    centroids_path = os.path.join(experiment_dir, 'centroids')
    checkpoint_dir = os.path.join(experiment_dir, 'checkpoints')
    path_example = os.path.join(experiment_dir, 'examples')
    instance_map_path = os.path.join(experiment_dir, 'instance_maps')

    ######### DIRECTORIES SETTING ##################
    if not os.path.exists(project_dir):
        os.mkdir(project_dir)

    train_state = 0  # 0 train, 1 infer, 2 stats
    best_epoch = None
    if not os.path.exists(experiment_dir):
        os.mkdir(experiment_dir)
        os.mkdir(path_example)
        os.makedirs(centroids_path)
        os.makedirs(instance_map_path)
    else:
        exists_checkpoint = os.path.exists(checkpoint_dir)
        exists_statistics = os.path.exists(os.path.join(experiment_dir, 'statistics.json'))
        exists_tissue = os.path.exists(os.path.join(experiment_dir, 'tissue_stats.csv'))
        exists_pred_map = os.path.exists(os.path.join(experiment_dir, 'pred_masks.npy'))
        if exists_checkpoint and not exists_statistics:
            checkpoints = sorted([int(x.split('_')[-1].split('.')[0]) for x in os.listdir(checkpoint_dir)])
            checkpoint_id = checkpoints[-1]
            path_weights = os.path.join(checkpoint_dir, f'checkpoint_epoch_{checkpoint_id}.pth')
        else:
            if not exists_statistics:
                pass
            elif exists_statistics and not exists_pred_map:
                train_state = 1
                checkpoints = sorted([int(x.split('_')[-1].split('.')[0]) for x in os.listdir(checkpoint_dir)])
                best_epoch = checkpoints[-1]
            elif exists_pred_map and not exists_tissue:
                train_state = 2
            else:
                print("No operation")
                exit(-1)
    print("train state %s" % train_state)
    print("""
    Base directory: %s
    Project directory: %s
    Experiment directory: %s
    Checkpoint directory: %s
    Examples directory: %s
    Centroids directory: %s
    Instance map directory: %s
    """ % (
        base_project_dir, project_dir, experiment_dir, checkpoint_dir, path_example, centroids_path,
        instance_map_path))
    ###########################################################################
    with open(os.path.join(experiment_dir, 'metadata.json'), 'w') as mf:
        mf.write(json.dumps(args.__dict__))

    ##################### DATASET SETTING #####################################
    print("""
    Input shape: %s
    Output shape: %s
    Types numbers: %s
    Path train: %s
    Path validation: %s
    Path test: %s
    Pannuke path: %s
    Batch size: %s
    Epochs: %s
    Encoder: %s
    """ % (
        input_shape, output_shape, nr_types, path_train, path_val, path_test, pannuke_path, batch_size, nr_epochs,
        encoder_name))

    train_set = DatasetPannuke(path_train, mode='train', hovernet_predictions=use_hovernet_predictions,
                               true_labels=use_true_labels)
    val_set = DatasetPannuke(path_val, mode='train', hovernet_predictions=use_hovernet_predictions,
                             true_labels=use_true_labels)
    num_workers = cpu_count() // 2
    if num_workers > 8:
        num_workers = 8
    num_workers = 0
    print(f'Num workers per dataloader: {num_workers}')
    dataloader_train = DataLoader(train_set, batch_size=batch_size, shuffle=True, pin_memory=True,
                                  num_workers=num_workers, )  # prefetch_factor=2)
    dataloader_val = DataLoader(val_set, batch_size=batch_size, shuffle=False, pin_memory=True,
                                num_workers=num_workers)  # , prefetch_factor=2)
    ###################################################################################

    ############################## NETWORK AND TRAINING SETTING #####################################
    model = smp.Unet(classes=nr_types, encoder_name=encoder_name, ).to('cuda', memory_format=torch.channels_last)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
    start_epoch = 0
    if path_weights:
        checkpoint_dict = torch.load(path_weights)
        model.load_state_dict(checkpoint_dict['model_state_dict'])
        optimizer.load_state_dict(checkpoint_dict['optimizer_state_dict'])
        start_epoch = checkpoint_dict['epoch']
        if 'train_mode' in checkpoint_dict:
            train_mode = checkpoint_dict['train_mode']
        print(
            f"Resume model info: epoch: {checkpoint_dict['epoch']}, train_loss: {checkpoint_dict['train_loss']}, val_loss: {checkpoint_dict['val_loss']}")
    grad_scaler = torch.cuda.amp.GradScaler()
    metrics = dict()
    #########################################################################################
    with open(os.path.join(experiment_dir, 'metadata.json'), 'w') as mf:
        mf.write(json.dumps(args.__dict__))
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer, mode='min', min_lr=1e-7,
                                                              patience=5, threshold=1e-3)

    # LAUNCH TRAINING
    if train_state == 0:
        best_epoch = train(model, dataloader_train, dataloader_val, nr_epochs, optimizer, grad_scaler,
                           scheduler=lr_scheduler,
criterion=lambda x, y, z: loss_fcn(x, y, z, alpha=loss_alpha, T=loss_t),
2
2023-11-10 09:37:29+00:00
8k
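The DatasetPannuke contract in this record's context is fully shown: in train mode with both target sources enabled, __getitem__ returns the image rescaled to [0, 1] plus the ground-truth and HoVer-Net maps concatenated along the last axis. A usage sketch, with a placeholder .h5 path:

from torch.utils.data import DataLoader

from data.pannuke_distillation_dataset import DatasetPannuke

ds = DatasetPannuke("fold1_processed.h5",  # placeholder path to a processed fold
                    mode="train", true_labels=True, hovernet_predictions=True)
image, target = ds[0]  # image: float32 in [0, 1]; target: GT and HoVer-Net maps concatenated
loader = DataLoader(ds, batch_size=8, shuffle=True, pin_memory=True)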
joyn-gg/discord.http
discord_http/backend.py
[ { "identifier": "Command", "path": "discord_http/commands.py", "snippet": "class Command:\n def __init__(\n self,\n command: Callable,\n name: str,\n description: Optional[str] = None,\n guild_ids: Optional[list[Union[utils.Snowflake, int]]] = None,\n type: ApplicationCommandType = ApplicationCommandType.chat_input,\n ):\n self.id: Optional[int] = None\n self.command = command\n self.cog: Optional[\"Cog\"] = None\n self.type: int = int(type)\n self.name = name\n self.description = description\n self.options = []\n self.default_member_permissions = None\n\n self.name_localizations: Dict[LocaleTypes, str] = {}\n self.description_localizations: Dict[LocaleTypes, str] = {}\n\n self.list_autocompletes: Dict[str, Callable] = {}\n self.guild_ids: list[Union[utils.Snowflake, int]] = guild_ids or []\n self.__list_choices: list[str] = []\n\n if self.type == ApplicationCommandType.chat_input:\n if self.description is None:\n self.description = command.__doc__ or \"No description provided.\"\n if self.name != self.name.lower():\n raise ValueError(\"Command names must be lowercase.\")\n if not 1 <= len(self.description) <= 100:\n raise ValueError(\"Command descriptions must be between 1 and 100 characters.\")\n else:\n self.description = None\n\n if self.type is ApplicationCommandType.chat_input.value and not self.options:\n sig = inspect.signature(self.command)\n self.options = []\n\n slicer = 1\n if sig.parameters.get(\"self\"):\n slicer = 2\n\n for parameter in itertools.islice(sig.parameters.values(), slicer, None):\n origin = getattr(\n parameter.annotation, \"__origin__\",\n parameter.annotation\n )\n\n option = {}\n\n if (\n origin in [Union] and\n len(parameter.annotation.__args__) == 2\n ):\n # Parsing Optional/Union types\n origin = parameter.annotation.__args__[0]\n\n if origin in [Member, User]:\n ptype = CommandOptionType.user\n elif origin in channel_types:\n ptype = CommandOptionType.channel\n option.update({\n \"channel_types\": [\n int(i) for i in channel_types[origin]\n ]\n })\n elif origin in [Attachment]:\n ptype = CommandOptionType.attachment\n elif origin in [Role]:\n ptype = CommandOptionType.role\n elif origin in [Choice]:\n # Temporarily set to string, will be changed later\n self.__list_choices.append(parameter.name)\n ptype = CommandOptionType.string\n elif isinstance(origin, Range):\n ptype = origin.type\n if origin.type == CommandOptionType.string:\n option.update({\n \"min_length\": origin.min,\n \"max_length\": origin.max\n })\n else:\n option.update({\n \"min_value\": origin.min,\n \"max_value\": origin.max\n })\n elif origin == int:\n ptype = CommandOptionType.integer\n elif origin == bool:\n ptype = CommandOptionType.boolean\n elif origin == float:\n ptype = CommandOptionType.number\n elif origin == str:\n ptype = CommandOptionType.string\n else:\n ptype = CommandOptionType.string\n\n option.update({\n \"name\": parameter.name,\n \"description\": \"…\",\n \"type\": ptype.value,\n \"required\": (parameter.default == parameter.empty),\n \"autocomplete\": False,\n \"name_localizations\": {},\n \"description_localizations\": {},\n })\n\n self.options.append(option)\n\n def __repr__(self) -> str:\n return f\"<Command name='{self.name}'>\"\n\n @property\n def mention(self) -> str:\n \"\"\" `str`: Returns a mentionable string for the command \"\"\"\n if self.id:\n return f\"</{self.name}:{self.id}>\"\n return f\"`/{self.name}`\"\n\n def mention_sub(self, suffix: str) -> str:\n \"\"\"\n Returns a mentionable string for a subcommand.\n\n Parameters\n ----------\n suffix: 
`str`\n The subcommand name.\n\n Returns\n -------\n `str`\n The mentionable string.\n \"\"\"\n if self.id:\n return f\"</{self.name} {suffix}:{self.id}>\"\n return f\"`/{self.name} {suffix}`\"\n\n async def _make_context_and_run(\n self,\n context: \"Context\"\n ) -> BaseResponse:\n args, kwargs = context._create_args()\n\n for name, values in getattr(self.command, \"__choices_params__\", {}).items():\n if name not in kwargs:\n continue\n if name not in self.__list_choices:\n continue\n kwargs[name] = Choice(\n kwargs[name], values[kwargs[name]]\n )\n\n result = await self.run(context, *args, **kwargs)\n\n if not isinstance(result, BaseResponse):\n raise TypeError(\n f\"Command {self.name} must return a \"\n f\"Response object, not {type(result)}.\"\n )\n\n return result\n\n def _has_permissions(self, ctx: \"Context\") -> Permissions:\n _perms: Optional[Permissions] = getattr(\n self.command, \"__has_permissions__\", None\n )\n\n if _perms is None:\n return Permissions(0)\n\n if (\n isinstance(ctx.user, Member) and\n Permissions.administrator in ctx.user.resolved_permissions\n ):\n return Permissions(0)\n\n missing = Permissions(sum([\n flag.value for flag in _perms\n if flag not in ctx.app_permissions\n ]))\n\n return missing\n\n def _bot_has_permissions(self, ctx: \"Context\") -> Permissions:\n _perms: Optional[Permissions] = getattr(\n self.command, \"__bot_has_permissions__\", None\n )\n\n if _perms is None:\n return Permissions(0)\n if Permissions.administrator in ctx.app_permissions:\n return Permissions(0)\n\n missing = Permissions(sum([\n flag.value for flag in _perms\n if flag not in ctx.app_permissions\n ]))\n\n return missing\n\n async def _command_checks(self, ctx: \"Context\") -> bool:\n _checks: list[Callable] = getattr(\n self.command, \"__checks__\", []\n )\n\n for g in _checks:\n if inspect.iscoroutinefunction(g):\n result = await g(ctx)\n else:\n result = g(ctx)\n\n if result is not True:\n raise CheckFailed(f\"Check {g.__name__} failed.\")\n\n return True\n\n async def run(self, context: \"Context\", *args, **kwargs) -> BaseResponse:\n \"\"\"\n Runs the command.\n\n Parameters\n ----------\n context: `Context`\n The context of the command.\n\n Returns\n -------\n `BaseResponse`\n The return type of the command, used by backend.py (Quart)\n\n Raises\n ------\n `UserMissingPermissions`\n User that ran the command is missing permissions.\n `BotMissingPermissions`\n Bot is missing permissions.\n \"\"\"\n # Check user permissions\n perms_user = self._has_permissions(context)\n if perms_user != Permissions(0):\n raise UserMissingPermissions(perms_user)\n\n # Check bot permissions\n perms_bot = self._bot_has_permissions(context)\n if perms_bot != Permissions(0):\n raise BotMissingPermissions(perms_bot)\n\n # Check custom checks\n await self._command_checks(context)\n\n if self.cog is not None:\n return await self.command(self.cog, context, *args, **kwargs)\n else:\n return await self.command(context, *args, **kwargs)\n\n async def run_autocomplete(\n self,\n context: \"Context\",\n name: str,\n current: str\n ) -> dict:\n \"\"\"\n Runs the autocomplete\n\n Parameters\n ----------\n context: `Context`\n Context object for the command\n name: `str`\n Name of the option\n current: `str`\n Current value of the option\n\n Returns\n -------\n `dict`\n The return type of the command, used by backend.py (Quart)\n\n Raises\n ------\n `TypeError`\n Autocomplete must return an AutocompleteResponse object\n \"\"\"\n if self.cog is not None:\n result = await 
self.list_autocompletes[name](self.cog, context, current)\n else:\n result = await self.list_autocompletes[name](context, current)\n\n if isinstance(result, AutocompleteResponse):\n return result.to_dict()\n raise TypeError(\"Autocomplete must return an AutocompleteResponse object.\")\n\n def _find_option(self, name: str) -> Optional[dict]:\n return next((g for g in self.options if g[\"name\"] == name), None)\n\n def to_dict(self) -> dict:\n \"\"\"\n Converts the command to a dict.\n\n Returns\n -------\n `dict`\n The dict of the command.\n \"\"\"\n _extra_locale = getattr(self.command, \"__locales__\", {})\n _extra_params = getattr(self.command, \"__describe_params__\", {})\n _extra_choices = getattr(self.command, \"__choices_params__\", {})\n _default_permissions = getattr(self.command, \"__default_permissions__\", None)\n\n # Types\n _extra_locale: dict[LocaleTypes, list[LocaleContainer]]\n\n data = {\n \"type\": self.type,\n \"name\": self.name,\n \"description\": self.description,\n \"options\": self.options,\n \"default_permission\": True,\n \"dm_permission\": getattr(self.command, \"__dm_permission__\", True),\n \"nsfw\": getattr(self.command, \"__nsfw__\", False),\n \"name_localizations\": {},\n \"description_localizations\": {},\n }\n\n for key, value in _extra_locale.items():\n for loc in value:\n if loc.key == \"_\":\n data[\"name_localizations\"][key] = loc.name\n data[\"description_localizations\"][key] = loc.description\n continue\n\n opt = self._find_option(loc.key)\n if not opt:\n _log.warn(\n f\"{self.name} -> {loc.key}: \"\n \"Option not found in command, skipping...\"\n )\n continue\n\n opt[\"name_localizations\"][key] = loc.name\n opt[\"description_localizations\"][key] = loc.description\n\n if _default_permissions:\n data[\"default_member_permissions\"] = _default_permissions\n\n for key, value in _extra_params.items():\n opt = self._find_option(key)\n if not opt:\n continue\n\n opt[\"description\"] = value\n\n for key, value in _extra_choices.items():\n opt = self._find_option(key)\n if not opt:\n continue\n\n opt[\"choices\"] = [\n {\"name\": v, \"value\": k}\n for k, v in value.items()\n ]\n\n return data\n\n def autocomplete(self, name: str):\n \"\"\"\n Decorator to set an option as an autocomplete.\n\n The function must at the end, return a `Response.send_autocomplete()` object.\n\n Example usage\n\n .. 
code-block:: python\n\n @commands.command()\n async def ping(ctx, options: str):\n await ctx.send(f\"You chose {options}\")\n\n @ping.autocomplete(\"options\")\n async def search_autocomplete(ctx, current: str):\n return ctx.response.send_autocomplete({\n \"key\": \"Value shown to user\",\n \"feeling_lucky_tm\": \"I'm feeling lucky!\"\n })\n\n Parameters\n ----------\n name: `str`\n Name of the option to set as an autocomplete.\n \"\"\"\n def wrapper(func):\n find_option = next((\n option for option in self.options\n if option[\"name\"] == name\n ), None)\n\n if not find_option:\n raise ValueError(f\"Option {name} in command {self.name} not found.\")\n find_option[\"autocomplete\"] = True\n self.list_autocompletes[name] = func\n return func\n\n return wrapper" }, { "identifier": "Interaction", "path": "discord_http/commands.py", "snippet": "class Interaction:\n def __init__(\n self,\n func: Callable,\n custom_id: str,\n *,\n regex: bool = False\n ):\n self.func = func\n self.custom_id = custom_id\n self.cog: Optional[\"Cog\"] = None\n self.is_regex: bool = regex\n\n def __repr__(self) -> str:\n return (\n f\"<Interaction custom_id='{self.custom_id}' \"\n f\"is_regex={self.is_regex}>\"\n )\n\n async def run(self, context: \"Context\") -> BaseResponse:\n \"\"\"\n Runs the interaction.\n\n Parameters\n ----------\n context: `Context`\n The context of the interaction.\n\n Returns\n -------\n `BaseResponse`\n The return type of the interaction, used by backend.py (Quart)\n\n Raises\n ------\n `TypeError`\n Interaction must be a Response object\n \"\"\"\n if self.cog is not None:\n result = await self.func(self.cog, context)\n else:\n result = await self.func(context)\n\n if not isinstance(result, BaseResponse):\n raise TypeError(\"Interaction must be a Response object\")\n\n return result" }, { "identifier": "Listener", "path": "discord_http/commands.py", "snippet": "class Listener:\n def __init__(self, name: str, coro: Callable):\n self.name = name\n self.coro = coro\n self.cog: Optional[\"Cog\"] = None\n\n def __repr__(self) -> str:\n return f\"<Listener name='{self.name}'>\"\n\n async def run(self, *args, **kwargs):\n \"\"\" Runs the listener \"\"\"\n if self.cog is not None:\n await self.coro(self.cog, *args, **kwargs)\n else:\n await self.coro(*args, **kwargs)" }, { "identifier": "SubGroup", "path": "discord_http/commands.py", "snippet": "class SubGroup(Command):\n def __init__(\n self,\n *,\n name: str,\n description: Optional[str] = None,\n guild_ids: Optional[list[Union[utils.Snowflake, int]]] = None\n ):\n self.name = name\n self.description = description or \"...\" # Only used to make Discord happy\n self.guild_ids: list[Union[utils.Snowflake, int]] = guild_ids or []\n self.type = int(ApplicationCommandType.chat_input)\n self.cog: Optional[\"Cog\"] = None\n self.subcommands: Dict[str, Union[SubCommand, SubGroup]] = {}\n\n def __repr__(self) -> str:\n _subs = [g for g in self.subcommands.values()]\n return f\"<SubGroup name='{self.name}', subcommands={_subs}>\"\n\n def command(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n guild_ids: Optional[list[Union[utils.Snowflake, int]]] = None,\n ):\n \"\"\"\n Decorator to add a subcommand to a subcommand group\n\n Parameters\n ----------\n name: `Optional[str]`\n Name of the command (defaults to the function name)\n description: `Optional[str]`\n Description of the command (defaults to the function docstring)\n guild_ids: `Optional[list[Union[utils.Snowflake, int]]]`\n List of guild IDs to register the command 
in\n \"\"\"\n def decorator(func):\n subcommand = SubCommand(\n func,\n name=name or func.__name__,\n description=description,\n guild_ids=guild_ids,\n )\n self.subcommands[subcommand.name] = subcommand\n return subcommand\n return decorator\n\n def group(self, name: Optional[str] = None):\n \"\"\"\n Decorator to add a subcommand group to a subcommand group\n\n Parameters\n ----------\n name: `Optional[str]`\n Name of the subcommand group (defaults to the function name)\n \"\"\"\n def decorator(func):\n subgroup = SubGroup(name=name or func.__name__)\n self.subcommands[subgroup.name] = subgroup\n return subgroup\n return decorator\n\n def add_group(self, name: str) -> \"SubGroup\":\n \"\"\"\n Adds a subcommand group to a subcommand group\n\n Parameters\n ----------\n name: `str`\n Name of the subcommand group\n\n Returns\n -------\n `SubGroup`\n The subcommand group\n \"\"\"\n subgroup = SubGroup(name=name)\n self.subcommands[subgroup.name] = subgroup\n return subgroup\n\n @property\n def options(self) -> list[dict]:\n \"\"\" `list[dict]`: Returns the options of the subcommand group \"\"\"\n options = []\n for cmd in self.subcommands.values():\n data = cmd.to_dict()\n if isinstance(cmd, SubGroup):\n data[\"type\"] = int(CommandOptionType.sub_command_group)\n else:\n data[\"type\"] = int(CommandOptionType.sub_command)\n options.append(data)\n return options" }, { "identifier": "InteractionType", "path": "discord_http/enums.py", "snippet": "class InteractionType(Enum):\n ping = 1\n application_command = 2\n message_component = 3\n application_command_autocomplete = 4\n modal_submit = 5" }, { "identifier": "CheckFailed", "path": "discord_http/errors.py", "snippet": "class CheckFailed(DiscordException):\n \"\"\" Raised whenever a check fails \"\"\"\n pass" }, { "identifier": "BaseResponse", "path": "discord_http/response.py", "snippet": "class BaseResponse:\n def __init__(self):\n pass\n\n @property\n def content_type(self) -> str:\n \"\"\" `str`: Returns the content type of the response \"\"\"\n multidata = MultipartData()\n return multidata.content_type\n\n def to_dict(self) -> dict:\n \"\"\" Default method to convert the response to a `dict` \"\"\"\n raise NotImplementedError\n\n def to_multipart(self) -> bytes:\n \"\"\" Default method to convert the response to a `bytes` \"\"\"\n raise NotImplementedError" }, { "identifier": "Ping", "path": "discord_http/response.py", "snippet": "class Ping:\n def __init__(\n self,\n *,\n state: \"DiscordAPI\",\n data: dict\n ):\n self._state = state\n self._raw_user = data[\"user\"]\n\n self.id: int = int(data[\"id\"])\n self.application_id: int = int(data[\"application_id\"])\n self.version: int = int(data[\"version\"])\n\n def __repr__(self) -> str:\n return f\"<Ping application_id={self.application_id} user='{self.user}'>\"\n\n def __int__(self) -> int:\n return self.id\n\n @property\n def user(self) -> \"User\":\n \"\"\" `User`: Returns the user object of the bot \"\"\"\n from .user import User\n return User(state=self._state, data=self._raw_user)" }, { "identifier": "MessageResponse", "path": "discord_http/response.py", "snippet": "class MessageResponse(BaseResponse):\n def __init__(\n self,\n content: Optional[str] = MISSING,\n *,\n file: Optional[File] = MISSING,\n files: Optional[list[File]] = MISSING,\n embed: Optional[Embed] = MISSING,\n embeds: Optional[list[Embed]] = MISSING,\n attachment: Optional[File] = MISSING,\n attachments: Optional[list[File]] = MISSING,\n view: Optional[View] = MISSING,\n tts: Optional[bool] = False,\n 
allowed_mentions: Optional[AllowedMentions] = MISSING,\n message_reference: Optional[\"MessageReference\"] = MISSING,\n type: Union[ResponseType, int] = 4,\n ephemeral: Optional[bool] = False,\n ):\n self.content = content\n self.files = files\n self.embeds = embeds\n self.attachments = attachments\n self.ephemeral = ephemeral\n self.view = view\n self.tts = tts\n self.type = type\n self.allowed_mentions = allowed_mentions\n self.message_reference = message_reference\n\n if file is not MISSING and files is not MISSING:\n raise TypeError(\"Cannot pass both file and files\")\n if file is not MISSING:\n self.files = [file]\n\n if embed is not MISSING and embeds is not MISSING:\n raise TypeError(\"Cannot pass both embed and embeds\")\n if embed is not MISSING:\n if embed is None:\n self.embeds = []\n else:\n self.embeds = [embed]\n\n if attachment is not MISSING and attachments is not MISSING:\n raise TypeError(\"Cannot pass both attachment and attachments\")\n if attachment is not MISSING:\n if attachment is None:\n self.attachments = []\n else:\n self.attachments = [attachment]\n\n if self.view is not MISSING and self.view is None:\n self.view = View()\n\n if self.attachments is not MISSING:\n self.files = (\n [a for a in self.attachments if isinstance(a, File)]\n if self.attachments is not None else None\n )\n\n def to_dict(self, is_request: bool = False) -> dict:\n \"\"\"\n The JSON data that is sent to Discord.\n\n Parameters\n ----------\n is_request: `bool`\n Whether the data is being sent to Discord or not.\n\n Returns\n -------\n `dict`\n The JSON data that can either be sent\n to Discord or forwarded to a new parser\n \"\"\"\n output: dict[str, Any] = {\n \"flags\": (\n MessageFlags.ephemeral.value\n if self.ephemeral else 0\n )\n }\n\n if self.content is not MISSING:\n output[\"content\"] = self.content\n\n if self.tts:\n output[\"tts\"] = self.tts\n\n if self.message_reference is not MISSING:\n output[\"message_reference\"] = self.message_reference.to_dict()\n\n if self.embeds is not MISSING:\n output[\"embeds\"] = [\n embed.to_dict() for embed in self.embeds # type: ignore\n if isinstance(embed, Embed)\n ]\n\n if self.view is not MISSING:\n output[\"components\"] = self.view.to_dict()\n\n if self.allowed_mentions is not MISSING:\n output[\"allowed_mentions\"] = self.allowed_mentions.to_dict()\n\n if self.attachments is not MISSING:\n if self.attachments is None:\n output[\"attachments\"] = []\n else:\n _index = 0\n _file_payload = []\n for a in self.attachments:\n if not isinstance(a, File):\n continue\n _file_payload.append(a.to_dict(_index))\n _index += 1\n output[\"attachments\"] = _file_payload\n\n if is_request:\n return output\n return {\"type\": int(self.type), \"data\": output}\n\n def to_multipart(self, is_request: bool = False) -> bytes:\n \"\"\"\n The multipart data that is sent to Discord.\n\n Parameters\n ----------\n is_request: `bool`\n Whether the data is being sent to Discord or not.\n\n Returns\n -------\n `bytes`\n The multipart data that can either be sent\n \"\"\"\n multidata = MultipartData()\n\n if isinstance(self.files, list):\n for i, file in enumerate(self.files):\n multidata.attach(\n f\"files[{i}]\",\n file, filename=file.filename # type: ignore\n )\n\n multidata.attach(\n \"payload_json\",\n self.to_dict(is_request=is_request)\n )\n\n return multidata.finish()" } ]
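One detail in the MessageResponse snippet above deserves a standalone illustration: the module-level MISSING sentinel that lets the constructor tell "argument omitted" apart from "argument explicitly set to None". The sketch below only demonstrates the idiom; the names _MissingSentinel and Reply are hypothetical stand-ins, not part of the library.

class _MissingSentinel:
    """Unique placeholder object; identity checks tell 'omitted' from 'None'."""
    def __repr__(self) -> str:
        return "..."


MISSING = _MissingSentinel()  # single shared instance, always compared with `is`


class Reply:
    def __init__(self, embed=MISSING, embeds=MISSING):
        if embed is not MISSING and embeds is not MISSING:
            raise TypeError("Cannot pass both embed and embeds")
        if embed is not MISSING:
            # embed=None clears the embeds; embed=<value> wraps it in a list
            self.embeds = [] if embed is None else [embed]
        else:
            self.embeds = embeds  # may remain MISSING, meaning "leave untouched"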
import asyncio
import logging
import signal

from datetime import datetime
from hypercorn.asyncio import serve
from hypercorn.config import Config as HyperConfig
from nacl.exceptions import BadSignatureError
from nacl.signing import VerifyKey
from quart import Quart, request, abort
from quart import Response as QuartResponse
from quart.logging import default_handler
from quart.utils import MustReloadError, restart
from typing import Optional, Any, Union, TYPE_CHECKING

from .commands import Command, Interaction, Listener, SubGroup
from .enums import InteractionType
from .errors import CheckFailed
from .response import BaseResponse, Ping, MessageResponse
from .client import Client
from .context import Context
6,985
"task": task }) class DiscordHTTP(Quart): def __init__(self, *, client: "Client"): """ This serves as the fundemental HTTP server for Discord Interactions We recommend to not touch this class, unless you know what you're doing """ self.uptime: datetime = datetime.now() self.bot: "Client" = client self.loop = self.bot.loop self.debug_events = self.bot.debug_events self._cog_commands: dict[str, Command] = {} self._cog_interactions: dict[str, Interaction] = {} self._cog_listeners: list[Listener] = [] super().__init__(__name__) # Remove Quart's default logging handler _quart_log = logging.getLogger("quart.app") _quart_log.removeHandler(default_handler) _quart_log.setLevel(logging.CRITICAL) async def validate_request(self) -> None: """ Used to validate requests sent by Discord Webhooks """ if not self.bot.public_key: return abort(401, "invalid public key") verify_key = VerifyKey(bytes.fromhex(self.bot.public_key)) signature: str = request.headers.get("X-Signature-Ed25519", "") timestamp: str = request.headers.get("X-Signature-Timestamp", "") try: data = await request.data body = data.decode("utf-8") verify_key.verify( f"{timestamp}{body}".encode(), bytes.fromhex(signature) ) except BadSignatureError: abort(401, "invalid request signature") except Exception: abort(400, "invalid request body") def error_messages( self, ctx: "Context", e: Exception ) -> Optional[MessageResponse]: """ Used to return error messages to Discord Parameters ---------- ctx: `Context` The context of the command e: `Exception` The exception that was raised Returns ------- `Optional[MessageResponse]` The message response provided by the library error handler """ if isinstance(e, CheckFailed): return ctx.response.send_message( content=str(e), ephemeral=True ) def _dig_subcommand( self, cmd: Union[Command, SubGroup], data: dict ) -> tuple[Optional[Command], list[dict]]: """ Used to dig through subcommands to execute correct command/autocomplete """ data_options: list[dict] = data["data"].get("options", []) while isinstance(cmd, SubGroup): find_next_step = next(( g for g in data_options if g.get("name", None) and not g.get("value", None) ), None) if not find_next_step: return abort(400, "invalid command") cmd = cmd.subcommands.get(find_next_step["name"], None) # type: ignore if not cmd: _log.warn( f"Unhandled subcommand: {find_next_step['name']} " "(not found in local command list)" ) return abort(404, "command not found") data_options = find_next_step.get("options", []) return cmd, data_options async def _index_interaction(self) -> Union[BaseResponse, QuartResponse, dict]: """ The main function to handle all HTTP requests sent by Discord Please do not touch this function, unless you know what you're doing """ await self.validate_request() data = await request.json if self.debug_events: self.bot.dispatch("raw_interaction", data) context = self.bot._context(self.bot, data) data_type = data.get("type", -1) match data_type: case InteractionType.ping:
if TYPE_CHECKING:

_log = logging.getLogger(__name__)

__all__ = (
    "DiscordHTTP",
)


def _cancel_all_tasks(loop: asyncio.AbstractEventLoop) -> None:
    """ Used by Quart to cancel all tasks on shutdown. """
    tasks = [
        task for task in asyncio.all_tasks(loop)
        if not task.done()
    ]

    if not tasks:
        return

    for task in list(tasks):
        task.cancel()
        if task.get_coro().__name__ == "_windows_signal_support":
            tasks.remove(task)

    loop.run_until_complete(
        asyncio.gather(*tasks, return_exceptions=True)
    )

    for task in tasks:
        if not task.cancelled() and task.exception() is not None:
            loop.call_exception_handler({
                "message": "unhandled exception during shutdown",
                "exception": task.exception(),
                "task": task
            })


class DiscordHTTP(Quart):
    def __init__(self, *, client: "Client"):
        """
        This serves as the fundamental HTTP server for Discord Interactions.
        We recommend not touching this class unless you know what you're doing.
        """
        self.uptime: datetime = datetime.now()

        self.bot: "Client" = client
        self.loop = self.bot.loop
        self.debug_events = self.bot.debug_events

        self._cog_commands: dict[str, Command] = {}
        self._cog_interactions: dict[str, Interaction] = {}
        self._cog_listeners: list[Listener] = []

        super().__init__(__name__)

        # Remove Quart's default logging handler
        _quart_log = logging.getLogger("quart.app")
        _quart_log.removeHandler(default_handler)
        _quart_log.setLevel(logging.CRITICAL)

    async def validate_request(self) -> None:
        """ Used to validate requests sent by Discord Webhooks """
        if not self.bot.public_key:
            return abort(401, "invalid public key")

        verify_key = VerifyKey(bytes.fromhex(self.bot.public_key))
        signature: str = request.headers.get("X-Signature-Ed25519", "")
        timestamp: str = request.headers.get("X-Signature-Timestamp", "")

        try:
            data = await request.data
            body = data.decode("utf-8")
            verify_key.verify(
                f"{timestamp}{body}".encode(),
                bytes.fromhex(signature)
            )
        except BadSignatureError:
            abort(401, "invalid request signature")
        except Exception:
            abort(400, "invalid request body")

    def error_messages(
        self, ctx: "Context", e: Exception
    ) -> Optional[MessageResponse]:
        """
        Used to return error messages to Discord

        Parameters
        ----------
        ctx: `Context`
            The context of the command
        e: `Exception`
            The exception that was raised

        Returns
        -------
        `Optional[MessageResponse]`
            The message response provided by the library error handler
        """
        if isinstance(e, CheckFailed):
            return ctx.response.send_message(
                content=str(e),
                ephemeral=True
            )

    def _dig_subcommand(
        self, cmd: Union[Command, SubGroup], data: dict
    ) -> tuple[Optional[Command], list[dict]]:
        """ Used to dig through subcommands to execute the correct command/autocomplete """
        data_options: list[dict] = data["data"].get("options", [])

        while isinstance(cmd, SubGroup):
            find_next_step = next((
                g for g in data_options
                if g.get("name", None) and not g.get("value", None)
            ), None)

            if not find_next_step:
                return abort(400, "invalid command")

            cmd = cmd.subcommands.get(find_next_step["name"], None)  # type: ignore

            if not cmd:
                _log.warn(
                    f"Unhandled subcommand: {find_next_step['name']} "
                    "(not found in local command list)"
                )
                return abort(404, "command not found")

            data_options = find_next_step.get("options", [])

        return cmd, data_options

    async def _index_interaction(self) -> Union[BaseResponse, QuartResponse, dict]:
        """
        The main function to handle all HTTP requests sent by Discord.
        Please do not touch this function unless you know what you're doing.
        """
        await self.validate_request()
        data = await request.json

        if self.debug_events:
            self.bot.dispatch("raw_interaction", data)

        context = self.bot._context(self.bot, data)
        data_type = data.get("type", -1)

        match data_type:
            case InteractionType.ping:
_ping = Ping(state=self.bot.state, data=data)
7
2023-11-14 12:50:42+00:00
8k
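The record above centres on validate_request, the Ed25519 signature check Discord requires of interaction endpoints. Below is a minimal, framework-free sketch of the same check, assuming only that PyNaCl is installed; the hex-string arguments stand in for the X-Signature-Ed25519 and X-Signature-Timestamp headers and the raw request body.

from nacl.exceptions import BadSignatureError
from nacl.signing import VerifyKey


def is_valid_interaction(public_key: str, signature: str, timestamp: str, body: str) -> bool:
    """Return True only if `signature` signs the concatenation timestamp + body."""
    verify_key = VerifyKey(bytes.fromhex(public_key))
    try:
        verify_key.verify(f"{timestamp}{body}".encode(), bytes.fromhex(signature))
        return True
    except BadSignatureError:
        return False

Returning a boolean keeps the sketch independent of Quart; the snippet's version instead aborts the request with HTTP 401 on failure.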
Ganymede-Bio/bio-curve-fit
tests/test_four_pl_logistic.py
[ { "identifier": "FourPLLogistic", "path": "bio_curve_fit/logistic.py", "snippet": "class FourPLLogistic(BaseEstimator, RegressorMixin, BaseStandardCurve):\n def __init__(\n self,\n A=None,\n B=None,\n C=None,\n D=None,\n LLOD=None,\n ULOD=None,\n ULOD_y=None,\n LLOD_y=None,\n slope_direction_positive: Optional[bool] = None,\n slope_guess_num_points_to_use: int = 3,\n ):\n # A is the minimum asymptote\n self.A_ = A\n # B is the Hill's slope\n self.B_ = B\n # C is the inflection point (EC50)\n self.C_ = C\n # D is the maximum asymptote\n self.D_ = D\n self.cov_ = None\n # Initial guesses used when fitting the curve\n self.guess_A_ = None\n self.guess_B_ = None\n self.guess_C_ = None\n self.guess_D_ = None\n # Estimated Limits of Detection for response signal\n self.LLOD_y_ = LLOD_y\n self.ULOD_y_ = ULOD_y\n # Estimated Limits of Detection for concentration\n self.LLOD_ = LLOD\n self.ULOD_ = ULOD\n self.slope_direction_positive = slope_direction_positive\n self.slope_guess_num_points_to_use = slope_guess_num_points_to_use\n\n def check_fit(self):\n if self.A_ is None or self.B_ is None or self.C_ is None or self.D_ is None:\n raise Exception(\n \"Model is not fit yet. Please call 'fit' with appropriate data\"\n \" or initialize the model object with non-null parameters.\"\n )\n\n def get_params(self, deep=False):\n if deep:\n return {\n \"A\": self.A_,\n \"B\": self.B_,\n \"C\": self.C_,\n \"D\": self.D_,\n \"LLOD\": self.LLOD_,\n \"ULOD\": self.ULOD_,\n \"ULOD_y\": self.ULOD_y_,\n \"LLOD_y\": self.LLOD_y_,\n }\n else:\n return {\n \"A\": self.A_,\n \"B\": self.B_,\n \"C\": self.C_,\n \"D\": self.D_,\n }\n\n @staticmethod\n def four_param_logistic(x, A, B, C, D):\n \"\"\"4 Parameter Logistic (4PL) model.\"\"\"\n\n # For addressing fractional powers of negative numbers\n # https://stackoverflow.com/questions/45384602/numpy-runtimewarning-invalid-value-encountered-in-power\n z = np.sign(x / C) * np.abs(x / C) ** B\n\n return ((A - D) / (1.0 + z)) + D\n\n @staticmethod\n def inverse_variance_weight_function(y_data):\n \"\"\"\n Function for weighting residuals by 1/y^2 in `scipy.optimize.curve_fit`.\n \"\"\"\n # To avoid division by zero, add a small constant to y_data.\n return y_data + np.finfo(float).eps\n\n def _calculate_lod_replicate_variance(\n self,\n x_data,\n y_data,\n lower_std_dev_multiplier: float = 2.5,\n upper_std_dev_multiplier: float = 0.0,\n ):\n \"\"\"\n Calculate the Lower and Upper Limits of Detection (LLOD and ULOD) using variance\n of replicate max and min concentration standards. It ignore zero concentration\n standards. If there are no replicates, the standard deviation zero\n Possible TODO: sometimes a minimum variance is used in other software.\n\n In the notation below we assume the response signal is the Y-axis and the\n concentration is the X-axis.\n\n Example: Two replicates of the lowest concentration standard (conc=1.0 pg/ml)\n have standard deviation of 100 across their responses. 
LLOD will be `model.predict\n (1.0) + 100 * 2.5` where 2.5 is the `lower_std_dev_multiplier` parameter.\n\n :param bottom_std_dev: Standard deviation at the bottom calibration point.\n :param top_std_dev: Standard deviation at the top calibration point.\n :param std_dev_multiplier: Multiplier for the standard deviations (default 2.5).\n :return: Pair of tuples containing the LLOD and ULOD, and the corresponding x-values.\n \"\"\"\n\n x_indexed_y_data = pd.DataFrame({\"x\": x_data, \"y\": y_data}).set_index(\"x\")\n # remove zeros from x_data\n x_indexed_y_data = x_indexed_y_data[x_indexed_y_data.index > 0]\n x_min = np.min(x_indexed_y_data.index)\n x_max = np.max(x_indexed_y_data.index)\n bottom_std_dev = x_indexed_y_data.loc[x_min, \"y\"].std()\n top_std_dev = x_indexed_y_data.loc[x_max, \"y\"].std()\n\n # Calculate LLOD and ULOD of RESPONSE SIGNAL\n llod = self.predict(x_min) + (lower_std_dev_multiplier * bottom_std_dev)\n ulod = self.predict(x_max) - (upper_std_dev_multiplier * top_std_dev)\n\n # Calculate the limits of detection for CONCENTRATION\n llod_x = self.predict_inverse(llod)\n ulod_x = self.predict_inverse(ulod)\n return llod_x, ulod_x, llod, ulod\n\n def fit(self, x_data, y_data, weight_func=None, LOD_func=None, **kwargs):\n \"\"\"\n Fit the 4 Parameter Logistic (4PL) model.\n\n x_data: x data points\n y_data: y data points\n weight_func: optional Function that calculates weights from y_data. This is\n passed into the `curve_fit` function where the function minimized is `sum\n ((r / weight_func(y_data)) ** 2)` where r is the residuals.\n Thus for a typical 1/y^2 weighting, `weight_func` should be `lambda\n y_data: y_data`\n \"\"\"\n x_data = np.float64(x_data)\n y_data = np.float64(y_data)\n df_data = pd.DataFrame({\"x\": x_data, \"y\": y_data})\n df_data.sort_values(by=\"x\", inplace=True)\n\n if LOD_func is None:\n # default LOD_func is to use replicate variance\n LOD_func = self._calculate_lod_replicate_variance\n\n absolute_sigma = False\n weights = None\n if weight_func is not None:\n weights = weight_func(y_data)\n absolute_sigma = True\n\n # Initial guess for the parameters\n self.guess_A_ = np.min(y_data) # type: ignore\n if self.slope_direction_positive is not None:\n self.guess_B_ = 1.0 if self.slope_direction_positive else -1.0\n else:\n # type: ignore\n self.guess_B_ = (\n 1.0\n if np.mean(\n df_data.iloc[: np.minimum(self.slope_guess_num_points_to_use, len(df_data))][ # type: ignore\n \"y\"\n ]\n )\n < np.mean(\n df_data.iloc[-np.minimum(self.slope_guess_num_points_to_use, len(df_data)) :][ # type: ignore\n \"y\"\n ]\n )\n else -1.0\n )\n self.guess_C_ = np.mean(x_data) # type: ignore\n self.guess_D_ = np.max(y_data) # type: ignore\n initial_guess = [self.guess_A_, self.guess_B_, self.guess_C_, self.guess_D_]\n\n curve_fit_kwargs = {\n \"f\": self.four_param_logistic,\n \"xdata\": x_data,\n \"ydata\": y_data,\n \"p0\": initial_guess,\n \"maxfev\": 10000,\n \"sigma\": weights,\n \"absolute_sigma\": absolute_sigma,\n }\n\n # overwrite parameters with any kwargs passed in\n for k, v in kwargs.items():\n curve_fit_kwargs[k] = v\n\n # Perform the curve fit\n params, cov = curve_fit(**curve_fit_kwargs)\n self.A_, self.B_, self.C_, self.D_ = params\n self.cov_ = cov\n self.LLOD_, self.ULOD_, self.LLOD_y_, self.ULOD_y_ = LOD_func(x_data, y_data)\n return self\n\n @staticmethod\n def jacobian(x_data, A, B, C, D):\n \"\"\"\n Jacobian matrix of the 4PL function with respect to A, B, C, D.\n \"\"\"\n z = (x_data / C) ** B\n\n partial_A = 1.0 / (1.0 + z)\n partial_B = -(z * 
(A - D) * np.log(np.maximum(x_data / C, np.finfo(float).eps))) / ( # type: ignore\n (1.0 + z) ** 2\n )\n partial_C = (B * z * (A - D)) / (C * (1.0 + z) ** 2)\n partial_D = 1.0 - 1.0 / (1.0 + z)\n\n # Jacobian matrix\n J = np.array([partial_A, partial_B, partial_C, partial_D]).T\n return J\n\n def predict_confidence_band(self, x_data):\n \"\"\"\n Predict confidence bands of data points.\n\n See:\n https://www.graphpad.com/guides/prism/latest/curve-fitting/reg_graphing_confidence_and_predic.htm\n https://www.graphpad.com/guides/prism/latest/curve-fitting/reg_how_confidence_and_prediction_.htm\n https://stats.stackexchange.com/questions/15423/how-to-compute-prediction-bands-for-non-linear-regression\n\n \"\"\"\n if self.cov_ is None:\n raise Exception(\n \"Covariance matrix is not available. Please call 'fit' with appropriate data.\"\n )\n J = self.jacobian(x_data, self.A_, self.B_, self.C_, self.D_)\n pred_var = np.sum((J @ self.cov_) * J, axis=1)\n\n return np.sqrt(pred_var)\n\n def predict_prediction_band(self, x_data, y_data):\n \"\"\"\n Predict prediction bands of data points.\n TODO: still need to double-check the math here.\n \"\"\"\n ss = (y_data - self.predict(x_data)) ** 2\n df = len(x_data) - 4 # 4 parameters\n\n return np.sqrt(self.predict_confidence_band(x_data) ** 2 * ss / df)\n\n def predict_inverse(self, y):\n \"\"\"Inverse 4 Parameter Logistic (4PL) model.\n\n Used for calculating the x-value for a given y-value.\n Usually, standard curves are fitted using concentration as x-values and response as\n y-values, so that variance in response is modeled for a given known concentration.\n But for samples of unknown concentration, we want to get the concentration as given\n response, which is what this function does.\n\n \"\"\"\n self.check_fit()\n z = ((self.A_ - self.D_) / (y - self.D_)) - 1 # type: ignore\n\n # For addressing fractional powers of negative numbers, np.sign(z) * np.abs(z) used rather than z\n # https://stackoverflow.com/questions/45384602/numpy-runtimewarning-invalid-value-encountered-in-power\n return self.C_ * (np.sign(z) * np.abs(z) ** (1 / self.B_)) # type: ignore\n\n def predict(self, x_data):\n self.check_fit()\n return self.four_param_logistic(x_data, self.A_, self.B_, self.C_, self.D_)" }, { "identifier": "plot_standard_curve", "path": "bio_curve_fit/plotting.py", "snippet": "def plot_standard_curve(\n x_data,\n y_data,\n fitted_model: BaseStandardCurve,\n title=\"Standard Curve Fit\",\n x_label=\"Concentration\",\n y_label=\"Response\",\n show_plot: bool = False,\n) -> bytes:\n \"\"\"\n Generate a plot of the data and the fitted curve.\n \"\"\"\n # Plot the data and the fitted curve\n # set x-axis to log scale\n # set scales to log\n plt.xscale(\"log\")\n plt.yscale(\"log\")\n data = pd.DataFrame({\"x\": x_data, \"y\": y_data})\n # remove zeros from x_data\n filtered_data = data[data[\"x\"] > 0]\n\n # Plot the fitted curve\n epsilon = 0.01\n x_min = np.log10(max(min(x_data), epsilon))\n x_max = max(x_data) * 2\n x = np.logspace(x_min, np.log10(x_max), 100) # type: ignore\n # Generate y-data based on the fitted parameters\n y_pred = fitted_model.predict(x)\n\n plt.plot(x, y_pred, label=\"Fitted curve\", color=\"red\")\n plt.scatter(filtered_data[\"x\"], filtered_data[\"y\"], label=\"Data\", s=12)\n formatter = ScalarFormatter()\n formatter.set_scientific(False)\n plt.gca().xaxis.set_major_formatter(formatter)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.title(title)\n\n # set horizontal and vertical lines for ULOD and LLOD\n llod_response, 
ulod_response = fitted_model.LLOD_y_, fitted_model.ULOD_y_\n plt.axhline(llod_response, color=\"red\", linestyle=\"--\", label=\"LLOD\") # type: ignore\n plt.axhline(ulod_response, color=\"blue\", linestyle=\"--\", label=\"ULOD\") # type: ignore\n plt.legend()\n if show_plot:\n plt.show()\n # Save the plot to a BytesIO object\n buf = io.BytesIO()\n plt.savefig(buf, format=\"png\")\n plt.clf()\n buf.seek(0)\n return buf.read()" } ]
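The FourPLLogistic snippet above quotes both the forward 4PL formula and its inverse. As a quick sanity check of that algebra, here is a NumPy-only round trip under assumed toy parameters; four_pl and four_pl_inverse are local stand-ins, not the package's API.

import numpy as np


def four_pl(x, A, B, C, D):
    # signed power handles fractional exponents of negative ratios, as in the snippet
    z = np.sign(x / C) * np.abs(x / C) ** B
    return (A - D) / (1.0 + z) + D


def four_pl_inverse(y, A, B, C, D):
    z = ((A - D) / (y - D)) - 1
    return C * (np.sign(z) * np.abs(z) ** (1 / B))


x = np.array([0.5, 2.0, 8.0])
y = four_pl(x, A=1.0, B=1.0, C=2.0, D=3.0)
assert np.allclose(four_pl_inverse(y, 1.0, 1.0, 2.0, 3.0), x)  # round trip recovers x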
import numpy as np
import pandas as pd
import pytest

from bio_curve_fit.logistic import FourPLLogistic
from bio_curve_fit.plotting import plot_standard_curve
3,685
# set a seed for reproducibility
np.random.seed(42)


def test_fit_and_plot():
    TEST_PARAMS = [1.0, 1.0, 2.0, 3.0]

    x_data = np.logspace(0.00001, 7, 100, base=np.e)  # type: ignore
    # generate y-data based on the test parameters
    y_data = FourPLLogistic.four_param_logistic(
        x_data + np.random.normal(0.0, 0.1 * x_data, len(x_data)), *TEST_PARAMS
    )

    model = FourPLLogistic().fit(
        x_data, y_data, weight_func=FourPLLogistic.inverse_variance_weight_function
    )

    # model should recover parameters used to generate the data
    params = list(model.get_params().values())
    assert np.isclose(params, TEST_PARAMS, rtol=0.4).all()  # type: ignore
    r2 = model.score(x_data, y_data)
    assert r2 > 0.995
    # test plotting
# set a seed for reproducibility
np.random.seed(42)


def test_fit_and_plot():
    TEST_PARAMS = [1.0, 1.0, 2.0, 3.0]

    x_data = np.logspace(0.00001, 7, 100, base=np.e)  # type: ignore
    # generate y-data based on the test parameters
    y_data = FourPLLogistic.four_param_logistic(
        x_data + np.random.normal(0.0, 0.1 * x_data, len(x_data)), *TEST_PARAMS
    )

    model = FourPLLogistic().fit(
        x_data, y_data, weight_func=FourPLLogistic.inverse_variance_weight_function
    )

    # model should recover parameters used to generate the data
    params = list(model.get_params().values())
    assert np.isclose(params, TEST_PARAMS, rtol=0.4).all()  # type: ignore
    r2 = model.score(x_data, y_data)
    assert r2 > 0.995
    # test plotting
plot_standard_curve(x_data, y_data, model)
1
2023-11-13 15:06:15+00:00
8k
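The test in this record passes inverse_variance_weight_function as weight_func, which ends up as sigma in scipy.optimize.curve_fit. Since curve_fit minimizes sum(((f(x) - y) / sigma) ** 2), supplying sigma = y + eps down-weights large responses, approximating a 1/y**2 weighting. A toy illustration with a straight line (assumed example, not from the repo):

import numpy as np
from scipy.optimize import curve_fit


def line(x, a, b):
    return a * x + b


rng = np.random.default_rng(0)
x = np.linspace(1.0, 10.0, 50)
y = line(x, 2.0, 1.0) * (1 + 0.05 * rng.standard_normal(50))  # noise scales with y

sigma = y + np.finfo(float).eps  # mirrors inverse_variance_weight_function above
params, _ = curve_fit(line, x, y, sigma=sigma, absolute_sigma=True)
print(params)  # roughly [2.0, 1.0]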
chziakas/backbone-learn
backbone_learn/backbone/backbone_clustering.py
[ { "identifier": "MIOClustering", "path": "backbone_learn/exact_solvers/mio_clustering.py", "snippet": "class MIOClustering:\n \"\"\"\n Class for solving clustering problems using Mixed-Integer Optimization.\n \"\"\"\n\n def __init__(\n self,\n n_clusters: int = None,\n time_limit: float = 1200,\n ls_pairs_diff_cluster: Optional[List[Tuple[int, int]]] = None,\n ls_pairs_same_cluster: Optional[List[Tuple[int, int]]] = None,\n ):\n self.n_clusters = n_clusters\n self.ls_pairs_diff_cluster = ls_pairs_diff_cluster\n self.ls_pairs_same_cluster = ls_pairs_same_cluster\n self.time_limit = time_limit\n self.model = LpProblem(\"Clustering MIO\", LpMinimize)\n self.z = None # For storing solution for z variables\n self.y = None # For storing solution for y variables\n self.cluster_means = None\n\n @staticmethod\n def euclidean_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.linalg.norm(point1 - point2)\n\n def _initialize_variables(self, num_points: int):\n \"\"\"\n Initialize the decision variables for the optimization problem.\n\n Args:\n num_points (int): The number of data points.\n\n Returns:\n Tuple: A tuple containing the dictionaries of z and y variables.\n \"\"\"\n z = LpVariable.dicts(\n \"z\",\n [\n (i, j, k)\n for i in range(num_points - 1)\n for j in range(i + 1, num_points)\n for k in range(self.n_clusters)\n ],\n 0,\n 1,\n LpBinary,\n )\n\n y = LpVariable.dicts(\n \"y\",\n [(i, k) for i in range(num_points) for k in range(self.n_clusters)],\n 0,\n 1,\n LpBinary,\n )\n\n return z, y\n\n def _calculate_distances_noise(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculate and return the matrix of pairwise distances with added noise.\n\n Args:\n X (np.ndarray): The input feature matrix.\n\n Returns:2\n np.ndarray: The matrix of pairwise distances with noise.\n \"\"\"\n distances = np.linalg.norm(X[:, np.newaxis] - X, axis=2)\n min_dist = np.min(distances[np.nonzero(distances)])\n noise = 0.1 * min_dist * (2 * np.random.rand(X.shape[0], X.shape[0], self.n_clusters) - 1)\n return distances[:, :, np.newaxis] + noise\n\n def _calculate_distances(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Calculate and return the matrix of pairwise distances\n\n Args:\n X (np.ndarray): The input feature matrix.\n\n Returns:2\n np.ndarray: The matrix of pairwise distances with noise.\n \"\"\"\n distances = np.linalg.norm(X[:, np.newaxis] - X, axis=2)\n return np.tile(distances[:, :, np.newaxis], (1, 1, self.n_clusters))\n\n def _add_constraints(self, num_points: int, z: dict, y: dict, coef: np.ndarray, b: int):\n \"\"\"\n Add constraints to the optimization model.\n\n Args:\n num_points (int): The number of data points.\n z (dict): The decision variables representing pair assignments.\n y (dict): The decision variables representing individual assignments.\n coef (np.ndarray): Coefficient matrix for the objective function.\n b (int): Minimum number of points per cluster.\n \"\"\"\n # Objective\n\n z_opt, y_opt = self._initialize_variables(num_points)\n\n if self.ls_pairs_diff_cluster:\n for (i, j) in self.ls_pairs_diff_cluster:\n for k in range(self.n_clusters):\n z_opt[i, j, k].setInitialValue(0)\n z_opt[i, j, k].fixValue()\n\n self.model += lpSum(\n z_opt[i, j, k] * coef[i, j, k]\n for i in range(num_points - 1)\n for j in range(i + 1, num_points)\n for k in range(self.n_clusters)\n )\n\n # Each point is assigned to exactly one cluster\n for i in range(num_points):\n self.model += lpSum(y_opt[i, k] for k in range(self.n_clusters)) == 1\n\n # Each cluster has at least b points\n 
for k in range(self.n_clusters):\n self.model += lpSum(y_opt[i, k] for i in range(num_points)) >= b\n\n # Relationship between y and z variables\n for i in range(num_points - 1):\n for j in range(i + 1, num_points):\n for k in range(self.n_clusters):\n self.model += z_opt[i, j, k] <= y_opt[i, k]\n self.model += z_opt[i, j, k] <= y_opt[j, k]\n self.model += z_opt[i, j, k] >= y_opt[i, k] + y_opt[j, k] - 1\n\n # Exclusion constraints\n if self.ls_pairs_diff_cluster:\n for (i, j) in self.ls_pairs_diff_cluster:\n for k in range(self.n_clusters):\n self.model += y_opt[i, k] + y_opt[j, k] <= 1\n\n # Inclusion constraints\n if self.ls_pairs_same_cluster:\n for (i, j) in self.ls_pairs_same_cluster:\n for k in range(self.n_clusters):\n self.model += y_opt[i, k] - y_opt[j, k] == 0\n\n def extract_cluster_means(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Extract cluster means after fitting the model.\n\n Args:\n X (np.ndarray): The input feature matrix.\n\n Returns:\n np.ndarray: An array of cluster means.\n \"\"\"\n num_points = len(X)\n # Initialize an array to store means\n cluster_means = np.zeros((self.n_clusters, X.shape[1]))\n\n for k in range(self.n_clusters):\n cluster_points = [] # List to store data points assigned to the current cluster\n for i in range(num_points):\n if self.y[i, k] == 1.0:\n cluster_points.append(X[i])\n\n if cluster_points:\n cluster_means[k] = np.mean(cluster_points, axis=0)\n\n return cluster_means\n\n def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None):\n \"\"\"\n Fit the model to the given data using Mixed-Integer Optimization.\n\n Args:\n X (np.ndarray): The input feature matrix.\n y (Optional[np.ndarray]): The target vector (not used in this model).\n \"\"\"\n num_points = len(X)\n b = int((num_points / self.n_clusters) * 0.1) # Minimum number of points per cluster\n\n coef = self._calculate_distances_noise(X)\n\n self._add_constraints(num_points, self.z, self.y, coef, b)\n\n solver = PULP_CBC_CMD(timeLimit=self.time_limit, warmStart=True)\n\n # Solve the problem\n self.model.solve(solver)\n\n self.y = np.zeros((num_points, self.n_clusters))\n self.z = np.zeros((num_points, num_points, self.n_clusters))\n\n for v in self.model.variables():\n var_value = v.varValue\n var_name = v.name\n if var_name.startswith(\"y_\"):\n # Parse the indices for y\n i, k = (\n var_name.replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"y_\", \"\")\n .replace(\",\", \"\")\n .split(\"_\")\n )\n i, k = int(i), int(k)\n self.y[i, k] = var_value\n elif var_name.startswith(\"z_\"):\n # Parse the indices for z\n i, j, k = (\n var_name.replace(\"(\", \"\")\n .replace(\")\", \"\")\n .replace(\"z_\", \"\")\n .replace(\",\", \"\")\n .split(\"_\")\n )\n i, j, k = int(i), int(j), int(k)\n self.z[i, j, k] = var_value\n\n # Extract and store cluster means\n self.labels = self._get_cluster_assingments(X.shape[0])\n self.cluster_centers = self._compute_cluster_centers(X)\n self.wcss = self._compute_wcss(X)\n self.silhouette_score = self._compute_silhouette_score(X)\n\n def _get_cluster_assingments(self, n_rows: int) -> np.ndarray:\n \"\"\"\n Predict cluster assignments for new data points based on stored cluster means.\n\n Args:\n new_data (np.ndarray): The new data points for which predictions are to be made.\n\n Returns:\n np.ndarray: An array of cluster assignments for the new data points.\n \"\"\"\n cluster_assignments = np.zeros(n_rows, dtype=int)\n\n for i in range(n_rows):\n cluster_assignments[i] = np.argmax(self.y[i, :]) # np.argmin(distances)\n return 
cluster_assignments\n\n def _compute_wcss(self, X: np.ndarray) -> float:\n \"\"\"\n Compute the Within-Cluster Sum of Squares (WCSS) for the fitted model.\n\n Args:\n X (np.ndarray): The input feature matrix used for fitting the model.\n\n Returns:\n float: The computed WCSS value.\n\n Raises:\n ValueError: If the model has not been fitted yet or if cluster means are not available.\n \"\"\"\n\n wcss = 0.0\n cluster_labels_pred = self.labels\n\n for cluster_idx in range(self.n_clusters):\n cluster_points = X[cluster_labels_pred == cluster_idx]\n wcss += np.sum((cluster_points - self.cluster_centers[cluster_idx]) ** 2)\n return wcss\n\n def _compute_cluster_centers(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Extract cluster means after fitting the model.\n\n Args:\n X (np.ndarray): The input feature matrix.\n\n Returns:\n np.ndarray: An array of cluster means.\n \"\"\"\n # Initialize an array to store means\n cluster_centers = np.zeros((self.n_clusters, X.shape[1]))\n for i in range(X.shape[0]):\n for k in range(self.n_clusters):\n cluster_points = [] # List to store data points assigned to the current cluster\n if self.labels[i] == k:\n cluster_points.append(X[i, :])\n if cluster_points:\n cluster_centers[k] = np.mean(cluster_points, axis=0)\n\n return cluster_centers\n\n def _compute_silhouette_score(self, X: np.ndarray) -> float:\n \"\"\" \"\"\"\n from sklearn.metrics import silhouette_score\n\n silhouette_avg = silhouette_score(X, self.labels)\n return silhouette_avg\n\n def predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Predict cluster assignments for new data points based on stored cluster means.\n\n Args:\n new_data (np.ndarray): The new data points for which predictions are to be made.\n\n Returns:\n np.ndarray: An array of cluster assignments for the new data points.\n \"\"\"\n\n num_new_points = len(X)\n n_clusters = self.n_clusters\n\n cluster_assignments = np.zeros(num_new_points, dtype=int)\n\n for i in range(num_new_points):\n # Calculate distances between the new data point and cluster means\n distances = [np.linalg.norm(X[i] - self.cluster_centers[k]) for k in range(n_clusters)]\n cluster_assignments[i] = np.argmin(distances)\n return cluster_assignments" }, { "identifier": "KMeansSolver", "path": "backbone_learn/heuristic_solvers/kmeans_solver.py", "snippet": "class KMeansSolver(HeuristicSolverBase):\n \"\"\"\n A heuristic solver that applies KMeans clustering to identify relevant instances.\n \"\"\"\n\n def __init__(self, n_clusters: int = 10, **kwargs) -> None:\n \"\"\"\n Initializes the KMeansHeuristicSolver with a specified number of clusters.\n Args:\n n_clusters (int): The number of clusters to form.\n \"\"\"\n self.n_clusters: int = n_clusters\n self._model = KMeans()\n self.wcss = None\n\n def _compute_cluster_centers(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Extract cluster means after fitting the model.\n\n Args:\n X (np.ndarray): The input feature matrix.\n\n Returns:\n np.ndarray: An array of cluster means.\n \"\"\"\n # Initialize an array to store means\n cluster_centers = np.zeros((self.n_clusters, X.shape[1]))\n for i in range(X.shape[0]):\n for k in range(self.n_clusters):\n cluster_points = [] # List to store data points assigned to the current cluster\n if self._model.labels_[i] == k:\n cluster_points.append(X[i, :])\n if cluster_points:\n cluster_centers[k] = np.mean(cluster_points, axis=0)\n\n return cluster_centers\n\n def fit(\n self,\n X: np.ndarray,\n y: np.ndarray = None,\n init: str = \"k-means++\",\n n_init: int = 10,\n max_iter: 
int = 300,\n tol: float = 0.0001,\n random_state: int = 0,\n ) -> None:\n \"\"\"\n Applies KMeans clustering to the data with customizable hyperparameters.\n Args:\n X (np.ndarray): Input feature matrix.\n y (np.ndarray): Target vector (not used in clustering).\n init (str): Method for initialization.\n n_init (int): Number of time the k-means algorithm will be run with different centroid seeds.\n max_iter (int): Maximum number of iterations of the k-means algorithm for a single run.\n tol (float): Relative tolerance with regards to Frobenius norm of the difference in the cluster centers.\n random_state (int): Determines random number generation for centroid initialization.\n \"\"\"\n if X.shape[0] < self.n_clusters:\n self.n_clusters = X.shape[0]\n\n self._model.set_params(\n n_clusters=self.n_clusters,\n init=\"random\",\n n_init=n_init,\n max_iter=max_iter,\n tol=tol,\n random_state=random_state,\n )\n self._model.fit(X)\n self.cluster_centers = self._compute_cluster_centers(X)\n self.wcss = self._compute_wcss(X)\n self.silhouette_score = self._compute_silhouette_score(X)\n\n def get_relevant_variables(self) -> List[Tuple[int, int]]:\n \"\"\"\n Identifies tuples of instance indices that are not in the same cluster.\n Returns:\n List of tuples: Each tuple contains indices of instances not in the same cluster.\n \"\"\"\n\n n = len(self._model.labels_)\n grid_x, grid_y = np.meshgrid(np.arange(n), np.arange(n), indexing=\"ij\")\n mask = self._model.labels_[grid_x] != self._model.labels_[grid_y]\n upper_triangle_mask = np.triu(mask, k=1)\n i_indices, j_indices = np.where(upper_triangle_mask)\n different_pairs = [(min(i, j), max(i, j)) for i, j in zip(i_indices, j_indices)]\n return different_pairs\n\n def _compute_wcss(self, X: np.ndarray) -> float:\n \"\"\"\n Mthod to calculate the Within-Cluster Sum of Squares (WCSS).\n\n Args:\n X (np.ndarray): The input feature matrix.\n\n Returns:\n float: The WCSS value.\n \"\"\"\n wcss = 0.0\n cluster_labels_pred = self._model.labels_\n\n for cluster_idx in range(self.n_clusters):\n cluster_points = X[cluster_labels_pred == cluster_idx]\n wcss += np.sum((cluster_points - self.cluster_centers[cluster_idx]) ** 2)\n\n return wcss\n\n def _compute_silhouette_score(self, X: np.ndarray) -> float:\n \"\"\" \"\"\"\n # Check if the number of unique clusters is 1 or equal to the number of samples\n if len(set(self._model.labels_)) == 1 or len(X) == len(set(self._model.labels_)):\n # Silhouette score cannot be computed in this case\n return 0.0\n silhouette_avg = silhouette_score(X, self._model.labels_)\n return silhouette_avg" }, { "identifier": "BackboneUnsupervised", "path": "backbone_learn/backbone/backbone_unsupervised.py", "snippet": "class BackboneUnsupervised(BackboneBase):\n \"\"\"\n Implementation for unsupervised learning specific operations.\n \"\"\"\n\n def preprocessing_backbone(self, X_selected: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform preprocessing specific to unsupervised learning during backbone construction.\n This typically involves transposing the dataset.\n\n Args:\n X_selected (np.ndarray): The selected feature dataset after screen selection.\n\n Returns:\n np.ndarray: The transposed dataset for unsupervised learning.\n \"\"\"\n return X_selected.transpose()\n\n def set_utilities(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Set utilities for unsupervised learning, typically one for each sample.\n\n Args:\n X (np.ndarray): The feature dataset.\n\n Returns:\n np.ndarray: An array of utilities, one for each sample.\n \"\"\"\n return 
np.ones(X.shape[0])\n\n def utilize_variables(\n self, X_selected: np.ndarray, variables_exact_idx: List[int]\n ) -> np.ndarray:\n \"\"\"\n Utilize selected variables in the dataset after processing the backbone sets in unsupervised learning.\n In unsupervised learning, the entire dataset is often used as is.\n\n Args:\n X_selected (np.ndarray): The selected feature dataset after screen selection.\n variables_exact_idx (List[int]): List of indices for variables selected by the backbone (unused in unsupervised).\n\n Returns:\n np.ndarray: Dataset with all features, as variable selection is typically not performed in unsupervised learning.\n \"\"\"\n if self.heuristic_solver is not None:\n self.exact_solver.ls_pairs_diff_cluster = variables_exact_idx\n return X_selected\n\n def preprocessing_predict(self, X: np.ndarray) -> np.ndarray:\n \"\"\"\n Preprocess the dataset before making predictions in unsupervised learning.\n Typically, the entire dataset is used as is.\n\n Args:\n X (np.ndarray): The input feature matrix.\n\n Returns:\n np.ndarray: The original dataset, as preprocessing is typically not required for predictions in unsupervised learning.\n \"\"\"\n return X\n\n def get_relevant_variables(\n self, feature_idx: List[int], threshold: float = None\n ) -> List[Tuple[int, int]]:\n \"\"\"\n Implements the retrieval of relevant variables for unsupervised learning.\n\n In unsupervised learning, this method identifies pairs of variables that\n are considered relevant based on the learning model used.\n\n Args:\n feature_idx (List[int]): List of feature indices to consider.\n\n Returns:\n List[Tuple[int, int]]: A list of tuples, where each tuple contains a pair of indices\n representing relevant variable pairs.\n \"\"\"\n rel_variables_local = self.heuristic_solver.get_relevant_variables()\n return rel_variables_local\n\n def build_backbone_set(self, backbone_sets) -> List:\n \"\"\"\n Find tuples that are common to all backbone sets\n\n Args:\n backbone_sets (list of list of int): The list of lists of backbone sets\n\n Returns:\n list: A backbone set with the tuples\n \"\"\"\n excluded_pairs = Utils.merge_lists_and_sort(backbone_sets)\n num_points = self.n_samples_backbone\n self.exact_solver.ls_pairs_same_cluster = Utils.generate_index_pairs(\n num_points, excluded_pairs\n )\n\n return Utils.find_common_tuples(backbone_sets)" } ]
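KMeansSolver.get_relevant_variables in the snippet above vectorizes "which point pairs landed in different clusters" with a meshgrid and an upper-triangular mask. The same trick in isolation, on toy labels:

import numpy as np

labels = np.array([0, 0, 1, 1])  # toy cluster assignments for four points
n = len(labels)

grid_x, grid_y = np.meshgrid(np.arange(n), np.arange(n), indexing="ij")
mask = np.triu(labels[grid_x] != labels[grid_y], k=1)  # upper triangle: each pair once

pairs = [(int(i), int(j)) for i, j in zip(*np.where(mask))]
print(pairs)  # [(0, 2), (0, 3), (1, 2), (1, 3)]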
from ..exact_solvers.mio_clustering import MIOClustering
from ..heuristic_solvers.kmeans_solver import KMeansSolver
from .backbone_unsupervised import BackboneUnsupervised
5,072
# Copyright (c) 2023 Vassilis Digalakis Jr, Christos Ziakas
# Licensed under the MIT License.


class BackboneClustering(BackboneUnsupervised):
    """
    Specific implementation of the Backbone method for clustering.

    This class uses K-means for heuristic solving and retains the MIO
    optimizer for exact solving. No screen selector is used in this approach,
    as K-means is considered efficient for feature selection.

    Inherits from:
        BackboneBase (ABC): The abstract base class for backbone algorithms.
    """

    def set_solvers(self, n_clusters: int = 10, time_limit: int = 1000):
        """
        Initializes the clustering method with specified components.

        Args:
            n_clusters (int, optional): Number of clusters for K-means. Defaults to 10.
            time_limit (int): Time limit for the optimization process.
        """
        self.screen_selector = None  # No screen selector for this clustering approach
# Copyright (c) 2023 Vassilis Digalakis Jr, Christos Ziakas
# Licensed under the MIT License.


class BackboneClustering(BackboneUnsupervised):
    """
    Specific implementation of the Backbone method for clustering.

    This class uses K-means for heuristic solving and retains the MIO
    optimizer for exact solving. No screen selector is used in this approach,
    as K-means is considered efficient for feature selection.

    Inherits from:
        BackboneBase (ABC): The abstract base class for backbone algorithms.
    """

    def set_solvers(self, n_clusters: int = 10, time_limit: int = 1000):
        """
        Initializes the clustering method with specified components.

        Args:
            n_clusters (int, optional): Number of clusters for K-means. Defaults to 10.
            time_limit (int): Time limit for the optimization process.
        """
        self.screen_selector = None  # No screen selector for this clustering approach
self.heuristic_solver = KMeansSolver(n_clusters=n_clusters)
1
2023-11-18 14:28:12+00:00
8k
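Both solvers in this record keep the same within-cluster sum of squares (WCSS) bookkeeping: the squared distance of every point to the center of its assigned cluster. A compact restatement on toy data (illustrative only):

import numpy as np

X = np.array([[0.0, 0.0], [0.0, 1.0], [10.0, 10.0], [10.0, 11.0]])
labels = np.array([0, 0, 1, 1])
centers = np.array([X[labels == k].mean(axis=0) for k in range(2)])

wcss = sum(
    np.sum((X[labels == k] - centers[k]) ** 2)
    for k in range(2)
)
print(wcss)  # 1.0 (two clusters, each with a within-cluster spread of 0.5)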
newcastleuniversity/DISPEL
dispel/processing/flags.py
[ { "identifier": "EntityType", "path": "dispel/data/core.py", "snippet": "class ReadingSchema:\nclass Evaluation(Epoch):\nclass Session(Epoch):\nclass Reading(FlagMixIn):\n def __init__(\n self,\n *args,\n uuid: str,\n finished: Optional[bool] = None,\n exit_reason: Optional[str] = None,\n user_id: Optional[str] = None,\n **kwargs,\n ):\n def to_dict(self):\n def __init__(\n self,\n *args,\n uuid: Optional[str] = None,\n evaluation_codes: Optional[Iterable[str]] = None,\n **kwargs,\n ):\n def __init__(\n self,\n evaluation: Evaluation,\n session: Optional[Session] = None,\n levels: Optional[Iterable[Level]] = None,\n measure_set: Optional[MeasureSet] = None,\n schema: Optional[ReadingSchema] = None,\n date: Any = None,\n device: Optional[Device] = None,\n ):\n def get_level(self, level_id: Optional[LevelIdType] = None) -> Level:\n def __repr__(self) -> str:\n def __iter__(self) -> Iterable[Tuple[LevelIdType, Level]]:\n def __len__(self) -> int:\n def empty(self) -> bool:\n def levels(self) -> ValuesView[Level]:\n def level_ids(self) -> List[LevelId]:\n def has_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> bool:\n def get_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> RawDataSet:\n def get_measure_set(self, level_id: Optional[LevelIdType] = None) -> MeasureSet:\n def get_merged_measure_set(self) -> MeasureSet:\n def set(self, value, **kwargs):\n def _get_level(self, level: Optional[Union[LevelIdType, Level]] = None) -> Level:\n def _measure_set(\n self,\n value: MeasureSet,\n level: Optional[Union[LevelIdType, Level]] = None,\n ):\n def _measure_value(\n self,\n value: MeasureValue,\n level: Optional[Union[LevelIdType, Level]] = None,\n epoch: Optional[LevelEpoch] = None,\n ):\n def _raw_data_set(\n self,\n value: RawDataSet,\n level: Union[LevelIdType, Level],\n concatenate: bool = False,\n overwrite: bool = False,\n ):\n def _epoch_measure_set(self, value: LevelEpoch, level: Union[LevelIdType, Level]):\n def _level(self, value: Level):\n def _set_flag(self, value: Flag):" }, { "identifier": "Flag", "path": "dispel/data/flags.py", "snippet": "class Flag:\n \"\"\"A class for entity flag.\"\"\"\n\n #: The flag identifier (string or id format)\n id_: InitVar[FlagIdType]\n\n #: The flag identifier\n id: FlagId = field(init=False)\n\n #: The detailed reason for the flag\n reason: str\n\n #: Stop processing\n stop_processing: bool = False\n\n def __post_init__(self, id_: FlagIdType):\n if isinstance(id_, str):\n self.id = FlagId.from_str(id_)\n elif isinstance(id_, FlagId):\n self.id = id_\n else:\n raise TypeError(\n \"Flag id should be either a convertible string id or an \"\n \"FlagId class.\"\n )\n\n def __hash__(self):\n return hash((self.id, self.reason, self.stop_processing))\n\n def format(self, *args, **kwargs) -> \"Flag\":\n \"\"\"Format an flag.\"\"\"\n return Flag(\n id_=self.id.format(*args, **kwargs),\n reason=self.reason.format(*args, **kwargs),\n stop_processing=self.stop_processing,\n )" }, { "identifier": "FlagId", "path": "dispel/data/flags.py", "snippet": "class FlagId(DefinitionId):\n \"\"\"The identifier of an entity flag for a task.\n\n Parameters\n ----------\n task_name\n The name and abbreviation of the task. Note that if no abbreviation is provided\n the name is used directly in the id.\n flag_name\n The name of the flag and its abbreviation.\n flag_type\n The type of the flag. See :class:`~dispel.data.flags.FlagType`.\n flag_severity\n The severity of the flag. 
See :class:`~dispel.data.flags.FlagSeverity`.\n\n Notes\n -----\n The abbreviations of values are passed using\n :class:`~dispel.data.values.AbbreviatedValue`. To generate the actual id the `.abbr`\n accessor is used. If one passes only strings, the class actually wraps those into\n ``AbbreviatedValue`` instances.\n\n Examples\n --------\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> from dispel.data.flags import FlagId, FlagType\n >>> FlagId(\n ... task_name=AV('Cognitive Processing Speed', 'CPS'),\n ... flag_name=AV('tilt angle', 'ta'),\n ... flag_type=FlagType.BEHAVIORAL,\n ... flag_severity=FlagSeverity.DEVIATION,\n ... )\n cps-behavioral-deviation-ta\n \"\"\"\n\n def __init__(\n self,\n task_name: Union[str, AV],\n flag_name: Union[str, AV],\n flag_type: Union[str, FlagType],\n flag_severity: Union[str, FlagSeverity],\n ):\n self.task_name = AV.wrap(task_name)\n self.flag_name = AV.wrap(flag_name)\n # flag type\n if isinstance(flag_type, str):\n flag_type = FlagType.from_abbr(flag_type)\n self.flag_type = cast(FlagType, flag_type).av\n # flag severity\n if isinstance(flag_severity, str):\n flag_severity = FlagSeverity.from_abbr(flag_severity)\n self.flag_severity = cast(FlagSeverity, flag_severity).av\n\n id_ = \"-\".join(\n (\n self.task_name.abbr.lower(),\n self.flag_type.abbr.lower(),\n self.flag_severity.abbr.lower(),\n self.flag_name.abbr.lower(),\n )\n )\n\n super().__init__(id_)\n\n @classmethod\n def from_str(cls, value: str) -> \"FlagId\":\n \"\"\"Create a flag id from a string representation.\n\n Parameters\n ----------\n value\n The string from which the flag id is to be constructed. It ought to respect\n the following format ``<task_name>-<flag_type>-<flag_name>`` where the flag\n type should one of the enumerations defined in\n :class:`~dispel.data.flags.FlagType`.\n\n Returns\n -------\n FlagId\n The initialised flag identifier.\n\n Raises\n ------\n ValueError\n If the flag string representation does not respect the required format.\n \"\"\"\n components = value.split(\"-\")\n if len(components) != 4:\n raise ValueError(\n \"Flag Id format is not respected. 
Please provide an id that follows \"\n \"the following format: ``<task_name>-<flag_type>-<flag_name>``\"\n )\n return cls(\n task_name=components[0],\n flag_name=components[3],\n flag_type=components[1],\n flag_severity=components[2],\n )\n\n def format(self, *args, **kwargs) -> \"FlagId\":\n \"\"\"Format an flag identifier.\"\"\"\n return FlagId(\n task_name=self.task_name,\n flag_name=self.flag_name.format(*args, **kwargs),\n flag_type=FlagType.from_abbr(self.flag_type.abbr),\n flag_severity=FlagSeverity.from_abbr(self.flag_severity.abbr),\n )" }, { "identifier": "FlagSeverity", "path": "dispel/data/flags.py", "snippet": "class FlagSeverity(AVEnum):\n \"\"\"An enumeration for flag severity.\"\"\"\n\n DEVIATION = \"deviation\"\n INVALIDATION = \"invalidation\"" }, { "identifier": "FlagType", "path": "dispel/data/flags.py", "snippet": "class FlagType(AVEnum):\n \"\"\"An enumeration for flag types.\"\"\"\n\n TECHNICAL = \"technical\"\n BEHAVIORAL = \"behavioral\"" }, { "identifier": "Level", "path": "dispel/data/levels.py", "snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data 
sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == \"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return self._epochs[\"epoch\"].tolist()\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": "AbbreviatedValue", "path": "dispel/data/values.py", "snippet": "class AbbreviatedValue:\n \"\"\"An abbreviated value.\n\n Examples\n --------\n This class allows to consistently handle abbreviated terms. Assuming you have a name\n of an assessment, e.g. 
`Cognitive Processing Speed` test and the respective\n abbreviation would be `CPS`, then you can create an abbreviated value like this:\n\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> value = AV('Cognitive Processing Speed test', 'CPS')\n >>> value\n Cognitive Processing Speed test (CPS)\n\n While this seems like a lot of overhead, it comes in handy when describing value\n definitions or higher-level abstractions, such as measure definitions.\n\n Parameters\n ----------\n value\n The full description of the value\n abbr\n The abbreviated form of the value\n\n Attributes\n ----------\n value\n The full description of the value\n \"\"\"\n\n def __init__(self, value: str, abbr: Optional[str] = None):\n self.value = value\n self._abbr = abbr\n\n @property\n def abbr(self):\n \"\"\"Get the abbreviated form of the value.\"\"\"\n return self._abbr or self.value\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n if self._abbr:\n return f\"{self.value} ({self._abbr})\"\n return self.value\n\n def __hash__(self):\n return hash((self.value, self._abbr))\n\n def __eq__(self, other):\n if isinstance(other, str):\n return self._abbr is None and self.value == other\n if isinstance(other, AbbreviatedValue):\n return self.value == other.value and self.abbr == other.abbr\n return False\n\n def __lt__(self, other):\n if not isinstance(other, AbbreviatedValue):\n raise ValueError(f\"Unsupported type in comparison: {type(other)}\")\n if self.value == other.value:\n return self.abbr < other.abbr\n return self.value < other.value\n\n def format(self, *args, **kwargs):\n \"\"\"Format an abbreviated value.\"\"\"\n return AbbreviatedValue(\n self.value.format(*args, **kwargs),\n self._abbr.format(*args, **kwargs) if self._abbr else None,\n )\n\n @classmethod\n def wrap(cls, value):\n \"\"\"Wrap a value into an abbreviated value.\n\n This is a small helper class to conveniently wrap values into an abbreviated\n value, if they are not already one.\n\n Parameters\n ----------\n value\n The value to be wrapped\n\n Returns\n -------\n AbbreviatedValue\n The passed ``value`` if it is an instance of :class:`AbbreviatedValue`. If a\n string is passed, then the string is passed as ``value`` argument to the\n constructor.\n\n Raises\n ------\n ValueError\n If the passed value is neither a string nor an instance of\n :class:`AbbreviatedValue`.\n \"\"\"\n if isinstance(value, cls):\n return value\n if isinstance(value, str):\n return cls(value)\n\n raise ValueError(f\"Can only wrap string values. 
Got: {type(value)}\")" }, { "identifier": "TaskMixin", "path": "dispel/processing/utils.py", "snippet": "class TaskMixin(metaclass=ABCMeta):\n \"\"\"A mixin class for entities related to tasks.\"\"\"\n\n #: The task name\n task_name: Union[AV, str]\n\n def get_task_name(self, **kwargs) -> Union[str, AV]:\n \"\"\"Get the task name.\"\"\"\n task_name = kwargs.get(\"task_name\", None) or getattr(self, \"task_name\")\n if isinstance(task_name, (str, AV)):\n return task_name\n raise ValueError(\"Missing task name.\")" }, { "identifier": "set_attributes_from_kwargs", "path": "dispel/utils.py", "snippet": "def set_attributes_from_kwargs(\n obj: object, *attrs: str, pop: bool = True, **kwargs\n) -> Dict[str, Any]:\n \"\"\"Set attributes in object from kwargs.\n\n Parameters\n ----------\n obj\n The class object where the attributes are to be set.\n attrs\n The names of teh attributes that are to be set in the object.\n pop\n ``True`` if the attributes are to popped from the provided keyword arguments.\n ``False`` otherwise.\n kwargs\n The keyword argument from which the attributes are to be retrieved as well. If\n no values corresponding to the provided attributes are found a ``None`` value is\n set instead.\n\n Returns\n -------\n Dict[str, Any]\n A dictionary of the remaining key word arguments.\n \"\"\"\n func = getattr(kwargs, \"pop\" if pop else \"get\")\n for attribute in attrs:\n kwargs_attribute = func(attribute, None)\n new_attribute = kwargs_attribute or getattr(obj, attribute, None)\n setattr(obj, attribute, new_attribute)\n return kwargs" } ]
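The FlagId snippet quoted in this record's context carries a complete doctest; restated as a standalone sketch (assuming the `dispel` package is importable, and noting that the original doctest omits the `FlagSeverity` import it relies on):

```python
# Mirrors the usage example embedded in the quoted FlagId docstring;
# FlagSeverity is imported explicitly here, unlike in the doctest.
from dispel.data.values import AbbreviatedValue as AV
from dispel.data.flags import FlagId, FlagSeverity, FlagType

flag_id = FlagId(
    task_name=AV("Cognitive Processing Speed", "CPS"),
    flag_name=AV("tilt angle", "ta"),
    flag_type=FlagType.BEHAVIORAL,
    flag_severity=FlagSeverity.DEVIATION,
)
print(repr(flag_id))  # per the doctest: cps-behavioral-deviation-ta

# from_str expects exactly four dash-separated components and
# raises ValueError otherwise:
parsed = FlagId.from_str("cps-behavioral-deviation-ta")
```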
import inspect from abc import ABCMeta, abstractmethod from typing import Any, Callable, Dict, Generator, Iterable, Optional, Tuple, Union from dispel.data.core import EntityType, Reading from dispel.data.flags import Flag, FlagId, FlagSeverity, FlagType from dispel.data.levels import Level from dispel.data.values import AbbreviatedValue as AV from dispel.processing.utils import TaskMixin from dispel.utils import set_attributes_from_kwargs
5,769
#: The name of the flag flag_name: Union[AV, str] #: The type of the flag flag_type: Union[FlagType, str] # The severity of the flag flag_severity: Union[FlagSeverity, str] #: The detailed reason of the flag reason: str #: The stop_processing status of the flag step stop_processing: bool = False #: The flagging function flagging_function: Optional[Callable[..., bool]] = None def __init__(self, *args, **kwargs): kwargs = set_attributes_from_kwargs( self, "task_name", "flag_name", "flag_type", "flag_severity", "reason", "stop_processing", "flagging_function", **kwargs, ) self.kwargs = kwargs super().__init__(*args, **kwargs) def get_flag_name(self, **kwargs) -> Union[str, AV]: """Get the flag name.""" flag_name = kwargs.get("flag_name", None) or getattr(self, "flag_name") if isinstance(flag_name, (str, AV)): return flag_name.format(**kwargs) raise ValueError("Missing flag name.") def get_flag_type(self, **kwargs) -> Union[str, FlagType]: """Get the flag type.""" flag_type = kwargs.get("flag_type", None) or getattr(self, "flag_type") if isinstance(flag_type, (str, FlagType)): return flag_type raise ValueError("Missing flag type.") def get_flag_severity(self, **kwargs) -> Union[str, FlagSeverity]: """Get the flag severity.""" flag_severity = kwargs.get("flag_severity", None) or getattr( self, "flag_severity" ) if isinstance(flag_severity, (str, FlagSeverity)): return flag_severity raise ValueError("Missing flag severity.") def get_reason(self, **kwargs) -> str: """Get the flag reason.""" reason = kwargs.get("reason", None) or getattr(self, "reason") if isinstance(reason, str): return reason.format(**kwargs) raise ValueError("Missing flag reason.") @abstractmethod def get_flag_targets( self, reading: Reading, level: Optional[Level] = None, **kwargs ) -> Iterable[EntityType]: """Get flag targets. Parameters ---------- reading The reading to which the targets are associated. level The level associated with the targets (if needed). kwargs Keyword arguments from which the flag targets are to be extracted. Returns ------- Iterable[EntityType] An iterable of the flag targets. """ raise NotImplementedError def get_flagging_function(self) -> Optional[Callable[..., bool]]: """Get the flagging function.""" # unbind bound methods func = self.flagging_function if func is not None and hasattr(func, "__func__"): return func.__func__ # type: ignore return func def get_flagging_functions(self) -> FlaggingFunctionGeneratorType: """Get all flagging functions associated with this step.""" if func := self.get_flagging_function(): yield func, {} members = inspect.getmembers(self, predicate=inspect.isroutine) for _, func in members: if func is not None and hasattr(func, "__flagging_function__"): yield func, func.__flag_kwargs__ # type: ignore def set_flag_kwargs(self, **kwargs): """Set keyword arguments inside flagging function. Parameters ---------- kwargs The keyword arguments to be added inside the flagging function keyword arguments. """ _, parent, *_ = inspect.stack() getattr(self, parent.function).__flag_kwargs__.update(kwargs) def get_flag(self, **kwargs) -> Flag: """Get the flag corresponding to the flag step.""" (all_kwargs := self.kwargs.copy()).update(kwargs) return Flag(
"""Data entity flag module.""" def flag(_func=None, **kwargs): """Decorate a function as a flagging function.""" def wrapper(func): func.__flagging_function__ = True func.__flag_kwargs__ = { **kwargs, **getattr(func, "__flag_kwargs__", {}), } return func if _func is None: return wrapper return wrapper(_func) FlaggingFunctionGeneratorType = Generator[ Tuple[Callable[..., bool], Dict[str, Any]], None, None ] class FlagStepMixin(TaskMixin, metaclass=ABCMeta): """A flag mix in class.""" #: The name of the flag flag_name: Union[AV, str] #: The type of the flag flag_type: Union[FlagType, str] # The severity of the flag flag_severity: Union[FlagSeverity, str] #: The detailed reason of the flag reason: str #: The stop_processing status of the flag step stop_processing: bool = False #: The flagging function flagging_function: Optional[Callable[..., bool]] = None def __init__(self, *args, **kwargs): kwargs = set_attributes_from_kwargs( self, "task_name", "flag_name", "flag_type", "flag_severity", "reason", "stop_processing", "flagging_function", **kwargs, ) self.kwargs = kwargs super().__init__(*args, **kwargs) def get_flag_name(self, **kwargs) -> Union[str, AV]: """Get the flag name.""" flag_name = kwargs.get("flag_name", None) or getattr(self, "flag_name") if isinstance(flag_name, (str, AV)): return flag_name.format(**kwargs) raise ValueError("Missing flag name.") def get_flag_type(self, **kwargs) -> Union[str, FlagType]: """Get the flag type.""" flag_type = kwargs.get("flag_type", None) or getattr(self, "flag_type") if isinstance(flag_type, (str, FlagType)): return flag_type raise ValueError("Missing flag type.") def get_flag_severity(self, **kwargs) -> Union[str, FlagSeverity]: """Get the flag severity.""" flag_severity = kwargs.get("flag_severity", None) or getattr( self, "flag_severity" ) if isinstance(flag_severity, (str, FlagSeverity)): return flag_severity raise ValueError("Missing flag severity.") def get_reason(self, **kwargs) -> str: """Get the flag reason.""" reason = kwargs.get("reason", None) or getattr(self, "reason") if isinstance(reason, str): return reason.format(**kwargs) raise ValueError("Missing flag reason.") @abstractmethod def get_flag_targets( self, reading: Reading, level: Optional[Level] = None, **kwargs ) -> Iterable[EntityType]: """Get flag targets. Parameters ---------- reading The reading to which the targets are associated. level The level associated with the targets (if needed). kwargs Keyword arguments from which the flag targets are to be extracted. Returns ------- Iterable[EntityType] An iterable of the flag targets. """ raise NotImplementedError def get_flagging_function(self) -> Optional[Callable[..., bool]]: """Get the flagging function.""" # unbind bound methods func = self.flagging_function if func is not None and hasattr(func, "__func__"): return func.__func__ # type: ignore return func def get_flagging_functions(self) -> FlaggingFunctionGeneratorType: """Get all flagging functions associated with this step.""" if func := self.get_flagging_function(): yield func, {} members = inspect.getmembers(self, predicate=inspect.isroutine) for _, func in members: if func is not None and hasattr(func, "__flagging_function__"): yield func, func.__flag_kwargs__ # type: ignore def set_flag_kwargs(self, **kwargs): """Set keyword arguments inside flagging function. Parameters ---------- kwargs The keyword arguments to be added inside the flagging function keyword arguments. 
""" _, parent, *_ = inspect.stack() getattr(self, parent.function).__flag_kwargs__.update(kwargs) def get_flag(self, **kwargs) -> Flag: """Get the flag corresponding to the flag step.""" (all_kwargs := self.kwargs.copy()).update(kwargs) return Flag(
id_=FlagId(
2
2023-11-14 10:06:46+00:00
8k
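This record's `next_line` target, `id_=FlagId(`, sits inside `FlagStepMixin.get_flag`. Only the lines up to and including that call are attested above; the argument list below is a hedged guess assembled from the getters and the `Flag`/`FlagId` signatures quoted in the context, not the repository's actual continuation:

```python
# Hypothetical completion of FlagStepMixin.get_flag; everything past
# `id_=FlagId(` is inferred, not taken from the source file.
def get_flag(self, **kwargs) -> Flag:
    """Get the flag corresponding to the flag step."""
    (all_kwargs := self.kwargs.copy()).update(kwargs)
    return Flag(
        id_=FlagId(
            task_name=self.get_task_name(**all_kwargs),
            flag_name=self.get_flag_name(**all_kwargs),
            flag_type=self.get_flag_type(**all_kwargs),
            flag_severity=self.get_flag_severity(**all_kwargs),
        ),
        reason=self.get_reason(**all_kwargs),
        stop_processing=self.stop_processing,
    )
```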
runDMCA/home-assistant-mazda
custom_components/mazda/pymazda/sensordata/sensor_data_builder.py
[ { "identifier": "BackgroundEventList", "path": "custom_components/mazda/pymazda/sensordata/background_event_list.py", "snippet": "class BackgroundEventList: # noqa: D101\n def __init__(self): # noqa: D107\n self.background_events = []\n\n def randomize(self, sensor_collection_start_timestamp): # noqa: D102\n self.background_events = []\n\n if random.randrange(0, 10) > 0:\n return\n\n now_timestamp = datetime.datetime.now(datetime.UTC)\n time_since_sensor_collection_start = int(\n (now_timestamp - sensor_collection_start_timestamp)\n / datetime.timedelta(milliseconds=1)\n )\n\n if time_since_sensor_collection_start < 10000:\n return\n\n paused_timestamp = timestamp_to_millis(\n sensor_collection_start_timestamp\n ) + random.randrange(800, 4500)\n resumed_timestamp = paused_timestamp + random.randrange(2000, 5000)\n\n self.background_events.append(BackgroundEvent(2, paused_timestamp))\n self.background_events.append(BackgroundEvent(3, resumed_timestamp))\n\n def to_string(self): # noqa: D102\n return \"\".join(event.to_string() for event in self.background_events)" }, { "identifier": "KeyEventList", "path": "custom_components/mazda/pymazda/sensordata/key_event_list.py", "snippet": "class KeyEventList: # noqa: D101\n def __init__(self): # noqa: D107\n self.key_events = []\n\n def randomize(self, sensor_collection_start_timestamp): # noqa: D102\n self.key_events = []\n\n if random.randrange(0, 20) > 0:\n return\n\n now_timestamp = datetime.datetime.now(datetime.UTC)\n time_since_sensor_collection_start = int(\n (now_timestamp - sensor_collection_start_timestamp)\n / datetime.timedelta(milliseconds=1)\n )\n\n if time_since_sensor_collection_start < 10000:\n return\n\n event_count = random.randrange(2, 5)\n id_char_code_sum = random.randrange(517, 519)\n for i in range(event_count):\n time = random.randrange(5000, 8000) if i == 0 else random.randrange(10, 50)\n self.key_events.append(\n KeyEvent(time, id_char_code_sum, random.randrange(0, 2) == 0)\n )\n\n def to_string(self): # noqa: D102\n return \"\".join(event.to_string() for event in self.key_events)\n\n def get_sum(self): # noqa: D102\n sum = 0\n for key_event in self.key_events:\n sum += key_event.id_char_code_sum\n sum += key_event.time\n sum += 2\n return sum" }, { "identifier": "PerformanceTestResults", "path": "custom_components/mazda/pymazda/sensordata/performance_test_results.py", "snippet": "class PerformanceTestResults: # noqa: D101\n def randomize(self): # noqa: D102\n num_iterations_1 = (random.randrange(350, 600) * 100) - 1\n self.mod_test_result = 16\n self.mod_test_iterations = int(num_iterations_1 / 100)\n\n num_iterations_2 = (random.randrange(563, 2000) * 100) - 1\n self.float_test_result = 59\n self.float_test_iterations = int(num_iterations_2 / 100)\n\n num_iterations_3 = (random.randrange(500, 2000) * 100) - 1\n self.sqrt_test_result = num_iterations_3 - 899\n self.sqrt_test_iterations = int(num_iterations_3 / 100)\n\n num_iterations_4 = (random.randrange(500, 1500) * 100) - 1\n self.trig_test_result = num_iterations_4\n self.trig_test_iterations = int(num_iterations_4 / 100)\n\n self.loop_test_result = random.randrange(8500, 16000)\n\n def to_string(self): # noqa: D102\n values = [\n self.mod_test_result,\n self.mod_test_iterations,\n self.float_test_result,\n self.float_test_iterations,\n self.sqrt_test_result,\n self.sqrt_test_iterations,\n self.trig_test_result,\n self.trig_test_iterations,\n self.loop_test_result,\n ]\n\n return \",\".join(map(str, values))" }, { "identifier": "SensorDataEncryptor", "path": 
"custom_components/mazda/pymazda/sensordata/sensor_data_encryptor.py", "snippet": "class SensorDataEncryptor: # noqa: D101\n def __init__(self): # noqa: D107\n self.aes_key = secrets.token_bytes(16)\n self.aes_iv = secrets.token_bytes(16)\n self.hmac_sha256_key = secrets.token_bytes(32)\n\n public_key = serialization.load_der_public_key(base64.b64decode(RSA_PUBLIC_KEY))\n self.encrypted_aes_key = public_key.encrypt(\n self.aes_key, asymmetric_padding.PKCS1v15()\n )\n self.encrypted_hmac_sha256_key = public_key.encrypt(\n self.hmac_sha256_key, asymmetric_padding.PKCS1v15()\n )\n\n def encrypt_sensor_data(self, sensor_data): # noqa: D102\n padder = padding.PKCS7(128).padder()\n padded_data = padder.update(sensor_data.encode()) + padder.finalize()\n cipher = Cipher(algorithms.AES(self.aes_key), modes.CBC(self.aes_iv))\n encryptor = cipher.encryptor()\n encrypted_sensor_data = encryptor.update(padded_data) + encryptor.finalize()\n\n iv_and_encrypted_sensor_data = self.aes_iv + encrypted_sensor_data\n\n hmac_obj = hmac.HMAC(self.hmac_sha256_key, hashes.SHA256())\n hmac_obj.update(iv_and_encrypted_sensor_data)\n hmac_result = hmac_obj.finalize()\n\n result = iv_and_encrypted_sensor_data + hmac_result\n\n aes_timestamp = random.randrange(0, 3) * 1000\n hmac_timestamp = random.randrange(0, 3) * 1000\n base64_timestamp = random.randrange(0, 3) * 1000\n\n return f\"1,a,{to_base64_str(self.encrypted_aes_key)},{to_base64_str(self.encrypted_hmac_sha256_key)}${to_base64_str(result)}${aes_timestamp},{hmac_timestamp},{base64_timestamp}\"" }, { "identifier": "feistel_cipher", "path": "custom_components/mazda/pymazda/sensordata/sensor_data_util.py", "snippet": "def feistel_cipher(upper_32_bits, lower_32_bits, key): # noqa: D103\n def to_signed_32(n):\n n = n & 0xFFFFFFFF\n return n | (-(n & 0x80000000))\n\n def iterate(arg1, arg2, arg3):\n return arg1 ^ (arg2 >> (32 - arg3) | to_signed_32(arg2 << arg3))\n\n upper = to_signed_32(upper_32_bits)\n lower = to_signed_32(lower_32_bits)\n\n data = (lower & 0xFFFFFFFF) | (upper << 32)\n\n lower2 = to_signed_32(data & 0xFFFFFFFF)\n upper2 = to_signed_32((data >> 32) & 0xFFFFFFFF)\n\n for i in range(16):\n v2_1 = upper2 ^ iterate(lower2, key, i)\n v8 = lower2\n lower2 = v2_1\n upper2 = v8\n\n return (upper2 << 32) | (lower2 & 0xFFFFFFFF)" }, { "identifier": "timestamp_to_millis", "path": "custom_components/mazda/pymazda/sensordata/sensor_data_util.py", "snippet": "def timestamp_to_millis(timestamp): # noqa: D103\n return int(timestamp.timestamp() * 1000)" }, { "identifier": "SystemInfo", "path": "custom_components/mazda/pymazda/sensordata/system_info.py", "snippet": "class SystemInfo: # noqa: D101\n def __init__(self): # noqa: D107\n self.android_builds = AndroidBuilds()\n\n def randomize(self): # noqa: D102\n device_model, device = random.choice(\n list(self.android_builds.get_builds().items())\n )\n codename = device[\"codename\"]\n build = random.choice(device[\"builds\"])\n build_version_incremental = random.randrange(1000000, 9999999)\n\n self.screen_height, self.screen_width = random.choice(SCREEN_SIZES)\n self.battery_charging = random.randrange(0, 10) <= 1\n self.battery_level = random.randrange(10, 90)\n self.orientation = 1\n self.language = \"en\"\n self.android_version = build[\"version\"]\n self.rotation_lock = \"1\" if random.randrange(0, 10) > 1 else \"0\"\n self.build_model = device_model\n self.build_bootloader = str(random.randrange(1000000, 9999999))\n self.build_hardware = codename\n self.package_name = \"com.interrait.mymazda\"\n self.android_id 
= secrets.token_bytes(8).hex()\n self.keyboard = 0\n self.adb_enabled = False\n self.build_version_codename = \"REL\"\n self.build_version_incremental = build_version_incremental\n self.build_version_sdk = ANDROID_VERSION_TO_SDK_VERSION.get(build[\"version\"])\n self.build_manufacturer = \"Google\"\n self.build_product = codename\n self.build_tags = \"release-keys\"\n self.build_type = \"user\"\n self.build_user = \"android-build\"\n self.build_display = build[\"buildId\"]\n self.build_board = codename\n self.build_brand = \"google\"\n self.build_device = codename\n self.build_fingerprint = f\"google/{codename}/{codename}:{build['version']}/{build['buildId']}/{build_version_incremental}:user/release-keys\"\n self.build_host = f\"abfarm-{random.randrange(10000, 99999)}\"\n self.build_id = build[\"buildId\"]\n\n def to_string(self): # noqa: D102\n return \",\".join(\n [\n \"-1\",\n \"uaend\",\n \"-1\",\n str(self.screen_height),\n str(self.screen_width),\n (\"1\" if self.battery_charging else \"0\"),\n str(self.battery_level),\n str(self.orientation),\n percent_encode(self.language),\n percent_encode(self.android_version),\n self.rotation_lock,\n percent_encode(self.build_model),\n percent_encode(self.build_bootloader),\n percent_encode(self.build_hardware),\n \"-1\",\n self.package_name,\n \"-1\",\n \"-1\",\n self.android_id,\n \"-1\",\n str(self.keyboard),\n \"1\" if self.adb_enabled else \"0\",\n percent_encode(self.build_version_codename),\n percent_encode(str(self.build_version_incremental)),\n str(self.build_version_sdk),\n percent_encode(self.build_manufacturer),\n percent_encode(self.build_product),\n percent_encode(self.build_tags),\n percent_encode(self.build_type),\n percent_encode(self.build_user),\n percent_encode(self.build_display),\n percent_encode(self.build_board),\n percent_encode(self.build_brand),\n percent_encode(self.build_device),\n percent_encode(self.build_fingerprint),\n percent_encode(self.build_host),\n percent_encode(self.build_id),\n ]\n )\n\n def get_char_code_sum(self): # noqa: D102\n return sum_char_codes(self.to_string())" }, { "identifier": "TouchEventList", "path": "custom_components/mazda/pymazda/sensordata/touch_event_list.py", "snippet": "class TouchEventList: # noqa: D101\n def __init__(self): # noqa: D107\n self.touch_events = []\n\n def randomize(self, sensor_collection_start_timestamp): # noqa: D102\n self.touch_events = []\n\n now_timestamp = datetime.datetime.now(datetime.UTC)\n time_since_sensor_collection_start = int(\n (now_timestamp - sensor_collection_start_timestamp)\n / datetime.timedelta(milliseconds=1)\n )\n\n if time_since_sensor_collection_start < 3000:\n return\n elif (\n time_since_sensor_collection_start >= 3000\n and time_since_sensor_collection_start < 5000\n ):\n # down event\n self.touch_events.append(\n TouchEvent(\n 2,\n time_since_sensor_collection_start - random.randrange(1000, 2000),\n 1,\n 1,\n )\n )\n\n # move events\n num_move_events = random.randrange(2, 9)\n for i in range(num_move_events): # noqa: B007\n self.touch_events.append(TouchEvent(1, random.randrange(3, 50), 1, 1))\n\n # up event\n self.touch_events.append(TouchEvent(3, random.randrange(3, 100), 1, 1))\n elif (\n time_since_sensor_collection_start >= 5000\n and time_since_sensor_collection_start < 10000\n ):\n for i in range(2):\n # down event\n self.touch_events.append(\n TouchEvent(\n 2, random.randrange(100, 1000) + (5000 if i == 1 else 0), 1, 1\n )\n )\n\n # move events\n num_move_events = random.randrange(2, 9)\n for i in range(num_move_events): # noqa: 
B007\n self.touch_events.append(\n TouchEvent(1, random.randrange(3, 50), 1, 1)\n )\n\n # up event\n self.touch_events.append(TouchEvent(3, random.randrange(3, 100), 1, 1))\n else:\n for i in range(3):\n timestamp_offset = 0\n if i == 0:\n timestamp_offset = time_since_sensor_collection_start - 9000\n else:\n timestamp_offset = random.randrange(2000, 3000)\n\n # down event\n self.touch_events.append(\n TouchEvent(2, random.randrange(100, 1000) + timestamp_offset, 1, 1)\n )\n\n # move events\n num_move_events = random.randrange(2, 9)\n for i in range(num_move_events): # noqa: B007\n self.touch_events.append(\n TouchEvent(1, random.randrange(3, 50), 1, 1)\n )\n\n # up event\n self.touch_events.append(TouchEvent(3, random.randrange(3, 100), 1, 1))\n\n def to_string(self): # noqa: D102\n return \"\".join(event.to_string() for event in self.touch_events)\n\n def get_sum(self): # noqa: D102\n sum = 0\n for touch_event in self.touch_events:\n sum += touch_event.type\n sum += touch_event.time\n return sum" } ]
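The `feistel_cipher` helper is quoted in full in this record's context, so it can be exercised on its own; the operands in the demo call are arbitrary illustration values, not taken from the dataset:

```python
# Restatement of the quoted helper (noqa comments dropped); the round
# body uses a tuple swap equivalent to the original v2_1/v8 shuffle.
def feistel_cipher(upper_32_bits, lower_32_bits, key):
    def to_signed_32(n):
        n = n & 0xFFFFFFFF
        return n | (-(n & 0x80000000))

    def iterate(arg1, arg2, arg3):
        return arg1 ^ (arg2 >> (32 - arg3) | to_signed_32(arg2 << arg3))

    upper = to_signed_32(upper_32_bits)
    lower = to_signed_32(lower_32_bits)
    data = (lower & 0xFFFFFFFF) | (upper << 32)
    lower2 = to_signed_32(data & 0xFFFFFFFF)
    upper2 = to_signed_32((data >> 32) & 0xFFFFFFFF)

    # 16 Feistel rounds keyed by the round index and the caller's key.
    for i in range(16):
        lower2, upper2 = upper2 ^ iterate(lower2, key, i), lower2

    return (upper2 << 32) | (lower2 & 0xFFFFFFFF)

print(feistel_cipher(0x12345678, 0x9ABCDEF0, 42))  # arbitrary demo inputs
```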
import datetime # noqa: D100 import random from .background_event_list import BackgroundEventList from .key_event_list import KeyEventList from .performance_test_results import PerformanceTestResults from .sensor_data_encryptor import SensorDataEncryptor from .sensor_data_util import feistel_cipher, timestamp_to_millis from .system_info import SystemInfo from .touch_event_list import TouchEventList
3,797
SDK_VERSION = "2.2.3" class SensorDataBuilder: # noqa: D101 def __init__(self): # noqa: D107 self.sensor_collection_start_timestamp = datetime.datetime.now(datetime.UTC) self.device_info_time = random.randrange(3, 8) * 1000 self.system_info = SystemInfo() self.system_info.randomize() self.touch_event_list = TouchEventList() self.key_event_list = KeyEventList() self.background_event_list = BackgroundEventList() self.performance_test_results = PerformanceTestResults() self.performance_test_results.randomize()
SDK_VERSION = "2.2.3" class SensorDataBuilder: # noqa: D101 def __init__(self): # noqa: D107 self.sensor_collection_start_timestamp = datetime.datetime.now(datetime.UTC) self.device_info_time = random.randrange(3, 8) * 1000 self.system_info = SystemInfo() self.system_info.randomize() self.touch_event_list = TouchEventList() self.key_event_list = KeyEventList() self.background_event_list = BackgroundEventList() self.performance_test_results = PerformanceTestResults() self.performance_test_results.randomize()
self.sensor_data_encryptor = SensorDataEncryptor()
3
2023-11-14 01:42:43+00:00
8k
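All three event-list classes in this record share one idiom: elapsed time is computed by subtracting the collection-start timestamp and dividing the result by a one-millisecond `timedelta`. A minimal sketch of that idiom in isolation (requires Python 3.11+ for `datetime.UTC`, matching the quoted code):

```python
import datetime

# Millisecond-resolution elapsed time, as used by the randomize()
# methods of TouchEventList, KeyEventList and BackgroundEventList.
start = datetime.datetime.now(datetime.UTC)
# ... sensor collection would happen here ...
elapsed_ms = int(
    (datetime.datetime.now(datetime.UTC) - start)
    / datetime.timedelta(milliseconds=1)
)
print(elapsed_ms)
```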
ubertidavide/fastbots
fastbots/firefox_bot.py
[ { "identifier": "config", "path": "fastbots/config.py", "snippet": "class DriverType(Enum):\n FIREFOX = 1\n CHROME = 2\nENV_DEVELOPMENT: str = 'development'\nENV_RELEASE: str = 'release'\nLOG_LEVEL: int = config('LOGLEVEL', default=logging.DEBUG, cast=int)\nENV: str = config('ENV', default=ENV_DEVELOPMENT, cast=str)\nPROJECT_NAME: str = config('PROJECT_NAME', default='fastbot', cast=str)\nAPP_VERSION: str = config('APP_VERSION', default='0.2.6', cast=str)\nBOT_DRIVER_TYPE: DriverType = config('BOT_DRIVER_TYPE', default='firefox', cast=DriverType.from_str)\nBOT_DOWNLOAD_FOLDER_PATH: str = config('BOT_DOWNLOAD_FOLDER_PATH', default=None, cast=str)\nBOT_STRICT_DOWNLOAD_WAIT: bool = config('BOT_STRICT_DOWNLOAD_WAIT', default=True, cast=bool)\nBOT_ARGUMENTS: str = config('BOT_ARGUMENTS', default=None, cast=str)\nBOT_USER_AGENT: str = config('BOT_USER_AGENT', default=f'{PROJECT_NAME} {APP_VERSION}', cast=str)\nBOT_PROXY_ENABLED: bool = config('BOT_PROXY_ENABLED', default=False, cast=bool)\nBOT_HTTP_PROXY: str = config('BOT_HTTP_PROXY', default=None, cast=str)\nBOT_HTTPS_PROXY: str = config('BOT_HTTPS_PROXY', default=BOT_HTTP_PROXY, cast=str)\nBOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH: str = config('BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH', default='debug/', cast=str)\nBOT_HTML_DOWNLOAD_FOLDER_PATH: str = config('BOT_HTML_DOWNLOAD_FOLDER_PATH', default='debug/', cast=str)\nBOT_COOKIES_FILE_PATH: str = config('BOT_COOKIES_FILE_PATH', default='cookies.pkl', cast=str)\nBOT_PREFERENCES_FILE_PATH: str = config('BOT_PREFERENCES_FILE_PATH', default='preferences.json', cast=str)\nBOT_MAX_RETRIES: int = config('BOT_MAX_RETRIES', default=2, cast=int)\nBOT_RETRY_DELAY: int = config('BOT_RETRY_DELAY', default=10, cast=int)\nSELENIUM_GLOBAL_IMPLICIT_WAIT: int = config('SELENIUM_GLOBAL_IMPLICIT_WAIT', default=5, cast=int)\nSELENIUM_EXPECTED_URL_CHECK: bool = config('SELENIUM_EXPECTED_URL_CHECK', default=True, cast=bool)\nSELENIUM_EXPECTED_URL_TIMEOUT: int = config('SELENIUM_EXPECTED_URL_TIMEOUT', default=5, cast=int)\nSELENIUM_DEFAULT_WAIT: int = config('SELENIUM_DEFAULT_WAIT', default=5, cast=int)\nSELENIUM_FILE_DOWNLOAD_TIMEOUT: int = config('SELENIUM_FILE_DOWNLOAD_TIMEOUT', default=20, cast=int)\nSELENIUM_LOCATORS_FILE: str = config('SELENIUM_LOCATORS_FILE', default='locators.ini', cast=str)\nSELENIUM_DISABLE_CAPTURE: bool = config('SELENIUM_DISABLE_CAPTURE', default=True, cast=bool)\nSELENIUM_IN_SCOPE_CAPTURE: str = config('SELENIUM_IN_SCOPE_CAPTURE', default=None, cast=str)\nSELENIUM_ENABLE_HAR_CAPTURE: bool = config('SELENIUM_ENABLE_HAR_CAPTURE', default=False, cast=bool)\nCAPSOLVER_API_KEY: str = config('CAPSOLVER_API_KEY', default=None, cast=str)\n def from_str(label):" }, { "identifier": "Bot", "path": "fastbots/bot.py", "snippet": "class Bot(ABC):\n \"\"\"\n Base class for creating web automation bots using Selenium.\n\n Attributes:\n _temp_dir (str): A temporary directory for storing files during the bot's operation.\n _download_dir (str): The directory where downloaded files are stored.\n _locators (ConfigParser): Configuration parser for managing locators.\n _payload (Payload): Datastore for the bot.\n\n Methods:\n __init__(): Initializes the Bot instance.\n __enter__(): Enters a context and loads/configures resources.\n __exit__(): Exits a context and cleans up resources.\n check_page_url(expected_page_url: str): Checks if the browser is on the expected page URL.\n locator(page_name: str, locator_name: str) -> str: Retrieves a locator for a given page.\n wait_downloaded_file_path(file_extension: str, 
new_file_name: str | None = None) -> str:\n Waits for a specific downloaded file and returns its path.\n save_screenshot(): Saves a screenshot of the browser.\n save_html(): Saves the HTML page of the browser.\n save_cookies(): Saves all the cookies found in the browser.\n load_cookies(): Loads and adds cookies from a file.\n __load_locators__() -> ConfigParser: Loads locators from a configuration file.\n __load_preferences__() -> Union[FirefoxProfile, dict]:\n Load preferences that are stored in a JSON file specified in the configuration.\n __load_options__() -> Union[FirefoxOptions, ChromeOptions]: Loads default options.\n __load_driver__() -> WebDriver: Loads and configures the driver.\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"\n Initializes the Bot instance.\n\n Sets up temporary directories, locators, and a data store for the bot.\n \"\"\"\n super().__init__()\n\n # used to track time\n self._start_time: float = time.time()\n\n # use a temporary directory as the default download folder\n self._temp_dir: str = tempfile.mkdtemp()\n\n # official downloaded file folder\n if config.BOT_DOWNLOAD_FOLDER_PATH != 'None':\n self._download_dir: str = config.BOT_DOWNLOAD_FOLDER_PATH\n else:\n self._download_dir: str = tempfile.mkdtemp()\n\n # load all the locators\n self._locators: ConfigParser = self.__load_locators__()\n # data store\n self._payload: Payload = Payload()\n\n # add the api key if setted\n if config.CAPSOLVER_API_KEY != 'None':\n capsolver.api_key = config.CAPSOLVER_API_KEY\n\n @property\n def driver(self) -> WebDriver:\n \"\"\"\n Gets the Selenium WebDriver instance used by the bot.\n\n Returns:\n WebDriver: The Selenium WebDriver instance.\n \"\"\"\n return self._driver\n \n @property\n def wait(self) -> WebDriverWait:\n \"\"\"\n Gets the WebDriverWait instance used for waiting in the bot.\n\n Returns:\n WebDriverWait: The WebDriverWait instance.\n \"\"\"\n return self._wait\n \n @property\n def payload(self) -> Payload:\n \"\"\"\n Gets the payload class instance used to store data in the bot.\n\n Returns:\n Payload: The payload class instance.\n \"\"\"\n return self._payload\n\n def __enter__(self) -> Type['Bot']:\n \"\"\"\n Enters a context and loads/configures resources.\n\n Sets up implicit wait and navigates to the start URL.\n\n Returns:\n Type['Bot']: The bot instance within the context.\n \"\"\"\n # add the url in scope, only used when the capture is enabled\n if config.SELENIUM_IN_SCOPE_CAPTURE != 'None':\n self._driver.scopes = config.SELENIUM_IN_SCOPE_CAPTURE.replace(' ', '').strip().split(',')\n\n # default global driver settings\n self._driver.implicitly_wait(config.SELENIUM_GLOBAL_IMPLICIT_WAIT)\n\n # load the start page, if it's setted\n start_url: str = self.locator('pages_url', 'start_url')\n if start_url != 'None':\n self._driver.get(start_url)\n\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n \"\"\"\n Exits a context and cleans up resources.\n\n Removes temporary directories and closes the driver.\n \"\"\"\n if not config.BOT_STRICT_DOWNLOAD_WAIT:\n for temp_file in list(Path(self._temp_dir).glob(f'*.*')):\n # if the file is not a firefox of chrome temporary file\n if temp_file.suffix not in '.crdownload' and temp_file.suffix not in '.part':\n # destination file name\n downloaded_file_path = Path(self._download_dir) / temp_file.name\n # move to the download folder the file name\n destination: str = shutil.move(src=str(temp_file.absolute()), dst=str(downloaded_file_path.absolute()))\n self._payload.downloads.append(destination)\n 
self._payload.output_data['downloads_count'] = len(self._payload.downloads)\n # remove the file, don't raise exception if not exsit\n temp_file.unlink(missing_ok=True)\n\n shutil.rmtree(self._temp_dir)\n self._driver.quit()\n self._payload.output_data['eta'] = time.time()-self._start_time\n\n def check_page_url(self, expected_page_url: str, strict_page_check: bool = True):\n \"\"\"\n Check if the browser is on the expected page URL.\n\n Args:\n expected_page_url (str): The expected page URL.\n strict_page_check (bool): True -> Uses url_to_be to verify that the url is the same, else use url_contains for the same check.\n\n Raises:\n ExpectedUrlError: If the browser is not on the expected page URL.\n \"\"\"\n\n # switch to contains in case of strict page check disabled\n check_function = EC.url_to_be\n if not strict_page_check:\n check_function = EC.url_contains\n\n try:\n # polling that the page URL is the expected\n WebDriverWait(driver=self._driver, timeout=config.SELENIUM_EXPECTED_URL_TIMEOUT, poll_frequency=1).until(\n check_function(expected_page_url)\n )\n\n except TimeoutException as te:\n # if not the expected URL raises an exception\n raise ExpectedUrlError(current_url=self._driver.current_url, expected_url=expected_page_url)\n\n def locator(self, page_name: str, locator_name: str) -> str:\n \"\"\"\n Retrieves a locator for a given page.\n\n Args:\n page_name (str): The name of the page.\n locator_name (str): The name of the locator.\n\n Returns:\n str: The locator string.\n\n Raises:\n ValueError: If the specified page_name or locator_name is not declared in locator's config.\n \"\"\"\n if not self._locators.has_section(page_name):\n raise ValueError(f'The specified page_name: {page_name} is not declared in locators config.')\n \n if not self._locators.has_option(page_name, locator_name):\n raise ValueError(f'The specified locator_name: {locator_name} is not declared in locators config.')\n \n return self._locators.get(page_name, locator_name)\n \n def wait_downloaded_file_path(self, file_extension: str, new_file_name: str | None = None) -> str:\n \"\"\"\n Waits for a specific downloaded file and returns its path.\n\n Args:\n file_extension (str): The file extension without the dot (e.g., \"png\" instead of \".png\").\n new_file_name (str | None): The new file name if renaming is needed.\n\n Returns:\n str: The path of the downloaded file.\n\n Raises:\n DownloadFileError: If an error occurs during the file download.\n \"\"\"\n try:\n # polling that the page URL is the expected, it uses the extension because the temp part file cache by browser\n # usually have a specific extension that isn't the usually of the files\n WebDriverWait(driver=self._driver, timeout=config.SELENIUM_FILE_DOWNLOAD_TIMEOUT, poll_frequency=1).until(\n lambda driver: len(list(Path(self._temp_dir).glob(f'*.{file_extension}'))) == 1\n )\n\n # get the latest downloaded file\n latest_file: Path = max(list(Path(self._temp_dir).glob(f'*.{file_extension}')), key=lambda x: x.stat().st_ctime)\n\n # build the download path based on renamed file or \n downloaded_file_path: Path = None\n if new_file_name is None:\n downloaded_file_path = Path(self._download_dir) / latest_file.name\n else:\n downloaded_file_path = Path(self._download_dir) / f'{new_file_name}.{file_extension}'\n \n # move to the download folder the file name\n destination: str = shutil.move(src=str(latest_file.absolute()), dst=str(downloaded_file_path.absolute()))\n self._payload.downloads.append(destination)\n self._payload.output_data['downloads_count'] 
= len(self._payload.downloads)\n # remove the file, don't raise exception if not exsit\n latest_file.unlink(missing_ok=True)\n\n # return the path and filename as string\n return destination\n\n except TimeoutException as te:\n # if not the expected URL raises an exception\n file_count: int = len(list(Path(self._temp_dir).glob(f'*.{file_extension}')))\n\n # error string based on the specific error\n if file_count == 0:\n raise DownloadFileError('File not founded in the download folder, an error with the download occurs.')\n elif file_count > 1:\n raise DownloadFileError(f'Too many downloaded files founded, files number : {file_count}.')\n\n raise DownloadFileError()\n\n def save_screenshot(self) -> str:\n \"\"\"\n Saves a screenshot of the browser.\n\n Example:\n ```python\n bot = MyBot()\n bot.save_screenshot()\n ```\n \"\"\"\n if not Path(config.BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH).exists():\n Path(config.BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH).mkdir(exist_ok=True, parents=True)\n\n file_path: Path = Path(config.BOT_SCREENSHOT_DOWNLOAD_FOLDER_PATH) / f'{datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")}.png'\n self._driver.save_screenshot(str(file_path.absolute()))\n self._payload.output_data['screenshot_path'] = str(file_path.absolute())\n return str(file_path.absolute())\n\n def save_html(self) -> str:\n \"\"\"\n Saves the HTML page of the browser.\n\n Example:\n ```python\n bot = MyBot()\n bot.save_html()\n ```\n \"\"\"\n if not Path(config.BOT_HTML_DOWNLOAD_FOLDER_PATH).exists():\n Path(config.BOT_HTML_DOWNLOAD_FOLDER_PATH).mkdir(exist_ok=True, parents=True)\n\n file_path: Path = Path(config.BOT_HTML_DOWNLOAD_FOLDER_PATH) / f'{datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")}.html'\n with open(str(file_path.absolute()), \"w\", encoding=\"utf-8\") as file:\n file.write(self._driver.page_source)\n self._payload.output_data['html_path'] = str(file_path.absolute())\n return str(file_path.absolute())\n\n def save_cookies(self) -> str:\n \"\"\"\n Saves all the cookies found in the browser.\n\n Example:\n ```python\n bot = MyBot()\n bot.save_cookies()\n ```\n \"\"\"\n cookies: List[dict] = self._driver.get_cookies()\n\n with open(config.BOT_COOKIES_FILE_PATH, 'wb') as file:\n pickle.dump(cookies, file)\n self._payload.output_data['cookies_path'] = config.BOT_COOKIES_FILE_PATH\n return config.BOT_COOKIES_FILE_PATH\n \n def load_cookies(self):\n \"\"\"\n Loads and adds cookies from a file to the browser.\n\n Example:\n ```python\n bot = MyBot()\n bot.load_cookies()\n ```\n \"\"\"\n if Path(config.BOT_COOKIES_FILE_PATH).is_file():\n with open(config.BOT_COOKIES_FILE_PATH, 'rb') as file:\n cookies = pickle.load(file)\n\n for cookie in cookies:\n self._driver.add_cookie(cookie)\n\n def __load_locators__(self) -> ConfigParser:\n \"\"\"\n Loads locators from a configuration file.\n\n Returns:\n ConfigParser: An instance of ConfigParser with loaded locators.\n\n Example:\n ```python\n bot = MyBot()\n locators = bot.__load_locators__()\n ```\n \"\"\"\n if not Path(config.SELENIUM_LOCATORS_FILE).is_file():\n return ValueError(f'Erorr, locators file not founded at path: {config.SELENIUM_LOCATORS_FILE}')\n \n config_parser: ConfigParser = ConfigParser()\n config_parser.read(config.SELENIUM_LOCATORS_FILE)\n return config_parser\n\n @abstractmethod\n def __load_preferences__(self) -> Union[FirefoxProfile, dict]:\n \"\"\"\n Load preferences that are stored in a JSON file specified in the configuration.\n\n Returns:\n Union[FirefoxProfile, dict]: Either a FirefoxProfile or a dictionary of preferences.\n\n 
Example:\n ```python\n class MyBot(Bot):\n def __load_preferences__(self):\n # your implementation here\n ```\n \"\"\"\n return NotImplementedError('Bot must define this method.')\n\n @abstractmethod\n def __load_options__(self) -> Union[FirefoxOptions, ChromeOptions]:\n \"\"\"\n Loads default options.\n\n Returns:\n Union[FirefoxOptions, ChromeOptions]: Either FirefoxOptions or ChromeOptions.\n\n Example:\n ```python\n class MyBot(Bot):\n def __load_options__(self):\n # your implementation here\n ```\n \"\"\"\n return NotImplementedError('Bot must define this method.')\n \n @abstractmethod\n def __load_driver__(self) -> WebDriver:\n \"\"\"\n Loads and configures the driver.\n\n Returns:\n WebDriver: An instance of Selenium WebDriver.\n\n Example:\n ```python\n class MyBot(Bot):\n def __load_driver__(self):\n # your implementation here\n ```\n \"\"\"\n return NotImplementedError('Bot must define this method.')" } ]
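`Bot.locator` in this record resolves names through a `ConfigParser`, and `__enter__` looks up `('pages_url', 'start_url')`. The INI below is therefore only a plausible shape for the `locators.ini` file: the `[pages_url]`/`start_url` names are attested in the quoted code, while the `login_page` section is invented for illustration:

```python
from configparser import ConfigParser

# Hypothetical locators.ini content; see the caveats in the lead-in.
LOCATORS_INI = """\
[pages_url]
start_url = https://example.com/login

[login_page]
username_input = //input[@name='username']
"""

parser = ConfigParser()
parser.read_string(LOCATORS_INI)
print(parser.get("pages_url", "start_url"))  # https://example.com/login
```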
import json import logging from pathlib import Path from datetime import datetime from seleniumwire.webdriver import Firefox from selenium.webdriver.firefox.options import Options as FirefoxOptions from selenium.webdriver.firefox.firefox_profile import FirefoxProfile from selenium.webdriver.support.wait import WebDriverWait from selenium.webdriver.remote.webdriver import WebDriver from fastbots import config, Bot
4,456
logger = logging.getLogger(__name__) class FirefoxBot(Bot): """ Firefox Bot Class representing the Firefox Bot implementation. Attributes: _driver (WebDriver): The WebDriver instance for Firefox. _wait (WebDriverWait): The WebDriverWait instance for Firefox. Methods: __init__(): Initializes all attributes of the Firefox Bot instance. save_screenshot(): Saves the browser's screenshot to a PNG file. __load_preferences__(): Loads Firefox preferences from a JSON file. __load_options__(): Loads Firefox options, including user agent and download directory. __load_driver__(): Loads and configures options for the Firefox driver. Example: ```python with FirefoxBot() as bot: bot.save_screenshot() ``` """ def __init__(self) -> None: """ Initializes all attributes of the Firefox Bot instance. """ super().__init__() # Load the configured driver self._driver: WebDriver = self.__load_driver__() # Default wait
logger = logging.getLogger(__name__) class FirefoxBot(Bot): """ Firefox Bot Class representing the Firefox Bot implementation. Attributes: _driver (WebDriver): The WebDriver instance for Firefox. _wait (WebDriverWait): The WebDriverWait instance for Firefox. Methods: __init__(): Initializes all attributes of the Firefox Bot instance. save_screenshot(): Saves the browser's screenshot to a PNG file. __load_preferences__(): Loads Firefox preferences from a JSON file. __load_options__(): Loads Firefox options, including user agent and download directory. __load_driver__(): Loads and configures options for the Firefox driver. Example: ```python with FirefoxBot() as bot: bot.save_screenshot() ``` """ def __init__(self) -> None: """ Initializes all attributes of the Firefox Bot instance. """ super().__init__() # Load the configured driver self._driver: WebDriver = self.__load_driver__() # Default wait
self._wait: WebDriverWait = WebDriverWait(driver=self._driver, timeout=config.SELENIUM_DEFAULT_WAIT, poll_frequency=1)
0
2023-11-16 00:12:09+00:00
8k
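The FirefoxBot docstring states that `__load_options__` wires up the user agent and download directory, but the method body is cropped out of this record. A hedged sketch of what such a method could look like, using real Selenium/Firefox preference APIs; the specific preference keys and the JSON-preferences handling are assumptions, not the library's actual implementation:

```python
import json
from pathlib import Path

from selenium.webdriver.firefox.options import Options as FirefoxOptions


def load_options(user_agent: str, download_dir: str, preferences_path: str) -> FirefoxOptions:
    """Assumed stand-in for FirefoxBot.__load_options__ (body not shown in the record)."""
    options = FirefoxOptions()
    options.set_preference("general.useragent.override", user_agent)
    options.set_preference("browser.download.folderList", 2)  # 2 = custom directory
    options.set_preference("browser.download.dir", download_dir)
    # BOT_PREFERENCES_FILE_PATH points at a JSON file per the quoted config.
    prefs = Path(preferences_path)
    if prefs.is_file():
        for key, value in json.loads(prefs.read_text()).items():
            options.set_preference(key, value)
    return options
```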
intel/llm-on-ray
inference/api_openai_backend/router_app.py
[ { "identifier": "OpenAIHTTPException", "path": "inference/api_openai_backend/request_handler.py", "snippet": "class OpenAIHTTPException(Exception):\n def __init__(\n self,\n status_code: int,\n message: str,\n type: str = \"Unknown\",\n ) -> None:\n self.status_code = status_code\n self.message = message\n self.type = type" }, { "identifier": "openai_exception_handler", "path": "inference/api_openai_backend/request_handler.py", "snippet": "def openai_exception_handler(request: Request, exc: OpenAIHTTPException):\n assert isinstance(\n exc, OpenAIHTTPException\n ), f\"Unable to handle invalid exception {type(exc)}\"\n if exc.status_code == status.HTTP_500_INTERNAL_SERVER_ERROR:\n message = f\"Internal Server Error\"\n internal_message = message\n exc_type = \"InternalServerError\"\n else:\n internal_message = extract_message_from_exception(exc)\n message = exc.message\n exc_type = exc.type\n err_response = ModelResponse(\n error=ErrorResponse(\n message=message,\n code=exc.status_code,\n internal_message=internal_message,\n type=exc_type,\n )\n )\n return JSONResponse(content=err_response.dict(), status_code=exc.status_code)" }, { "identifier": "RouterQueryClient", "path": "inference/api_openai_backend/query_client.py", "snippet": "class RouterQueryClient():\n def __init__(self, serve_deployments):\n self.serve_deployments = serve_deployments\n\n async def query(self, model: str, prompt: Prompt, request_id: str):\n response_stream = self.stream(\n model,\n prompt,\n request_id,\n )\n responses = [resp async for resp in response_stream]\n return ModelResponse.merge_stream(*responses)\n\n async def stream(\n self, model: str, prompt: Prompt, request_id: str\n ):\n if model in self.serve_deployments:\n deploy_handle = self.serve_deployments[model]\n else:\n raise HTTPException(404, f\"Could not find model with id {model}\")\n\n prompt_content = prompt.prompt\n request_config = prompt.parameters\n temperature = request_config.get(\"temperature\", 1.0)\n top_p = request_config.get(\"top_p\", 1.0)\n max_new_tokens = request_config.get(\"max_tokens\", None)\n gen_config = {\n \"max_new_tokens\": max_new_tokens,\n \"temperature\": temperature,\n \"top_p\": top_p,\n }\n if temperature != 1.0 or top_p != 1.0:\n gen_config.update({\"do_sample\": True})\n\n async for x in handle_request(\n model=model,\n prompt=prompt,\n request_id=request_id,\n async_iterator=deploy_handle.options(stream=True).stream_response.options(stream=True, use_new_handle_api=True).remote(prompt_content, gen_config)\n ):\n yield x\n\n async def model(self, model_id: str) -> ModelCard:\n \"\"\"Get configurations for a supported model\"\"\"\n return ModelCard(\n id=model_id,\n root=model_id,\n )\n\n async def models(self) -> Dict[str, ModelCard]:\n \"\"\"Get configurations for supported models\"\"\"\n metadatas = {}\n for model_id in self.serve_deployments:\n metadatas[model_id] = await self.model(model_id)\n return metadatas" }, { "identifier": "Prompt", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class Prompt(BaseModel):\n prompt: Union[str, List[ChatMessage]]\n use_prompt_format: bool = True\n parameters: Optional[Union[Dict[str, Any], BaseModel]] = None" }, { "identifier": "ModelResponse", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class ModelResponse(ComputedPropertyMixin, BaseModelExtended):\n generated_text: Optional[str] = None\n num_input_tokens: Optional[int] = None\n num_input_tokens_batch: Optional[int] = None\n num_generated_tokens: Optional[int] = None\n 
num_generated_tokens_batch: Optional[int] = None\n preprocessing_time: Optional[float] = None\n generation_time: Optional[float] = None\n timestamp: Optional[float] = Field(default_factory=time.time)\n finish_reason: Optional[str] = None\n error: Optional[ErrorResponse] = None\n\n @root_validator(skip_on_failure=True)\n def text_or_error_or_finish_reason(cls, values):\n if (\n values.get(\"generated_text\") is None\n and values.get(\"error\") is None\n and values.get(\"finish_reason\") is None\n ):\n raise ValueError(\n \"Either 'generated_text' or 'error' or 'finish_reason' must be set\"\n )\n return values\n\n @classmethod\n def merge_stream(cls, *responses: \"ModelResponse\") -> \"ModelResponse\":\n \"\"\"\n Merge a stream of responses into a single response.\n\n The generated text is concatenated. Fields are maxed, except for\n num_generated_tokens and generation_time, which are summed.\n \"\"\"\n if len(responses) == 1:\n return responses[0]\n\n generated_text = \"\".join(\n [response.generated_text or \"\" for response in responses]\n )\n num_input_tokens = [\n response.num_input_tokens\n for response in responses\n if response.num_input_tokens is not None\n ]\n max_num_input_tokens = max(num_input_tokens) if num_input_tokens else None\n num_input_tokens_batch = [\n response.num_input_tokens_batch\n for response in responses\n if response.num_input_tokens_batch is not None\n ]\n max_num_input_tokens_batch = (\n max(num_input_tokens_batch) if num_input_tokens_batch else None\n )\n num_generated_tokens = [\n response.num_generated_tokens\n for response in responses\n if response.num_generated_tokens is not None\n ]\n total_generated_tokens = (\n sum(num_generated_tokens) if num_generated_tokens else None\n )\n num_generated_tokens_batch = [\n response.num_generated_tokens_batch\n for response in responses\n if response.num_generated_tokens_batch is not None\n ]\n total_generated_tokens_batch = (\n sum(num_generated_tokens_batch) if num_generated_tokens_batch else None\n )\n preprocessing_time = [\n response.preprocessing_time\n for response in responses\n if response.preprocessing_time is not None\n ]\n max_preprocessing_time = max(preprocessing_time) if preprocessing_time else None\n generation_time = [\n response.generation_time\n for response in responses\n if response.generation_time is not None\n ]\n total_generation_time = sum(generation_time) if generation_time else None\n error = next(\n (response.error for response in reversed(responses) if response.error), None\n )\n\n return cls(\n generated_text=generated_text,\n num_input_tokens=max_num_input_tokens,\n num_input_tokens_batch=max_num_input_tokens_batch,\n num_generated_tokens=total_generated_tokens,\n num_generated_tokens_batch=total_generated_tokens_batch,\n preprocessing_time=max_preprocessing_time,\n generation_time=total_generation_time,\n timestamp=responses[-1].timestamp,\n finish_reason=responses[-1].finish_reason,\n error=error,\n )\n\n @property\n def total_time(self) -> Optional[float]:\n if self.generation_time is None and self.preprocessing_time is None:\n return None\n return (self.preprocessing_time or 0) + (self.generation_time or 0)\n\n @property\n def num_total_tokens(self) -> Optional[float]:\n try:\n return (self.num_input_tokens or 0) + (self.num_generated_tokens or 0)\n except Exception:\n return None\n\n @property\n def num_total_tokens_batch(self) -> Optional[float]:\n try:\n return (self.num_input_tokens_batch or 0) + (\n self.num_generated_tokens_batch or 0\n )\n except Exception:\n return None\n\n 
def unpack(self) -> Tuple[\"ModelResponse\", ...]:\n return (self,)" }, { "identifier": "CompletionRequest", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class CompletionRequest(BaseModel):\n model: str\n prompt: str\n suffix: Optional[str] = None\n temperature: Optional[float] = None\n top_p: Optional[float] = None\n n: int = 1\n max_tokens: Optional[int] = 16\n stop: Optional[List[str]] = None\n stream: bool = False\n echo: Optional[bool] = False\n presence_penalty: Optional[float] = None\n frequency_penalty: Optional[float] = None\n logprobs: Optional[int] = None\n logit_bias: Optional[Dict[str, float]] = None\n user: Optional[str] = None" }, { "identifier": "ChatCompletionRequest", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class ChatCompletionRequest(BaseModel):\n model: str\n messages: List[ChatMessage]\n temperature: Optional[float] = None\n top_p: Optional[float] = None\n n: int = 1\n max_tokens: Optional[int] = None\n stop: Optional[List[str]] = None\n stream: bool = False\n presence_penalty: Optional[float] = None\n frequency_penalty: Optional[float] = None\n logprobs: Optional[int] = None\n logit_bias: Optional[Dict[str, float]] = None\n user: Optional[str] = None" }, { "identifier": "ChatCompletionResponse", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class ChatCompletionResponse(BaseModel):\n id: str = Field(default_factory=lambda: f\"chatcmpl-{str(uuid.uuid4().hex)}\")\n object: str\n created: int = Field(default_factory=lambda: int(time.time()))\n model: str\n choices: List[Union[ChatCompletionResponseChoice, DeltaChoices]]\n usage: Optional[UsageInfo]" }, { "identifier": "CompletionResponse", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class CompletionResponse(BaseModel):\n id: str = Field(default_factory=lambda: f\"cmpl-{str(uuid.uuid4().hex)}\")\n object: str = \"text_completion\"\n created: int = Field(default_factory=lambda: int(time.time()))\n model: str\n choices: List[CompletionResponseChoice]\n usage: Optional[UsageInfo]" }, { "identifier": "DeltaChoices", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class DeltaChoices(BaseModel):\n delta: Union[DeltaRole, DeltaContent, DeltaEOS]\n index: int\n finish_reason: Optional[str]" }, { "identifier": "DeltaContent", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class DeltaContent(BaseModel):\n content: str\n\n def __str__(self):\n return self.content" }, { "identifier": "DeltaEOS", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class DeltaEOS(BaseModel):\n class Config:\n extra = \"forbid\"" }, { "identifier": "DeltaRole", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class DeltaRole(BaseModel):\n role: Literal[\"system\", \"assistant\", \"user\"]\n\n def __str__(self):\n return self.role" }, { "identifier": "ChatMessage", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class ChatMessage(BaseModel):\n role: Literal[\"system\", \"assistant\", \"user\"]\n content: str\n\n def __str__(self):\n return self.content" }, { "identifier": "ChatCompletionResponseChoice", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class ChatCompletionResponseChoice(BaseModel):\n index: int\n message: ChatMessage\n finish_reason: Optional[str]" }, { "identifier": "ModelList", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class ModelList(BaseModel):\n object: str = 
\"list\"\n data: List[ModelCard] = []" }, { "identifier": "ModelCard", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class ModelCard(BaseModel):\n id: str\n object: str = \"model\"\n created: int = Field(default_factory=lambda: int(time.time()))\n owned_by: str = \"llmonray\"\n root: Optional[str] = None\n parent: Optional[str] = None" }, { "identifier": "CompletionResponseChoice", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class CompletionResponseChoice(BaseModel):\n index: int\n text: str\n logprobs: Optional[int] = None\n finish_reason: Optional[str]" }, { "identifier": "UsageInfo", "path": "inference/api_openai_backend/openai_protocol.py", "snippet": "class UsageInfo(BaseModel):\n prompt_tokens: int\n total_tokens: int\n completion_tokens: Optional[int] = 0\n\n @classmethod\n def from_response(\n cls, response: Union[\"ModelResponse\", Dict[str, Any]]\n ) -> \"UsageInfo\":\n if isinstance(response, BaseModel):\n response_dict = response.dict()\n else:\n response_dict = response\n return cls(\n prompt_tokens=response_dict[\"num_input_tokens\"] or 0,\n completion_tokens=response_dict[\"num_generated_tokens\"] or 0,\n total_tokens=(response_dict[\"num_input_tokens\"] or 0)\n + (response_dict[\"num_generated_tokens\"] or 0),\n )" } ]
import os import uuid import async_timeout from typing import AsyncGenerator, List from fastapi import FastAPI, status from fastapi import Response as FastAPIResponse from fastapi.middleware.cors import CORSMiddleware from starlette.responses import Response, StreamingResponse from logger import get_logger from .request_handler import OpenAIHTTPException, openai_exception_handler from .query_client import RouterQueryClient from .openai_protocol import Prompt, ModelResponse, CompletionRequest, ChatCompletionRequest from .openai_protocol import ( ChatCompletionResponse, CompletionResponse, DeltaChoices, DeltaContent, DeltaEOS, DeltaRole, ChatMessage, ChatCompletionResponseChoice, ModelList, ModelCard, CompletionResponseChoice, UsageInfo, )
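A note on the merge semantics captured in the ModelResponse snippet above: generated text concatenates, generated-token counts and generation time sum, and input-token and preprocessing fields keep their per-chunk maximum. A minimal sketch, assuming the fields merge_stream references (generated_text, num_input_tokens, generation_time, ...) are declared Optional on the model as the validator implies; the chunk values are invented for illustration:

from inference.api_openai_backend.openai_protocol import ModelResponse, UsageInfo

# Hypothetical stream of three chunks; the last carries only the finish_reason.
chunks = [
    ModelResponse(generated_text="Hello", num_input_tokens=5,
                  num_generated_tokens=1, generation_time=0.02),
    ModelResponse(generated_text=", world", num_generated_tokens=2,
                  generation_time=0.03),
    ModelResponse(finish_reason="stop"),
]
merged = ModelResponse.merge_stream(*chunks)
assert merged.generated_text == "Hello, world"  # concatenated
assert merged.num_generated_tokens == 3         # summed across chunks
assert merged.num_input_tokens == 5             # maxed across chunks
usage = UsageInfo.from_response(merged)         # prompt=5, completion=3, total=8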
4,346
TIMEOUT = float(os.environ.get("ROUTER_HTTP_TIMEOUT", 600)) def init() -> FastAPI: router_app = FastAPI() router_app.add_exception_handler(OpenAIHTTPException, openai_exception_handler) router_app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) return router_app router_app = init() async def _completions_wrapper( completion_id: str, body: CompletionRequest, response: Response, generator: AsyncGenerator[ModelResponse, None], ) -> AsyncGenerator[str, None]: had_error = False async with async_timeout.timeout(TIMEOUT): all_results = [] async for results in generator: for subresult in results.unpack(): all_results.append(subresult) subresult_dict = subresult.dict() if subresult_dict.get("error"): response.status_code = subresult_dict["error"]["code"] # Drop finish reason as OpenAI doesn't expect it # for errors in streaming subresult_dict["finish_reason"] = None logger.error(f"{subresult_dict['error']}") all_results.pop() had_error = True yield "data: " + ModelResponse( **subresult_dict ).json() + "\n\n" # Return early in case of an error break choices = [ CompletionResponseChoice( index=0, text=subresult_dict["generated_text"] or "", finish_reason=subresult_dict["finish_reason"], ) ] usage = None if subresult_dict["finish_reason"]: usage = ( UsageInfo.from_response( ModelResponse.merge_stream(*all_results) ) if all_results else None ) yield "data: " + CompletionResponse( id=completion_id, object="text_completion", model=body.model, choices=choices, usage=usage, ).json() + "\n\n" if had_error: # Return early in case of an error break yield "data: [DONE]\n\n" async def _chat_completions_wrapper( completion_id: str, body: ChatCompletionRequest, response: Response, generator: AsyncGenerator[ModelResponse, None], ) -> AsyncGenerator[str, None]: had_error = False async with async_timeout.timeout(TIMEOUT): finish_reason = None choices: List[DeltaChoices] = [ DeltaChoices( delta=DeltaRole(role="assistant"), index=0, finish_reason=None, ) ] yield "data: " + ChatCompletionResponse( id=completion_id, object="chat.completion.chunk", model=body.model, choices=choices, usage=None, ).json() + "\n\n" all_results = [] async for results in generator: for subresult in results.unpack(): all_results.append(subresult) subresult_dict = subresult.dict() if subresult_dict.get("error"): response.status_code = subresult_dict["error"]["code"] logger.error(f"{subresult_dict['error']}") # Drop finish reason as OpenAI doesn't expect it # for errors in streaming subresult_dict["finish_reason"] = None all_results.pop() had_error = True yield "data: " + ModelResponse( **subresult_dict ).json() + "\n\n" # Return early in case of an error break else: finish_reason = subresult_dict["finish_reason"] choices: List[DeltaChoices] = [ DeltaChoices(
# # Copyright 2023 The LLM-on-Ray Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # =========================================================================== # # This file is adapted from # https://github.com/ray-project/ray-llm/blob/b3560aa55dadf6978f0de0a6f8f91002a5d2bed1/aviary/backend/server/routers/router_app.py # Copyright 2023 Anyscale # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # logger = get_logger(__name__) # timeout in 10 minutes. Streaming can take longer than 3 min TIMEOUT = float(os.environ.get("ROUTER_HTTP_TIMEOUT", 600)) def init() -> FastAPI: router_app = FastAPI() router_app.add_exception_handler(OpenAIHTTPException, openai_exception_handler) router_app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) return router_app router_app = init() async def _completions_wrapper( completion_id: str, body: CompletionRequest, response: Response, generator: AsyncGenerator[ModelResponse, None], ) -> AsyncGenerator[str, None]: had_error = False async with async_timeout.timeout(TIMEOUT): all_results = [] async for results in generator: for subresult in results.unpack(): all_results.append(subresult) subresult_dict = subresult.dict() if subresult_dict.get("error"): response.status_code = subresult_dict["error"]["code"] # Drop finish reason as OpenAI doesn't expect it # for errors in streaming subresult_dict["finish_reason"] = None logger.error(f"{subresult_dict['error']}") all_results.pop() had_error = True yield "data: " + ModelResponse( **subresult_dict ).json() + "\n\n" # Return early in case of an error break choices = [ CompletionResponseChoice( index=0, text=subresult_dict["generated_text"] or "", finish_reason=subresult_dict["finish_reason"], ) ] usage = None if subresult_dict["finish_reason"]: usage = ( UsageInfo.from_response( ModelResponse.merge_stream(*all_results) ) if all_results else None ) yield "data: " + CompletionResponse( id=completion_id, object="text_completion", model=body.model, choices=choices, usage=usage, ).json() + "\n\n" if had_error: # Return early in case of an error break yield "data: [DONE]\n\n" async def _chat_completions_wrapper( completion_id: str, body: ChatCompletionRequest, response: Response, generator: AsyncGenerator[ModelResponse, None], ) -> AsyncGenerator[str, None]: had_error = False async with async_timeout.timeout(TIMEOUT): finish_reason = None choices: List[DeltaChoices] = [ DeltaChoices( delta=DeltaRole(role="assistant"), index=0, finish_reason=None, ) ] yield "data: " + 
ChatCompletionResponse( id=completion_id, object="chat.completion.chunk", model=body.model, choices=choices, usage=None, ).json() + "\n\n" all_results = [] async for results in generator: for subresult in results.unpack(): all_results.append(subresult) subresult_dict = subresult.dict() if subresult_dict.get("error"): response.status_code = subresult_dict["error"]["code"] logger.error(f"{subresult_dict['error']}") # Drop finish reason as OpenAI doesn't expect it # for errors in streaming subresult_dict["finish_reason"] = None all_results.pop() had_error = True yield "data: " + ModelResponse( **subresult_dict ).json() + "\n\n" # Return early in case of an error break else: finish_reason = subresult_dict["finish_reason"] choices: List[DeltaChoices] = [ DeltaChoices(
delta=DeltaContent(
10
2023-11-13 05:08:21+00:00
8k
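The two wrapper generators in the record above emit OpenAI-style server-sent events: each chunk is a "data: <json>" line followed by a blank line, and the stream terminates with "data: [DONE]". A minimal consumer sketch; the URL and request body are placeholders, not values taken from this router:

import httpx

# Sketch of reading the SSE stream produced by _chat_completions_wrapper.
with httpx.stream(
    "POST",
    "http://localhost:8000/v1/chat/completions",  # placeholder address/route
    json={"model": "m", "messages": [], "stream": True},
    timeout=600,
) as r:
    for line in r.iter_lines():
        if not line.startswith("data: "):
            continue  # skip blank separator lines
        payload = line[len("data: "):]
        if payload == "[DONE]":
            break
        print(payload)  # a JSON-encoded ChatCompletionResponse chunk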
believethehype/nostrdvm
nostr_dvm/tasks/textextraction_whisperx.py
[ { "identifier": "check_server_status", "path": "nostr_dvm/backends/nova_server/utils.py", "snippet": "def check_server_status(jobID, address) -> str | pd.DataFrame:\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n url_status = 'http://' + address + '/job_status'\n url_log = 'http://' + address + '/log'\n\n print(\"Sending Status Request to Server\")\n data = {\"jobID\": jobID}\n\n status = 0\n length = 0\n while status != 2 and status != 3:\n response_status = requests.post(url_status, headers=headers, data=data)\n response_log = requests.post(url_log, headers=headers, data=data)\n status = int(json.loads(response_status.text)['status'])\n log_content = str(json.loads(response_log.text)['message']).replace(\"ERROR\", \"\").replace(\"INFO\", \"\")\n log = log_content[length:]\n length = len(log_content)\n if log != \"\":\n print(log)\n # WAITING = 0, RUNNING = 1, FINISHED = 2, ERROR = 3\n time.sleep(1.0)\n\n if status == 2:\n try:\n url_fetch = 'http://' + address + '/fetch_result'\n print(\"Fetching Results from Server...\")\n data = {\"jobID\": jobID, \"delete_after_download\": True}\n response = requests.post(url_fetch, headers=headers, data=data)\n content_type = response.headers['content-type']\n print(\"Content-type: \" + str(content_type))\n if content_type == \"image/jpeg\":\n image = Image.open(io.BytesIO(response.content))\n image.save(\"./outputs/image.jpg\")\n result = upload_media_to_hoster(\"./outputs/image.jpg\")\n os.remove(\"./outputs/image.jpg\")\n return result\n elif content_type == 'text/plain; charset=utf-8':\n return response.content.decode('utf-8')\n elif content_type == \"application/x-zip-compressed\":\n zf = zipfile.ZipFile(io.BytesIO(response.content), \"r\")\n\n for fileinfo in zf.infolist():\n if fileinfo.filename.endswith(\".annotation~\"):\n try:\n anno_string = zf.read(fileinfo).decode('utf-8', errors='replace')\n columns = ['from', 'to', 'name', 'conf']\n result = pd.DataFrame([row.split(';') for row in anno_string.split('\\n')],\n columns=columns)\n return result\n except Exception as e:\n print(e)\n except Exception as e:\n print(\"Couldn't fetch result: \" + str(e))\n\n elif status == 3:\n return \"error\"" }, { "identifier": "send_request_to_server", "path": "nostr_dvm/backends/nova_server/utils.py", "snippet": "def send_request_to_server(request_form, address):\n print(\"Sending job to Server\")\n url = ('http://' + address + '/process')\n headers = {'Content-type': 'application/x-www-form-urlencoded'}\n response = requests.post(url, headers=headers, data=request_form)\n return response.text" }, { "identifier": "send_file_to_server", "path": "nostr_dvm/backends/nova_server/utils.py", "snippet": "def send_file_to_server(filepath, address):\n print(\"Sending file to Server\")\n url = ('http://' + address + '/upload')\n try:\n fp = open(filepath, 'rb')\n response = requests.post(url, files={'file': fp})\n result = response.content.decode('utf-8')\n except Exception as e:\n print(e)\n print(response.content.decode('utf-8'))\n\n return result" }, { "identifier": "DVMTaskInterface", "path": "nostr_dvm/interfaces/dvmtaskinterface.py", "snippet": "class DVMTaskInterface:\n NAME: str\n KIND: int\n TASK: str = \"\"\n FIX_COST: float = 0\n PER_UNIT_COST: float = 0\n PRIVATE_KEY: str\n PUBLIC_KEY: str\n DVM = DVM\n SUPPORTS_ENCRYPTION = True # DVMs build with this framework support encryption, but others might not.\n ACCEPTS_CASHU = True # DVMs build with this framework support encryption, but others might not.\n dvm_config: DVMConfig\n 
admin_config: AdminConfig\n dependencies = []\n\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, admin_config: AdminConfig = None,\n options=None, task=None):\n self.init(name, dvm_config, admin_config, nip89config, task)\n self.options = options\n self.install_dependencies(dvm_config)\n\n def init(self, name, dvm_config, admin_config=None, nip89config=None, task=None):\n self.NAME = name\n self.PRIVATE_KEY = dvm_config.PRIVATE_KEY\n if dvm_config.PUBLIC_KEY == \"\" or dvm_config.PUBLIC_KEY is None:\n dvm_config.PUBLIC_KEY = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_hex()\n self.PUBLIC_KEY = dvm_config.PUBLIC_KEY\n if dvm_config.FIX_COST is not None:\n self.FIX_COST = dvm_config.FIX_COST\n if dvm_config.PER_UNIT_COST is not None:\n self.PER_UNIT_COST = dvm_config.PER_UNIT_COST\n if task is not None:\n self.TASK = task\n\n dvm_config.SUPPORTED_DVMS = [self]\n dvm_config.DB = \"db/\" + self.NAME + \".db\"\n if nip89config.KIND is not None:\n self.KIND = nip89config.KIND\n\n dvm_config.NIP89 = self.NIP89_announcement(nip89config)\n self.dvm_config = dvm_config\n self.admin_config = admin_config\n\n def install_dependencies(self, dvm_config):\n if dvm_config.SCRIPT != \"\":\n if self.dvm_config.USE_OWN_VENV:\n dir = r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(\".py\")[0]\n pip_location = 'bin/pip'\n if platform == \"win32\":\n pip_location = dir + '/Scripts/pip'\n\n if not os.path.isdir(dir):\n print(\"Creating Venv: \" + dir)\n create(dir, with_pip=True, upgrade_deps=True)\n self.dependencies.append((\"nostr-dvm\", \"nostr-dvm\"))\n for (module, package) in self.dependencies:\n print(\"Installing Venv Module: \" + module)\n run([pip_location, \"install\", \"--upgrade\", package], cwd=dir)\n else:\n for module, package in self.dependencies:\n if module != \"nostr-dvm\":\n try:\n __import__(module)\n except ImportError:\n print(\"Installing global Module: \" + module)\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\n def run(self):\n nostr_dvm_thread = Thread(target=self.DVM, args=[self.dvm_config, self.admin_config])\n nostr_dvm_thread.start()\n\n def NIP89_announcement(self, nip89config: NIP89Config):\n nip89 = NIP89Config()\n nip89.NAME = self.NAME\n nip89.KIND = self.KIND\n nip89.PK = self.PRIVATE_KEY\n nip89.DTAG = nip89config.DTAG\n nip89.CONTENT = nip89config.CONTENT\n return nip89\n\n def is_input_supported(self, tags, client=None, dvm_config=None) -> bool:\n \"\"\"Check if input is supported for current Task.\"\"\"\n pass\n\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None) -> dict:\n \"\"\"Parse input into a request form that will be given to the process method\"\"\"\n pass\n\n def process(self, request_form):\n \"Process the data and return the result\"\n pass\n\n def post_process(self, result, event):\n \"\"\"Post-process the data and return the result Use default function, if not overwritten\"\"\"\n return post_process_result(result, event)\n\n @staticmethod\n def set_options(request_form):\n print(\"Setting options...\")\n opts = []\n if request_form.get(\"options\"):\n opts = json.loads(request_form[\"options\"])\n print(opts)\n return dict(opts)\n\n @staticmethod\n def process_args():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--request', dest='request')\n parser.add_argument('--identifier', dest='identifier')\n parser.add_argument('--output', dest='output')\n args = parser.parse_args()\n return args\n\n @staticmethod\n def 
write_output(result, output):\n with open(os.path.abspath(output), 'w') as f:\n f.write(result)\n # f.close()" }, { "identifier": "process_venv", "path": "nostr_dvm/interfaces/dvmtaskinterface.py", "snippet": "def process_venv(identifier):\n args = DVMTaskInterface.process_args()\n dvm_config = build_default_config(args.identifier)\n dvm = identifier(name=\"\", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)\n try:\n result = dvm.process(json.loads(args.request))\n DVMTaskInterface.write_output(result, args.output)\n except Exception as e:\n DVMTaskInterface.write_output(\"Error: \" + str(e), args.output)" }, { "identifier": "AdminConfig", "path": "nostr_dvm/utils/admin_utils.py", "snippet": "class AdminConfig:\n REBROADCAST_NIP89: bool = False\n UPDATE_PROFILE: bool = False\n DELETE_NIP89: bool = False\n WHITELISTUSER: bool = False\n UNWHITELISTUSER: bool = False\n BLACKLISTUSER: bool = False\n DELETEUSER: bool = False\n LISTDATABASE: bool = False\n ClEANDB: bool = False\n\n USERNPUB: str = \"\"\n LUD16: str = \"\"\n\n EVENTID: str = \"\"\n PRIVKEY: str = \"\"" }, { "identifier": "DVMConfig", "path": "nostr_dvm/utils/dvmconfig.py", "snippet": "class DVMConfig:\n SUPPORTED_DVMS = []\n PRIVATE_KEY: str = \"\"\n PUBLIC_KEY: str = \"\"\n FIX_COST: float = None\n PER_UNIT_COST: float = None\n\n RELAY_LIST = [\"wss://relay.damus.io\", \"wss://nostr-pub.wellorder.net\", \"wss://nos.lol\", \"wss://nostr.wine\",\n \"wss://nostr.mom\", \"wss://nostr.oxtr.dev\", \"wss://relay.nostr.bg\",\n \"wss://relay.f7z.io\", \"wss://pablof7z.nostr1.com\", \"wss://relay.nostr.net\", \"wss://140.f7z.io\",\n \"wss://relay.snort.social\", \"wss://offchain.pub/\", \"wss://relay.nostr.band\"]\n\n RELAY_TIMEOUT = 5\n EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external\n LNBITS_INVOICE_KEY = '' # Will all automatically generated by default, or read from .env\n LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.\n LNBITS_URL = 'https://lnbits.com'\n LN_ADDRESS = ''\n SCRIPT = ''\n IDENTIFIER = ''\n USE_OWN_VENV = True # Make an own venv for each dvm's process function.Disable if you want to install packages into main venv. 
Only recommended if you dont want to run dvms with different dependency versions\n DB: str\n NEW_USER_BALANCE: int = 0 # Free credits for new users\n NIP89: NIP89Config\n SHOW_RESULT_BEFORE_PAYMENT: bool = False # if this is true show results even when not paid right after autoprocess" }, { "identifier": "build_default_config", "path": "nostr_dvm/utils/dvmconfig.py", "snippet": "def build_default_config(identifier):\n dvm_config = DVMConfig()\n dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)\n dvm_config.IDENTIFIER = identifier\n npub = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_bech32()\n invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys(identifier, npub)\n dvm_config.LNBITS_INVOICE_KEY = invoice_key\n dvm_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back\n dvm_config.LNBITS_URL = os.getenv(\"LNBITS_HOST\")\n dvm_config.LN_ADDRESS = lnaddress\n return dvm_config" }, { "identifier": "organize_input_media_data", "path": "nostr_dvm/utils/mediasource_utils.py", "snippet": "def organize_input_media_data(input_value, input_type, start, end, dvm_config, client, process=True,\n media_format=\"audio/mp3\") -> str:\n if input_type == \"event\": # NIP94 event\n evt = get_event_by_id(input_value, client=client, config=dvm_config)\n if evt is not None:\n input_value, input_type = check_nip94_event_for_media(evt, input_value, input_type)\n\n if input_type == \"url\":\n source_type = check_source_type(input_value)\n audio_only = True\n if media_format.split('/')[0] == \"video\":\n audio_only = False\n filename, start, end, type = get_file_start_end_type(input_value, source_type, start, end, audio_only)\n\n if filename == \"\" or filename is None:\n return \"\"\n if type != \"audio\" and type != \"video\":\n return filename\n try:\n file_reader = AudioReader(filename, ctx=cpu(0), mono=False)\n duration = float(file_reader.duration())\n except Exception as e:\n print(e)\n try:\n from moviepy.editor import VideoFileClip\n clip = VideoFileClip(filename)\n duration = clip.duration\n except Exception as e:\n print(e)\n return \"\"\n\n print(\"Original Duration of the Media file: \" + str(duration))\n start_time, end_time, new_duration = (\n convert_media_length(start, end, duration))\n print(\"New Duration of the Media file: \" + str(new_duration))\n\n # TODO if already in a working format and time is 0 0, dont convert\n\n # for now, we cut and convert all files to mp3\n if process:\n # for now we cut and convert all files to mp3\n file = r'processed.' 
+ str(media_format.split('/')[1])\n final_filename = os.path.abspath(os.curdir + r'/outputs/' + file)\n if media_format.split('/')[0] == \"audio\":\n print(\"Converting Audio from \" + str(start_time) + \" until \" + str(end_time))\n fs, x = ffmpegio.audio.read(filename, ss=start_time, to=end_time, sample_fmt='dbl', ac=1)\n ffmpegio.audio.write(final_filename, fs, x, overwrite=True)\n elif media_format.split('/')[0] == \"video\":\n print(\"Converting Video from \" + str(start_time) + \" until \" + str(end_time))\n ffmpegio.transcode(filename, final_filename, overwrite=True, show_log=True)\n elif media_format.split('/')[1] == \"gif\":\n from moviepy.editor import VideoFileClip\n print(\"Converting Video from \" + str(start_time) + \" until \" + str(end_time))\n videoClip = VideoFileClip(filename)\n videoClip.write_gif(final_filename, program=\"ffmpeg\")\n print(final_filename)\n return final_filename\n else:\n return filename" }, { "identifier": "NIP89Config", "path": "nostr_dvm/utils/nip89_utils.py", "snippet": "class NIP89Config:\n DTAG: str = \"\"\n NAME: str = \"\"\n KIND: int = None\n PK: str = \"\"\n CONTENT: str = \"\"" }, { "identifier": "check_and_set_d_tag", "path": "nostr_dvm/utils/nip89_utils.py", "snippet": "def check_and_set_d_tag(identifier, name, pk, imageurl):\n if not os.getenv(\"NIP89_DTAG_\" + identifier.upper()):\n new_dtag = nip89_create_d_tag(name, Keys.from_sk_str(pk).public_key().to_hex(),\n imageurl)\n nip89_add_dtag_to_env_file(\"NIP89_DTAG_\" + identifier.upper(), new_dtag)\n print(\"Some new dtag:\" + new_dtag)\n return new_dtag\n else:\n return os.getenv(\"NIP89_DTAG_\" + identifier.upper())" }, { "identifier": "EventDefinitions", "path": "nostr_dvm/utils/definitions.py", "snippet": "class EventDefinitions:\n KIND_DM = 4\n KIND_ZAP = 9735\n KIND_ANNOUNCEMENT = 31990\n KIND_NIP94_METADATA = 1063\n KIND_FEEDBACK = 7000\n KIND_NIP90_EXTRACT_TEXT = 5000\n KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000\n KIND_NIP90_SUMMARIZE_TEXT = 5001\n KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000\n KIND_NIP90_TRANSLATE_TEXT = 5002\n KIND_NIP90_RESULT_TRANSLATE_TEXT = KIND_NIP90_TRANSLATE_TEXT + 1000\n KIND_NIP90_GENERATE_TEXT = 5050\n KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000\n KIND_NIP90_GENERATE_IMAGE = 5100\n KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000\n KIND_NIP90_CONVERT_VIDEO = 5200\n KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000\n KIND_NIP90_GENERATE_VIDEO = 5202\n KIND_NIP90_TEXT_TO_SPEECH = 5250\n KIND_NIP90_RESULT_TEXT_TO_SPEECH = KIND_NIP90_TEXT_TO_SPEECH + 1000\n KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000\n KIND_NIP90_CONTENT_DISCOVERY = 5300\n KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000\n KIND_NIP90_PEOPLE_DISCOVERY = 5301\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000\n KIND_NIP90_CONTENT_SEARCH = 5302\n KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000\n KIND_NIP90_GENERIC = 5999\n KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000\n ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,\n KIND_NIP90_RESULT_SUMMARIZE_TEXT,\n KIND_NIP90_RESULT_TRANSLATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_IMAGE,\n KIND_NIP90_CONTENT_DISCOVERY,\n KIND_NIP90_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_CONVERT_VIDEO,\n KIND_NIP90_RESULT_CONTENT_DISCOVERY,\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_GENERATE_VIDEO,\n KIND_NIP90_RESULT_GENERIC]" } 
]
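The check_server_status snippet above amounts to a simple polling protocol against the NOVA-server HTTP API (WAITING = 0, RUNNING = 1, FINISHED = 2, ERROR = 3). A compact restatement of just the wait loop, using the same /job_status endpoint and form encoding; the helper name is hypothetical:

import json
import time
import requests

def wait_for_job(job_id: str, address: str) -> bool:
    """Poll /job_status once per second until FINISHED (2) or ERROR (3)."""
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    url = "http://" + address + "/job_status"
    while True:
        response = requests.post(url, headers=headers, data={"jobID": job_id})
        status = int(json.loads(response.text)["status"])
        if status in (2, 3):
            return status == 2
        time.sleep(1.0)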
import json import os import time from multiprocessing.pool import ThreadPool from nostr_dvm.backends.nova_server.utils import check_server_status, send_request_to_server, send_file_to_server from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface, process_venv from nostr_dvm.utils.admin_utils import AdminConfig from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config from nostr_dvm.utils.mediasource_utils import organize_input_media_data from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag from nostr_dvm.utils.definitions import EventDefinitions
6,178
media_format = "audio/mp3" for tag in event.tags(): if tag.as_vec()[0] == 'i': input_type = tag.as_vec()[2] if input_type == "url": url = tag.as_vec()[1] elif tag.as_vec()[0] == 'param': print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2]) if tag.as_vec()[1] == "alignment": alignment = tag.as_vec()[2] elif tag.as_vec()[1] == "model": model = tag.as_vec()[2] elif tag.as_vec()[1] == "range": try: t = time.strptime(tag.as_vec()[2], "%H:%M:%S") seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec start_time = float(seconds) except: try: t = time.strptime(tag.as_vec()[2], "%M:%S") seconds = t.tm_min * 60 + t.tm_sec start_time = float(seconds) except: start_time = tag.as_vec()[2] try: t = time.strptime(tag.as_vec()[3], "%H:%M:%S") seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec end_time = float(seconds) except: try: t = time.strptime(tag.as_vec()[3], "%M:%S") seconds = t.tm_min * 60 + t.tm_sec end_time = float(seconds) except: end_time = float(tag.as_vec()[3]) filepath = organize_input_media_data(url, input_type, start_time, end_time, dvm_config, client, True, media_format) path_on_server = send_file_to_server(os.path.realpath(filepath), self.options['server']) io_input = { "id": "audio", "type": "input", "src": "file:stream", "uri": path_on_server } io_output = { "id": "transcript", "type": "output", "src": "request:annotation:free" } request_form['data'] = json.dumps([io_input, io_output]) options = { "model": model, "alignment_mode": alignment, } request_form['options'] = json.dumps(options) return request_form def process(self, request_form): try: # Call the process route of NOVA-Server with our request form. response = send_request_to_server(request_form, self.options['server']) if bool(json.loads(response)['success']): print("Job " + request_form['jobID'] + " sent to server") pool = ThreadPool(processes=1) thread = pool.apply_async(check_server_status, (request_form['jobID'], self.options['server'])) print("Wait for results of server...") result = thread.get() return result except Exception as e: raise Exception(e) # We build an example here that we can call by either calling this file directly from the main directory, # or by adding it to our playground. You can call the example and adjust it to your needs or redefine it in the # playground or elsewhere def build_example(name, identifier, admin_config, server_address): dvm_config = build_default_config(identifier) dvm_config.USE_OWN_VENV = False admin_config.LUD16 = dvm_config.LN_ADDRESS # A module might have options it can be initialized with, here we set a default model, and the server # address it should use. 
These parameters can be freely defined in the task component options = {'default_model': "base", 'server': server_address} nip89info = { "name": name, "image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg", "about": "I extract text from media files with WhisperX", "encryptionSupported": True, "cashuAccepted": True, "nip90Params": { "model": { "required": False, "values": ["base", "tiny", "small", "medium", "large-v1", "large-v2", "tiny.en", "base.en", "small.en", "medium.en"] }, "alignment": { "required": False, "values": ["raw", "segment", "word"] } } } nip89config = NIP89Config() nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"]) nip89config.CONTENT = json.dumps(nip89info) return SpeechToTextWhisperX(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config, options=options) if __name__ == '__main__':
""" This File contains a Module to transform A media file input on n-server and receive results back. Accepted Inputs: Url to media file (url) Outputs: Transcribed text """ class SpeechToTextWhisperX(DVMTaskInterface): KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT TASK: str = "speech-to-text" FIX_COST: float = 10 PER_UNIT_COST: float = 0.1 def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, admin_config: AdminConfig = None, options=None): super().__init__(name, dvm_config, nip89config, admin_config, options) def is_input_supported(self, tags, client=None, dvm_config=None): for tag in tags: if tag.as_vec()[0] == 'i': input_value = tag.as_vec()[1] input_type = tag.as_vec()[2] if input_type != "url": return False elif tag.as_vec()[0] == 'output': output = tag.as_vec()[1] if output == "" or not (output == "text/plain"): print("Output format not supported, skipping..") return False return True def create_request_from_nostr_event(self, event, client=None, dvm_config=None): request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", ""), "trainerFilePath": r'modules\whisperx\whisperx_transcript.trainer'} if self.options.get("default_model"): model = self.options['default_model'] else: model = "base" if self.options.get("alignment"): alignment = self.options['alignment'] else: alignment = "raw" url = "" input_type = "url" start_time = 0 end_time = 0 media_format = "audio/mp3" for tag in event.tags(): if tag.as_vec()[0] == 'i': input_type = tag.as_vec()[2] if input_type == "url": url = tag.as_vec()[1] elif tag.as_vec()[0] == 'param': print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2]) if tag.as_vec()[1] == "alignment": alignment = tag.as_vec()[2] elif tag.as_vec()[1] == "model": model = tag.as_vec()[2] elif tag.as_vec()[1] == "range": try: t = time.strptime(tag.as_vec()[2], "%H:%M:%S") seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec start_time = float(seconds) except: try: t = time.strptime(tag.as_vec()[2], "%M:%S") seconds = t.tm_min * 60 + t.tm_sec start_time = float(seconds) except: start_time = tag.as_vec()[2] try: t = time.strptime(tag.as_vec()[3], "%H:%M:%S") seconds = t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec end_time = float(seconds) except: try: t = time.strptime(tag.as_vec()[3], "%M:%S") seconds = t.tm_min * 60 + t.tm_sec end_time = float(seconds) except: end_time = float(tag.as_vec()[3]) filepath = organize_input_media_data(url, input_type, start_time, end_time, dvm_config, client, True, media_format) path_on_server = send_file_to_server(os.path.realpath(filepath), self.options['server']) io_input = { "id": "audio", "type": "input", "src": "file:stream", "uri": path_on_server } io_output = { "id": "transcript", "type": "output", "src": "request:annotation:free" } request_form['data'] = json.dumps([io_input, io_output]) options = { "model": model, "alignment_mode": alignment, } request_form['options'] = json.dumps(options) return request_form def process(self, request_form): try: # Call the process route of NOVA-Server with our request form. 
response = send_request_to_server(request_form, self.options['server']) if bool(json.loads(response)['success']): print("Job " + request_form['jobID'] + " sent to server") pool = ThreadPool(processes=1) thread = pool.apply_async(check_server_status, (request_form['jobID'], self.options['server'])) print("Wait for results of server...") result = thread.get() return result except Exception as e: raise Exception(e) # We build an example here that we can call by either calling this file directly from the main directory, # or by adding it to our playground. You can call the example and adjust it to your needs or redefine it in the # playground or elsewhere def build_example(name, identifier, admin_config, server_address): dvm_config = build_default_config(identifier) dvm_config.USE_OWN_VENV = False admin_config.LUD16 = dvm_config.LN_ADDRESS # A module might have options it can be initialized with, here we set a default model, and the server # address it should use. These parameters can be freely defined in the task component options = {'default_model': "base", 'server': server_address} nip89info = { "name": name, "image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg", "about": "I extract text from media files with WhisperX", "encryptionSupported": True, "cashuAccepted": True, "nip90Params": { "model": { "required": False, "values": ["base", "tiny", "small", "medium", "large-v1", "large-v2", "tiny.en", "base.en", "small.en", "medium.en"] }, "alignment": { "required": False, "values": ["raw", "segment", "word"] } } } nip89config = NIP89Config() nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"]) nip89config.CONTENT = json.dumps(nip89info) return SpeechToTextWhisperX(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config, options=options) if __name__ == '__main__':
process_venv(SpeechToTextWhisperX)
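The "range" parameter handling in create_request_from_nostr_event above tries "%H:%M:%S", falls back to "%M:%S", and finally treats the value as plain seconds. The same logic factored into a helper (hypothetical name; unlike the original's start-time fallback, this version always converts the bare value to float):

import time

def parse_range_value(value: str) -> float:
    """Accept "HH:MM:SS", "MM:SS", or a plain number of seconds."""
    for fmt in ("%H:%M:%S", "%M:%S"):
        try:
            t = time.strptime(value, fmt)
            return float(t.tm_hour * 3600 + t.tm_min * 60 + t.tm_sec)
        except ValueError:
            pass
    return float(value)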
4
2023-11-17 18:32:56+00:00
8k
zouXH-god/meme_web
meme_generator/app.py
[ { "identifier": "meme_config", "path": "meme_generator/config.py", "snippet": "class MemeConfig(BaseModel):\nclass ResourceConfig(BaseModel):\nclass GifConfig(BaseModel):\nclass TranslatorConfig(BaseModel):\nclass ServerConfig(BaseModel):\nclass LogConfig(BaseModel):\nclass Config(BaseModel, extra=Extra.ignore):\n def load(cls) -> \"Config\":\n def dump(self):" }, { "identifier": "MemeGeneratorException", "path": "meme_generator/exception.py", "snippet": "class MemeGeneratorException(Exception):\n status_code: int = 520\n\n def __init__(self, message: str):\n self.message = message\n\n def __str__(self) -> str:\n return self.__repr__()\n\n def __repr__(self) -> str:\n return f\"Error in meme_generator: {self.message}\"" }, { "identifier": "NoSuchMeme", "path": "meme_generator/exception.py", "snippet": "class NoSuchMeme(MemeGeneratorException):\n status_code: int = 531\n\n def __init__(self, meme_key: str):\n self.meme_key = meme_key\n message = f'No such meme with key=\"{self.meme_key}\"'\n super().__init__(message)" }, { "identifier": "LOGGING_CONFIG", "path": "meme_generator/log.py", "snippet": "LOGGING_CONFIG = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"handlers\": {\n \"default\": {\n \"class\": \"meme_generator.log.LoguruHandler\",\n },\n },\n \"loggers\": {\n \"uvicorn.error\": {\"handlers\": [\"default\"], \"level\": \"INFO\"},\n \"uvicorn.access\": {\n \"handlers\": [\"default\"],\n \"level\": \"INFO\",\n },\n },\n}" }, { "identifier": "setup_logger", "path": "meme_generator/log.py", "snippet": "def setup_logger():\n from .config import config_file_path, meme_config\n\n def default_filter(record: \"Record\"):\n \"\"\"默认的日志过滤器,根据 `log_level` 配置改变日志等级。\"\"\"\n log_level = meme_config.log.log_level\n levelno = (\n logger.level(log_level).no if isinstance(log_level, str) else log_level\n )\n return record[\"level\"].no >= levelno\n\n default_format: str = (\n \"<g>{time:MM-DD HH:mm:ss}</g> \"\n \"[<lvl>{level}</lvl>] \"\n \"<c><u>{name}</u></c> | \"\n # \"<c>{function}:{line}</c>| \"\n \"{message}\"\n )\n\n logger.remove()\n logger.add(\n sys.stdout,\n level=0,\n diagnose=False,\n filter=default_filter,\n format=default_format,\n )\n\n logger.opt(colors=True).info(\n f\"Config file path: <y><d>{config_file_path.resolve()}</d></y>\"\n )\n logger.opt(colors=True).debug(\n f\"Loaded config: <y><d>{str(meme_config.dict())}</d></y>\"\n )" }, { "identifier": "get_meme", "path": "meme_generator/manager.py", "snippet": "def get_meme(key: str) -> Meme:\n if key not in _memes:\n raise NoSuchMeme(key)\n return _memes[key]" }, { "identifier": "get_meme_keys", "path": "meme_generator/manager.py", "snippet": "def get_meme_keys() -> List[str]:\n return list(_memes.keys())" }, { "identifier": "get_memes", "path": "meme_generator/manager.py", "snippet": "def get_memes() -> List[Meme]:\n return list(_memes.values())" }, { "identifier": "get_meme_keywords", "path": "meme_generator/manager.py", "snippet": "def get_meme_keywords() -> List[str]:\n keywords = []\n for key in list(_memes.keys()):\n keywords.append(_memes[key].keywords[0])\n return keywords" }, { "identifier": "Meme", "path": "meme_generator/meme.py", "snippet": "class Meme:\n key: str\n function: MemeFunction\n params_type: MemeParamsType\n keywords: List[str] = field(default_factory=list)\n patterns: List[str] = field(default_factory=list)\n\n async def __call__(\n self,\n *,\n images: Union[List[str], List[Path], List[bytes], List[BytesIO]] = [],\n texts: List[str] = [],\n args: Dict[str, Any] = {},\n ) -> BytesIO:\n 
if not (\n self.params_type.min_images <= len(images) <= self.params_type.max_images\n ):\n raise ImageNumberMismatch(\n self.key, self.params_type.min_images, self.params_type.max_images\n )\n\n if not (self.params_type.min_texts <= len(texts) <= self.params_type.max_texts):\n raise TextNumberMismatch(\n self.key, self.params_type.min_texts, self.params_type.max_texts\n )\n\n if args_type := self.params_type.args_type:\n args_model = args_type.model\n else:\n args_model = MemeArgsModel\n\n try:\n model = args_model.parse_obj(args)\n except ValidationError as e:\n raise ArgModelMismatch(self.key, str(e))\n\n imgs: List[BuildImage] = []\n try:\n for image in images:\n if isinstance(image, bytes):\n image = BytesIO(image)\n imgs.append(BuildImage.open(image))\n except Exception as e:\n raise OpenImageFailed(str(e))\n\n values = {\"images\": imgs, \"texts\": texts, \"args\": model}\n\n if is_coroutine_callable(self.function):\n return await cast(Callable[..., Awaitable[BytesIO]], self.function)(\n **values\n )\n else:\n return await run_sync(cast(Callable[..., BytesIO], self.function))(**values)\n\n def parse_args(self, args: List[str] = []) -> Dict[str, Any]:\n parser = (\n copy.deepcopy(self.params_type.args_type.parser)\n if self.params_type.args_type\n else MemeArgsParser()\n )\n parser.add_argument(\"texts\", nargs=\"*\", default=[])\n t = parser_message.set(\"\")\n try:\n return vars(parser.parse_args(args))\n except ArgumentError as e:\n raise ArgParserExit(self.key, str(e))\n except ParserExit as e:\n raise ArgParserExit(self.key, e.error_message)\n finally:\n parser_message.reset(t)\n\n async def generate_preview(self, *, args: Dict[str, Any] = {}) -> BytesIO:\n default_images = [random_image() for _ in range(self.params_type.min_images)]\n default_texts = (\n self.params_type.default_texts.copy()\n if (\n self.params_type.min_texts\n <= len(self.params_type.default_texts)\n <= self.params_type.max_texts\n )\n else [random_text() for _ in range(self.params_type.min_texts)]\n )\n\n async def _generate_preview(images: List[BytesIO], texts: List[str]):\n try:\n return await self.__call__(images=images, texts=texts, args=args)\n except TextOrNameNotEnough:\n texts.append(random_text())\n return await _generate_preview(images, texts)\n\n return await _generate_preview(default_images, default_texts)" }, { "identifier": "MemeArgsModel", "path": "meme_generator/meme.py", "snippet": "class MemeArgsModel(BaseModel):\n user_infos: List[UserInfo] = []" }, { "identifier": "TextProperties", "path": "meme_generator/utils.py", "snippet": "class TextProperties:\n fill: ColorType = \"black\"\n style: FontStyle = \"normal\"\n weight: FontWeight = \"normal\"\n stroke_width: int = 0\n stroke_fill: Optional[ColorType] = None" }, { "identifier": "render_meme_list", "path": "meme_generator/utils.py", "snippet": "def render_meme_list(\n meme_list: List[Tuple[\"Meme\", TextProperties]],\n *,\n template: Callable[[\"Meme\", int], str] = default_template,\n order_direction: Literal[\"row\", \"column\"] = \"column\",\n columns: int = 4,\n column_align: Literal[\"left\", \"center\", \"right\"] = \"left\",\n item_padding: Tuple[int, int] = (15, 6),\n image_padding: Tuple[int, int] = (50, 50),\n bg_color: ColorType = \"white\",\n fontsize: int = 30,\n fontname: str = \"\",\n fallback_fonts: List[str] = [],\n) -> BytesIO:\n item_images: List[Text2Image] = []\n for i, (meme, properties) in enumerate(meme_list, start=1):\n text = template(meme, i)\n t2m = Text2Image.from_text(\n text,\n fontsize=fontsize,\n 
style=properties.style,\n weight=properties.weight,\n fill=properties.fill,\n stroke_width=properties.stroke_width,\n stroke_fill=properties.stroke_fill,\n fontname=fontname,\n fallback_fonts=fallback_fonts,\n )\n item_images.append(t2m)\n char_A = (\n Text2Image.from_text(\n \"A\", fontsize=fontsize, fontname=fontname, fallback_fonts=fallback_fonts\n )\n .lines[0]\n .chars[0]\n )\n num_per_col = math.ceil(len(item_images) / columns)\n column_images: List[BuildImage] = []\n for col in range(columns):\n if order_direction == \"column\":\n images = item_images[col * num_per_col : (col + 1) * num_per_col]\n else:\n images = [\n item_images[num * columns + col]\n for num in range((len(item_images) - col - 1) // columns + 1)\n ]\n img_w = max((t2m.width for t2m in images)) + item_padding[0] * 2\n img_h = (char_A.ascent + item_padding[1] * 2) * len(images) + char_A.descent\n image = BuildImage.new(\"RGB\", (img_w, img_h), bg_color)\n y = item_padding[1]\n for t2m in images:\n if column_align == \"left\":\n x = 0\n elif column_align == \"center\":\n x = (img_w - t2m.width - item_padding[0] * 2) // 2\n else:\n x = img_w - t2m.width - item_padding[0] * 2\n t2m.draw_on_image(image.image, (x, y))\n y += char_A.ascent + item_padding[1] * 2\n column_images.append(image)\n\n img_w = sum((img.width for img in column_images)) + image_padding[0] * 2\n img_h = max((img.height for img in column_images)) + image_padding[1] * 2\n image = BuildImage.new(\"RGB\", (img_w, img_h), bg_color)\n x, y = image_padding\n for img in column_images:\n image.paste(img, (x, y))\n x += img.width\n return image.save_jpg()" } ]
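render_meme_list above distributes items into `columns` columns either column-by-column or row-by-row, with row heights derived from a reference "A" glyph. A small sketch of just the index arithmetic it uses (pure Python; the function name is hypothetical):

import math

def column_indices(n_items: int, columns: int, order_direction: str):
    # Mirrors the item-to-column assignment inside render_meme_list.
    num_per_col = math.ceil(n_items / columns)
    for col in range(columns):
        if order_direction == "column":
            yield list(range(col * num_per_col,
                             min((col + 1) * num_per_col, n_items)))
        else:  # "row": every `columns`-th item, offset by the column index
            yield [num * columns + col
                   for num in range((n_items - col - 1) // columns + 1)]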
import base64 import json import filetype import httpx import requests import uvicorn from typing import Any, Dict, List, Literal, Optional, Tuple from fastapi import Depends, FastAPI, Form, HTTPException, Response, UploadFile from fastapi.responses import HTMLResponse from pil_utils.types import ColorType, FontStyle, FontWeight from pydantic import BaseModel, ValidationError from meme_generator.config import meme_config from meme_generator.exception import MemeGeneratorException, NoSuchMeme from meme_generator.log import LOGGING_CONFIG, setup_logger from meme_generator.manager import get_meme, get_meme_keys, get_memes, get_meme_keywords from meme_generator.meme import Meme, MemeArgsModel from meme_generator.utils import TextProperties, render_meme_list
4,398
        async with httpx.AsyncClient() as client:
            response = await client.get(resize_url + url, timeout=60)
            content = response.content
        media_type = str(filetype.guess_mime(content) or "text/plain")  # fall back when MIME detection fails
        return Response(content=content, media_type=media_type)

    @app.post("/memes/render_list")
    def _(params: RenderMemeListRequest = RenderMemeListRequest()):
        try:
            meme_list = [
                (
                    get_meme(p.meme_key),
                    TextProperties(
                        fill=p.fill,
                        style=p.style,
                        weight=p.weight,
                        stroke_width=p.stroke_width,
                        stroke_fill=p.stroke_fill,
                    ),
                )
                for p in params.meme_list
            ]
        except NoSuchMeme as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))
        result = render_meme_list(
            meme_list,
            order_direction=params.order_direction,
            columns=params.columns,
            column_align=params.column_align,
            item_padding=params.item_padding,
            image_padding=params.image_padding,
            bg_color=params.bg_color,
            fontsize=params.fontsize,
            fontname=params.fontname,
            fallback_fonts=params.fallback_fonts,
        )
        content = result.getvalue()
        media_type = str(filetype.guess_mime(content) or "text/plain")
        return Response(content=content, media_type=media_type)

    @app.get("/memes/keywords")
    def _():
        return get_meme_keywords()

    @app.get("/memes/keys")
    def _():
        return get_meme_keys()

    @app.get("/memes/{key}/info")
    def _(key: str):
        try:
            meme = get_meme(key)
        except NoSuchMeme as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))
        args_model = (
            meme.params_type.args_type.model
            if meme.params_type.args_type
            else MemeArgsModel
        )
        properties: Dict[str, Dict[str, Any]] = (
            args_model.schema().get("properties", {}).copy()
        )
        properties.pop("user_infos")
        return MemeInfoResponse(
            key=meme.key,
            keywords=meme.keywords,
            patterns=meme.patterns,
            params=MemeParamsResponse(
                min_images=meme.params_type.min_images,
                max_images=meme.params_type.max_images,
                min_texts=meme.params_type.min_texts,
                max_texts=meme.params_type.max_texts,
                default_texts=meme.params_type.default_texts,
                args=[
                    MemeArgsResponse(
                        name=name,
                        type=info.get("type", ""),
                        description=info.get("description"),
                        default=info.get("default"),
                        enum=info.get("enum"),
                    )
                    for name, info in properties.items()
                ],
            ),
        )

    @app.get("/memes/{key}/preview")
    async def _(key: str):
        try:
            meme = get_meme(key)
            # the return value is a BytesIO
            result = await meme.generate_preview()
        except MemeGeneratorException as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))
        content = result.getvalue()
        media_type = str(filetype.guess_mime(content) or "text/plain")
        return Response(content=content, media_type=media_type)

    @app.post("/memes/{key}/parse_args")
    async def _(key: str, args=None):
        if args is None:
            args = []
        try:
            meme = get_meme(key)
            return meme.parse_args(args)
        except MemeGeneratorException as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))

    for meme in sorted(get_memes(), key=lambda meme: meme.key):
        register_router(meme)


def run_server():
    register_routers()
    uvicorn.run(
        app,
app = FastAPI()


class MemeArgsResponse(BaseModel):
    name: str
    type: str
    description: Optional[str] = None
    default: Optional[Any] = None
    enum: Optional[List[Any]] = None


class MemeParamsResponse(BaseModel):
    min_images: int
    max_images: int
    min_texts: int
    max_texts: int
    default_texts: List[str]
    args: List[MemeArgsResponse]


class MemeInfoResponse(BaseModel):
    key: str
    keywords: List[str]
    patterns: List[str]
    params: MemeParamsResponse


def register_router(meme: Meme):
    if args_type := meme.params_type.args_type:
        args_model = args_type.model
    else:
        args_model = MemeArgsModel

    def args_checker(args: Optional[str] = Form(default=str(args_model().json()))):
        if not args:
            return MemeArgsModel()
        try:
            model = args_model.parse_raw(args)
        except ValidationError as e:
            raise HTTPException(status_code=552, detail=str(e))
        return model

    @app.post(f"/memes/{meme.key}/")
    async def _(
        images: List[UploadFile] = [],
        images_base64: List[str] = [],
        texts: List[str] = meme.params_type.default_texts,
        texts_json: List[str] = [],
        res: List[str] = [],
        args: args_model = Depends(args_checker),  # type: ignore
    ):
        if texts_json:
            texts = json.loads(texts_json[0])
        imgs: List[bytes] = []
        for image in images:
            imgs.append(await image.read())
        if images_base64:
            for image_base64 in json.loads(images_base64[0]):
                imgs.append(base64.b64decode(image_base64))
        texts = [text for text in texts if text]
        assert isinstance(args, args_model)
        try:
            result = await meme(images=imgs, texts=texts, args=args.dict())
        except MemeGeneratorException as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))
        content = result.getvalue()
        print(res)
        if res:
            return base64.b64encode(content)
        media_type = str(filetype.guess_mime(content) or "text/plain")  # fall back when MIME detection fails
        return Response(content=content, media_type=media_type)


class MemeKeyWithProperties(BaseModel):
    meme_key: str
    fill: ColorType = "black"
    style: FontStyle = "normal"
    weight: FontWeight = "normal"
    stroke_width: int = 0
    stroke_fill: Optional[ColorType] = None


default_meme_list = [
    MemeKeyWithProperties(meme_key=meme.key)
    for meme in sorted(get_memes(), key=lambda meme: meme.key)
]


class RenderMemeListRequest(BaseModel):
    meme_list: List[MemeKeyWithProperties] = default_meme_list
    order_direction: Literal["row", "column"] = "column"
    columns: int = 4
    column_align: Literal["left", "center", "right"] = "left"
    item_padding: Tuple[int, int] = (15, 2)
    image_padding: Tuple[int, int] = (50, 50)
    bg_color: ColorType = "white"
    fontsize: int = 30
    fontname: str = ""
    fallback_fonts: List[str] = []


def register_routers():
    @app.get("/")
    @app.get("/memes/make")
    def _():
        with open("templates/make.html", "r") as fp:
            html = fp.read()
        return HTMLResponse(html)

    @app.get("/memes/get_img")
    async def _(qq: str = "", url: str = ""):
        if qq:
            url = f"http://q1.qlogo.cn/g?b=qq&nk={qq}&s=640"
            resize_url = ""
        else:
            resize_url = "https://api.s1f.top/img_resize?w=320&url="
        async with httpx.AsyncClient() as client:
            response = await client.get(resize_url + url, timeout=60)
            content = response.content
        media_type = str(filetype.guess_mime(content) or "text/plain")
        return Response(content=content, media_type=media_type)

    @app.post("/memes/render_list")
    def _(params: RenderMemeListRequest = RenderMemeListRequest()):
        try:
            meme_list = [
                (
                    get_meme(p.meme_key),
                    TextProperties(
                        fill=p.fill,
                        style=p.style,
                        weight=p.weight,
                        stroke_width=p.stroke_width,
                        stroke_fill=p.stroke_fill,
                    ),
                )
                for p in params.meme_list
            ]
        except NoSuchMeme as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))
        result = render_meme_list(
            meme_list,
            order_direction=params.order_direction,
            columns=params.columns,
            column_align=params.column_align,
            item_padding=params.item_padding,
            image_padding=params.image_padding,
            bg_color=params.bg_color,
            fontsize=params.fontsize,
            fontname=params.fontname,
            fallback_fonts=params.fallback_fonts,
        )
        content = result.getvalue()
        media_type = str(filetype.guess_mime(content) or "text/plain")
        return Response(content=content, media_type=media_type)

    @app.get("/memes/keywords")
    def _():
        return get_meme_keywords()

    @app.get("/memes/keys")
    def _():
        return get_meme_keys()

    @app.get("/memes/{key}/info")
    def _(key: str):
        try:
            meme = get_meme(key)
        except NoSuchMeme as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))
        args_model = (
            meme.params_type.args_type.model
            if meme.params_type.args_type
            else MemeArgsModel
        )
        properties: Dict[str, Dict[str, Any]] = (
            args_model.schema().get("properties", {}).copy()
        )
        properties.pop("user_infos")
        return MemeInfoResponse(
            key=meme.key,
            keywords=meme.keywords,
            patterns=meme.patterns,
            params=MemeParamsResponse(
                min_images=meme.params_type.min_images,
                max_images=meme.params_type.max_images,
                min_texts=meme.params_type.min_texts,
                max_texts=meme.params_type.max_texts,
                default_texts=meme.params_type.default_texts,
                args=[
                    MemeArgsResponse(
                        name=name,
                        type=info.get("type", ""),
                        description=info.get("description"),
                        default=info.get("default"),
                        enum=info.get("enum"),
                    )
                    for name, info in properties.items()
                ],
            ),
        )

    @app.get("/memes/{key}/preview")
    async def _(key: str):
        try:
            meme = get_meme(key)
            # the return value is a BytesIO
            result = await meme.generate_preview()
        except MemeGeneratorException as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))
        content = result.getvalue()
        media_type = str(filetype.guess_mime(content) or "text/plain")
        return Response(content=content, media_type=media_type)

    @app.post("/memes/{key}/parse_args")
    async def _(key: str, args=None):
        if args is None:
            args = []
        try:
            meme = get_meme(key)
            return meme.parse_args(args)
        except MemeGeneratorException as e:
            raise HTTPException(status_code=e.status_code, detail=str(e))

    for meme in sorted(get_memes(), key=lambda meme: meme.key):
        register_router(meme)


def run_server():
    register_routers()
    uvicorn.run(
        app,
host=meme_config.server.host,
0
2023-11-12 12:31:53+00:00
8k
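A route registered by register_router in the record above accepts multipart image uploads, base64-encoded images, text fields, and a JSON "args" form field. A hedged client sketch; the server address, port, and the "petpet" key are placeholders rather than values from this app (the real host and port come from meme_config.server):

import httpx

with open("avatar.jpg", "rb") as fp:
    resp = httpx.post(
        "http://127.0.0.1:2233/memes/petpet/",  # placeholder address and key
        files=[("images", fp)],
        data={"texts": ["hello"], "args": '{"user_infos": []}'},
        timeout=30,
    )
resp.raise_for_status()
with open("out.gif", "wb") as out:
    out.write(resp.content)  # raw image bytes unless the `res` field was set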
OKC13/General-Documents-Layout-parser
utils/datasets.py
[ { "identifier": "xyxy2xywh", "path": "utils/utils.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "xywh2xyxy", "path": "utils/utils.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" } ]
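The two helpers in the context above are exact inverses on well-formed boxes; a quick numeric round-trip check (values arbitrary):

import numpy as np
from utils.utils import xyxy2xywh, xywh2xyxy

boxes_xyxy = np.array([[10., 20., 50., 80.]])  # x1, y1, x2, y2
boxes_xywh = xyxy2xywh(boxes_xyxy)             # [[30., 50., 40., 60.]] = cx, cy, w, h
assert np.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)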
import glob import math import os import random import shutil import time import cv2 import numpy as np import torch from pathlib import Path from threading import Thread from PIL import Image, ExifTags from torch.utils.data import Dataset from tqdm import tqdm from utils.utils import xyxy2xywh, xywh2xyxy from skimage import io # conda install -c conda-forge scikit-image
3,979
nf += 1 # file found # Create subdataset (a smaller dataset) if create_datasubset and ns < 1E4: if ns == 0: create_folder(path='./datasubset') os.makedirs('./datasubset/images') exclude_classes = 43 if exclude_classes not in l[:, 0]: ns += 1 # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image with open('./datasubset/images.txt', 'a') as f: f.write(self.img_files[i] + '\n') # Extract object detection boxes for a second stage classifier if extract_bounding_boxes: p = Path(self.img_files[i]) img = cv2.imread(str(p)) h, w = img.shape[:2] for j, x in enumerate(l): f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name) if not os.path.exists(Path(f).parent): os.makedirs(Path(f).parent) # make new output folder b = x[1:] * [w, h, w, h] # box b[2:] = b[2:].max() # rectangle to square b[2:] = b[2:] * 1.3 + 30 # pad b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes' else: ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % ( s, nf, nm, ne, nd, n) assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url) if not labels_loaded and n > 1000: print('Saving labels to %s for faster future loading' % np_labels_path) np.save(np_labels_path, self.labels) # save for next time # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) if cache_images: # if training gb = 0 # Gigabytes of cached images pbar = tqdm(range(len(self.img_files)), desc='Caching images') self.img_hw0, self.img_hw = [None] * n, [None] * n for i in pbar: # max 10k images self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized gb += self.imgs[i].nbytes pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9) # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3 detect_corrupted_images = False if detect_corrupted_images: for file in tqdm(self.img_files, desc='Detecting corrupted images'): try: _ = io.imread(file) except: print('Corrupted image detected: %s' % file) def __len__(self): return len(self.img_files) # def __iter__(self): # self.count = -1 # print('ran dataset iter') # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) # return self def __getitem__(self, index): if self.image_weights: index = self.indices[index] hyp = self.hyp if self.mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None else: # Load image img, (h0, w0), (h, w) = load_image(self, index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling # Load labels labels = [] x = self.labels[index] if x.size > 0: # Normalized xywh to pixel xyxy format labels = x.copy() labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] labels[:, 4] = 
ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] if self.augment: # Augment imagespace if not self.mosaic: img, labels = random_affine(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear']) # Augment colorspace augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Apply cutouts # if random.random() < 0.9: # labels = cutout(img, labels) nL = len(labels) # number of labels if nL: # convert xyxy to xywh
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng'] vid_formats = ['.mov', '.avi', '.mp4'] # Get orientation exif tag for orientation in ExifTags.TAGS.keys(): if ExifTags.TAGS[orientation] == 'Orientation': break def exif_size(img): # Returns exif-corrected PIL size s = img.size # (width, height) try: rotation = dict(img._getexif().items())[orientation] if rotation == 6: # rotation 270 s = (s[1], s[0]) elif rotation == 8: # rotation 90 s = (s[1], s[0]) except: pass return s class LoadImages: # for inference def __init__(self, path, img_size=416): path = str(Path(path)) # os-agnostic files = [] if os.path.isdir(path): files = sorted(glob.glob(os.path.join(path, '*.*'))) elif os.path.isfile(path): files = [path] images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats] videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats] nI, nV = len(images), len(videos) self.img_size = img_size self.files = images + videos self.nF = nI + nV # number of files self.video_flag = [False] * nI + [True] * nV self.mode = 'images' if any(videos): self.new_video(videos[0]) # new video else: self.cap = None assert self.nF > 0, 'No images or videos found in ' + path def __iter__(self): self.count = 0 return self def __next__(self): if self.count == self.nF: raise StopIteration path = self.files[self.count] if self.video_flag[self.count]: # Read video self.mode = 'video' ret_val, img0 = self.cap.read() if not ret_val: self.count += 1 self.cap.release() if self.count == self.nF: # last video raise StopIteration else: path = self.files[self.count] self.new_video(path) ret_val, img0 = self.cap.read() self.frame += 1 print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='') else: # Read image self.count += 1 img0 = cv2.imread(path) # BGR assert img0 is not None, 'Image Not Found ' + path print('image %g/%g %s: ' % (self.count, self.nF, path), end='') # Padded resize img = letterbox(img0, new_shape=self.img_size)[0] # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image return path, img, img0, self.cap def new_video(self, path): self.frame = 0 self.cap = cv2.VideoCapture(path) self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) def __len__(self): return self.nF # number of files class LoadWebcam: # for inference def __init__(self, pipe=0, img_size=416): self.img_size = img_size if pipe == '0': pipe = 0 # local camera # pipe = 'rtsp://192.168.1.64/1' # IP camera # pipe = 'rtsp://username:[email protected]/1' # IP camera with login # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/ # pipe = '"rtspsrc location="rtsp://username:[email protected]/1" latency=10 ! appsink' # GStreamer # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/ # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help # pipe = "rtspsrc location=rtsp://root:[email protected]:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! 
vaapih264dec ! videoconvert ! appsink" # GStreamer self.pipe = pipe self.cap = cv2.VideoCapture(pipe) # video capture object self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 if cv2.waitKey(1) == ord('q'): # q to quit self.cap.release() cv2.destroyAllWindows() raise StopIteration # Read frame if self.pipe == 0: # local camera ret_val, img0 = self.cap.read() img0 = cv2.flip(img0, 1) # flip left-right else: # IP camera n = 0 while True: n += 1 self.cap.grab() if n % 30 == 0: # skip frames ret_val, img0 = self.cap.retrieve() if ret_val: break # Print assert ret_val, 'Camera Error %s' % self.pipe img_path = 'webcam.jpg' print('webcam %g: ' % self.count, end='') # Padded resize img = letterbox(img0, new_shape=self.img_size)[0] # Convert img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 img = np.ascontiguousarray(img) return img_path, img, img0, None def __len__(self): return 0 class LoadStreams: # multiple IP or RTSP cameras def __init__(self, sources='streams.txt', img_size=416): self.mode = 'images' self.img_size = img_size if os.path.isfile(sources): with open(sources, 'r') as f: sources = [x.strip() for x in f.read().splitlines() if len(x.strip())] else: sources = [sources] n = len(sources) self.imgs = [None] * n self.sources = sources for i, s in enumerate(sources): # Start the thread to read frames from the video stream print('%g/%g: %s... ' % (i + 1, n, s), end='') cap = cv2.VideoCapture(0 if s == '0' else s) assert cap.isOpened(), 'Failed to open %s' % s w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = cap.get(cv2.CAP_PROP_FPS) % 100 _, self.imgs[i] = cap.read() # guarantee first frame thread = Thread(target=self.update, args=([i, cap]), daemon=True) print(' success (%gx%g at %.2f FPS).' % (w, h, fps)) thread.start() print('') # newline # check for common shapes s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal if not self.rect: print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') def update(self, index, cap): # Read next stream frame in a daemon thread n = 0 while cap.isOpened(): n += 1 # _, self.imgs[index] = cap.read() cap.grab() if n == 4: # read every 4th frame _, self.imgs[index] = cap.retrieve() n = 0 time.sleep(0.01) # wait time def __iter__(self): self.count = -1 return self def __next__(self): self.count += 1 img0 = self.imgs.copy() if cv2.waitKey(1) == ord('q'): # q to quit cv2.destroyAllWindows() raise StopIteration # Letterbox img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0] # Stack img = np.stack(img, 0) # Convert img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 img = np.ascontiguousarray(img) return self.sources, img, img0, None def __len__(self): return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years class LoadImagesAndLabels(Dataset): # for training/testing def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, cache_images=False, single_cls=False, pad=0.0): try: path = str(Path(path)) # os-agnostic parent = str(Path(path).parent) + os.sep if os.path.isfile(path): # file with open(path, 'r') as f: f = f.read().splitlines() f = [x.replace('./', parent) if x.startswith('./') else x for x in f] # local to global path elif os.path.isdir(path): # folder f = glob.iglob(path + os.sep + '*.*') else: raise Exception('%s does not exist' % path) self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats] except: raise Exception('Error loading data from %s. See %s' % (path, help_url)) n = len(self.img_files) assert n > 0, 'No images found in %s. See %s' % (path, help_url) bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index nb = bi[-1] + 1 # number of batches self.n = n # number of images self.batch = bi # batch index of image self.img_size = img_size self.augment = augment self.hyp = hyp self.image_weights = image_weights self.rect = False if image_weights else rect self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) # Define labels self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in self.img_files] # Rectangular Training https://github.com/ultralytics/yolov3/issues/232 if self.rect: # Read image shapes (wh) sp = path.replace('.txt', '') + '.shapes' # shapefile path try: with open(sp, 'r') as f: # read existing shapefile s = [x.split() for x in f.read().splitlines()] assert len(s) == n, 'Shapefile out of sync' except: s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')] np.savetxt(sp, s, fmt='%g') # overwrites existing (if any) # Sort by aspect ratio s = np.array(s, dtype=np.float64) ar = s[:, 1] / s[:, 0] # aspect ratio irect = ar.argsort() self.img_files = [self.img_files[i] for i in irect] self.label_files = [self.label_files[i] for i in irect] self.shapes = s[irect] # wh ar = ar[irect] # Set training image shapes shapes = [[1, 1]] * nb for i in range(nb): ari = ar[bi == i] mini, maxi = ari.min(), ari.max() if maxi < 1: shapes[i] = [maxi, 1] elif mini > 1: shapes[i] = [1, 1 / mini] self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32. 
+ pad).astype(np.int) * 32 # Cache labels self.imgs = [None] * n self.labels = [np.zeros((0, 5), dtype=np.float32)] * n create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file if os.path.isfile(np_labels_path): s = np_labels_path # print string x = np.load(np_labels_path, allow_pickle=True) if len(x) == n: self.labels = x labels_loaded = True else: s = path.replace('images', 'labels') pbar = tqdm(self.label_files) for i, file in enumerate(pbar): if labels_loaded: l = self.labels[i] # np.savetxt(file, l, '%g') # save *.txt from *.npy file else: try: with open(file, 'r') as f: l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) except: nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing continue if l.shape[0]: assert l.shape[1] == 5, '> 5 label columns: %s' % file assert (l >= 0).all(), 'negative labels: %s' % file try: assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file except: print('non-normalized or out of bounds coordinate labels: %s' % file) if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows if single_cls: l[:, 0] = 0 # force dataset into single-class mode self.labels[i] = l nf += 1 # file found # Create subdataset (a smaller dataset) if create_datasubset and ns < 1E4: if ns == 0: create_folder(path='./datasubset') os.makedirs('./datasubset/images') exclude_classes = 43 if exclude_classes not in l[:, 0]: ns += 1 # shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image with open('./datasubset/images.txt', 'a') as f: f.write(self.img_files[i] + '\n') # Extract object detection boxes for a second stage classifier if extract_bounding_boxes: p = Path(self.img_files[i]) img = cv2.imread(str(p)) h, w = img.shape[:2] for j, x in enumerate(l): f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name) if not os.path.exists(Path(f).parent): os.makedirs(Path(f).parent) # make new output folder b = x[1:] * [w, h, w, h] # box b[2:] = b[2:].max() # rectangle to square b[2:] = b[2:] * 1.3 + 30 # pad b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image b[[1, 3]] = np.clip(b[[1, 3]], 0, h) assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes' else: ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % ( s, nf, nm, ne, nd, n) assert nf > 0 or n == 20288, 'No labels found in %s. 
See %s' % (os.path.dirname(file) + os.sep, help_url) if not labels_loaded and n > 1000: print('Saving labels to %s for faster future loading' % np_labels_path) np.save(np_labels_path, self.labels) # save for next time # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) if cache_images: # if training gb = 0 # Gigabytes of cached images pbar = tqdm(range(len(self.img_files)), desc='Caching images') self.img_hw0, self.img_hw = [None] * n, [None] * n for i in pbar: # max 10k images self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized gb += self.imgs[i].nbytes pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9) # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3 detect_corrupted_images = False if detect_corrupted_images: for file in tqdm(self.img_files, desc='Detecting corrupted images'): try: _ = io.imread(file) except: print('Corrupted image detected: %s' % file) def __len__(self): return len(self.img_files) # def __iter__(self): # self.count = -1 # print('ran dataset iter') # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) # return self def __getitem__(self, index): if self.image_weights: index = self.indices[index] hyp = self.hyp if self.mosaic: # Load mosaic img, labels = load_mosaic(self, index) shapes = None else: # Load image img, (h0, w0), (h, w) = load_image(self, index) # Letterbox shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling # Load labels labels = [] x = self.labels[index] if x.size > 0: # Normalized xywh to pixel xyxy format labels = x.copy() labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0] labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1] if self.augment: # Augment imagespace if not self.mosaic: img, labels = random_affine(img, labels, degrees=hyp['degrees'], translate=hyp['translate'], scale=hyp['scale'], shear=hyp['shear']) # Augment colorspace augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) # Apply cutouts # if random.random() < 0.9: # labels = cutout(img, labels) nL = len(labels) # number of labels if nL: # convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
0
2023-11-16 08:37:10+00:00
8k
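The record above leans on the two box-format converters quoted in its context field, and its gold next line applies `xyxy2xywh` after the letterbox transform. A minimal, numpy-only sketch of that round trip follows; it is a simplification of the quoted snippets, which also accept torch tensors.

import numpy as np

def xyxy2xywh(x):
    # [x1, y1, x2, y2] -> [x_center, y_center, width, height]
    y = np.zeros_like(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2
    y[:, 2] = x[:, 2] - x[:, 0]
    y[:, 3] = x[:, 3] - x[:, 1]
    return y

def xywh2xyxy(x):
    # [x_center, y_center, width, height] -> [x1, y1, x2, y2]
    y = np.zeros_like(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y

boxes = np.array([[10.0, 20.0, 50.0, 80.0]])  # one box as [x1, y1, x2, y2]
assert np.allclose(xywh2xyxy(xyxy2xywh(boxes)), boxes)  # lossless round trip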
embrake/Aquilify
aquilify/orm/sqlite3.py
[ { "identifier": "MagicFilter", "path": "aquilify/orm/filters/magic.py", "snippet": "class MagicFilter(object):\n def __init__(self, query: str, variables: tuple = None, parameters: dict[str, object] = None) -> None:\n\n \"\"\"\n Magic filter used for advanced processing of sql queries\n\n :param query: Initial query\n :param variables: Query variables\n :param parameters: Parameters saved after writing the query to a new expression\n \"\"\"\n\n self.query = query\n\n self.variables = variables or tuple()\n self.parameters = parameters or dict()\n\n @staticmethod\n def _serialize_variable(variable: object) -> tuple:\n if isinstance(variable, MagicFilter):\n return variable.variables\n else:\n return (variable,)\n\n @staticmethod\n def _serialize_query(query: object) -> str:\n if isinstance(query, MagicFilter):\n return query.query\n else:\n return \"?\"\n\n @staticmethod\n def _wrap_query(query: str, wrap: bool) -> str:\n return f\"({query})\" if wrap else query\n\n def _get_expression(self, other: object, operator: str, *, wrap: bool = False) -> \"MagicFilter\":\n return MagicFilter(\n f\"{self._wrap_query(self.query, wrap)} \"\n f\"{operator} \"\n f\"{self._wrap_query(self._serialize_query(other), wrap)}\",\n\n (*self.variables, *self._serialize_variable(other)), self.parameters\n )\n\n def _invert_expression(self) -> \"MagicFilter\":\n return MagicFilter(\n f\"NOT ({self.query})\", self.variables, self.parameters\n )\n\n def __eq__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \"=\")\n\n def __ne__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \"!=\")\n\n def __ge__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \">=\")\n\n def __gt__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \">\")\n\n def __le__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \"<=\")\n\n def __lt__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \"<\")\n\n def __and__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \"AND\", wrap=True)\n\n def __or__(self, other: object) -> \"MagicFilter\":\n return self._get_expression(other, \"OR\", wrap=True)\n\n def __invert__(self) -> \"MagicFilter\":\n return self._invert_expression()" }, { "identifier": "Column", "path": "aquilify/orm/column.py", "snippet": "class Column(MagicFilter):\n def __init__(self, type: str, not_null: bool = False, default: object = None, autoincrement: bool = False,\n unique: bool = False, primary_key: bool = False, check: str = None) -> None:\n\n \"\"\"\n Standard class for creating a column object.\n\n :param type: Data type\n :param not_null: Causes the column to not be NULL\n :param default: Sets the default value for the insert\n :param autoincrement: Causes a unique number to be generated automatically\n :param unique: Ensures that all values in a column are distinct\n :param primary_key: Uniquely identifies each entry in a table\n :param check: Used to limit the range of values that can be placed in a column\n \"\"\"\n\n super().__init__(\"\")\n\n self.type = type\n self.options = self._serialize_options(\n not_null, default, autoincrement, unique, primary_key, check\n )\n\n @staticmethod\n def _serialize_options(not_null: bool = False, default: object = None, autoincrement: bool = False,\n unique: bool = False, primary_key: bool = False, check: str = None) -> str:\n\n options = []\n if not_null:\n options.append(\"NOT NULL\")\n 
if primary_key:\n options.append(\"PRIMARY KEY\")\n if autoincrement:\n options.append(\"AUTOINCREMENT\")\n if unique:\n options.append(\"UNIQUE\")\n if default:\n options.append(f\"DEFAULT {default}\")\n if check:\n options.append(f\"CHECK({check})\")\n\n return \" \".join(options)" }, { "identifier": "ColumnType", "path": "aquilify/orm/column.py", "snippet": "class ColumnType(MagicFilter):\n def __init__(self, table: str, name: str, type: str, options: str = \"\") -> None:\n\n \"\"\"\n System class used to store additional column values.\n\n :param table: Parent table name\n :param name: Column name\n :param type: Column type\n :param options: Column options\n \"\"\"\n\n super().__init__(f\"{table}.{name}\", parameters={\"table\": table})\n\n self.table = table\n self.name = name\n self.type = type\n self.options = options\n\n def serialize(self) -> str:\n \"\"\"\n Function to serialize basic steam.\n\n :return: Serialized string\n \"\"\"\n\n return f\"{self.name} {self.type.upper()} {self.options}\".strip()" }, { "identifier": "Types", "path": "aquilify/orm/constants.py", "snippet": "class Types(object):\n\n \"\"\"\n All data types to create a column.\n \"\"\"\n\n BIGINT = \"BIGINT\"\n BLOB = \"BLOB\"\n BOOLEAN = \"BOOLEAN\"\n CHAR = \"CHAR\"\n DATE = \"DATE\"\n DATETIME = \"DATETIME\"\n DECIMAL = \"DECIMAL\"\n DOUBLE = \"DOUBLE\"\n INTEGER = \"INTEGER\"\n INT = \"INT\"\n NONE = \"NONE\"\n NUMERIC = \"NUMERIC\"\n REAL = \"REAL\"\n STRING = \"STRING\"\n TEXT = \"TEXT\"\n TIME = \"TIME\"\n TIMESTAMP = \"TIMESTAMP\"\n VARCHAR = \"VARCHAR\"" }, { "identifier": "SessionExecuteError", "path": "aquilify/orm/exceptions.py", "snippet": "class SessionExecuteError(Exception):\n ..." }, { "identifier": "Table", "path": "aquilify/orm/table.py", "snippet": "class Table(object, metaclass=TableMeta):\n __tablename__ = \"null\"\n\n def __init__(self, **values) -> None:\n\n \"\"\"\n Row initialization for a table with given parameters.\n\n :param values: The values that the table takes\n \"\"\"\n\n self.values = values\n\n @classmethod\n def columns(cls) -> dict[str, ColumnType]:\n\n \"\"\"\n Returns all columns defined in the new table.\n\n :return: Defined columns\n \"\"\"\n\n return {attribute: getattr(cls, attribute)\n for attribute in dir(cls)\n if isinstance(getattr(cls, attribute), ColumnType)}" }, { "identifier": "DynamicTable", "path": "aquilify/orm/table.py", "snippet": "class DynamicTable(object):\n def __init__(self, name: str, columns: dict[str, Column | ColumnType]) -> None:\n\n \"\"\"\n Dynamic table initializer.\n\n :param name: Table name\n :param columns: Table columns\n \"\"\"\n\n self.__tablename__ = name\n\n for key, column in columns.items():\n self.add(key, column)\n\n def add(self, key: str, column: Column | ColumnType) -> None:\n\n \"\"\"\n Adds a new column to the table.\n\n :param key: Column name\n :param column: Column object\n :return: Nothing\n \"\"\"\n\n self.__setattr__(key, ColumnType(self.__tablename__, key, column.type, column.options))\n\n def columns(self) -> dict[str, ColumnType]:\n\n \"\"\"\n Returns all columns defined in the new table.\n\n :return: Defined columns\n \"\"\"\n\n return {attribute: getattr(self, attribute)\n for attribute in dir(self)\n if isinstance(getattr(self, attribute), ColumnType)}\n\n def __call__(self, **values) -> Table:\n\n \"\"\"\n Row initialization for a dynamic table with given parameters.\n\n :param values: The values that the table takes\n :return: Initialized table object\n \"\"\"\n\n class TempTable(Table):\n __tablename__ = 
self.__tablename__\n\n for key, column in self.columns().items():\n setattr(TempTable, key, column)\n\n return TempTable(**values)" }, { "identifier": "dict_factory", "path": "aquilify/orm/utils/dict_factory.py", "snippet": "def dict_factory(cursor: sqlite3.Cursor) -> list[dict[str, object]]:\n\n \"\"\"\n Converts a list of tuples to a list of dictionaries.\n\n :param cursor: SQLite database cursor\n :return: List of dictionaries\n \"\"\"\n\n output = []\n for row in cursor.fetchall():\n output.append(\n {column[0]: row[index] for index, column in enumerate(cursor.description)}\n )\n\n return output" }, { "identifier": "DatabaseConnectionManager", "path": "aquilify/orm/connection.py", "snippet": "class DatabaseConnectionManager:\n def __init__(self, default_vendor: str = DatabaseVendor.SQLITE, default_name: str = 'db.sqlite3') -> None:\n self.default_vendor = default_vendor\n self.default_name = default_name\n self.logger = logging.getLogger(__name__)\n\n def _validate_database_settings(self, settings_data):\n if not settings_data or not settings_data.get(\"default\"):\n raise ImproperlyConfigured(\"The 'DATABASE' settings are missing or improperly configured.\")\n engine = settings_data['default'].get(\"ENGINE\", self.default_vendor)\n if engine != DatabaseVendor.SQLITE:\n raise UnsupportedVendor(f\"Vendor '{engine}' isn't supported by aquilify by default.\")\n\n def _get_engine_module(self, engine):\n try:\n engine_module = import_string(engine)\n return engine_module\n except ImportError as e:\n raise ImproperlyConfigured(f\"Invalid engine '{engine}' or Database connection not configured properly: {e}\")\n\n def _establish_connection(self, engine_module, name):\n try:\n db_connection = engine_module(name)\n if not db_connection:\n raise DatabaseConnectionError(\"Failed to establish the database connection.\")\n return db_connection\n except Exception as e:\n raise DatabaseConnectionError(f\"Error while establishing database connection: {e}\")\n\n def _get_connection(self):\n try:\n database_settings = getattr(settings, 'DATABASE')\n self._validate_database_settings(database_settings)\n default_settings = database_settings.get(\"default\", {})\n engine = default_settings.get(\"ENGINE\", self.default_vendor)\n name = default_settings.get('NAME', self.default_name)\n\n engine_module = self._get_engine_module(engine)\n return self._establish_connection(engine_module, name)\n except (ImproperlyConfigured, UnsupportedVendor, DatabaseConnectionError) as e:\n self.logger.error(str(e))\n raise" }, { "identifier": "TransactionContextManager", "path": "aquilify/orm/transactions.py", "snippet": "class TransactionContextManager(AbstractContextManager):\n def __init__(self, db_connection, isolation_level=IsolationLevel.DEFERRED):\n self._db_connection = db_connection\n self._isolation_level = isolation_level\n self._nested = 0\n self._completed = False\n\n def __enter__(self):\n self._nested += 1\n if self._nested == 1:\n self._db_connection.execute(f\"BEGIN {self._isolation_level.value} TRANSACTION\")\n else:\n self._db_connection.execute(f\"SAVEPOINT sp_{self._nested}\")\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._nested -= 1\n if self._nested == 0:\n if exc_type is None and not self._completed:\n self.commit()\n else:\n self.rollback()\n\n def commit(self):\n \"\"\"Manually commit the transaction.\"\"\"\n if not self._completed:\n try:\n self._db_connection.execute(\"COMMIT\")\n self._completed = True\n except sqlite3.Error as e:\n raise SessionExecuteError(f\"Error 
committing transaction: {e}\") from e\n\n def rollback(self):\n \"\"\"Manually roll back the transaction.\"\"\"\n if not self._completed:\n try:\n self._db_connection.execute(\"ROLLBACK\")\n self._completed = True\n except sqlite3.Error as e:\n raise SessionExecuteError(f\"Error rolling back transaction: {e}\") from e\n\n def __call__(self, isolation_level=IsolationLevel.DEFERRED):\n \"\"\"Allows chaining transaction context managers.\"\"\"\n return TransactionContextManager(self._db_connection, isolation_level)" }, { "identifier": "IsolationLevel", "path": "aquilify/orm/transactions.py", "snippet": "class IsolationLevel(Enum):\n DEFERRED = \"DEFERRED\"\n IMMEDIATE = \"IMMEDIATE\"\n EXCLUSIVE = \"EXCLUSIVE\"" } ]
import sqlite3 from .filters import MagicFilter from .column import Column, ColumnType from .constants import Types from .exceptions import SessionExecuteError from .table import Table, DynamicTable from .utils.dict_factory import dict_factory from .connection import DatabaseConnectionManager from .transactions import TransactionContextManager, IsolationLevel from typing import Callable, Union, Type
3,881
class Typing(object): """ Namespace with type hints. """ AnyTable = Union[MagicFilter, DynamicTable, Table, Type[Table]] NamespaceTable = Union[DynamicTable, Type[Table]] AnyColumn = Union[Column, ColumnType] class Sqlite3: def __init__(self, path: str) -> None: self.path = path def get_path(self) -> str: return self.path def __str__(self) -> str: return self.path class Session(object): def __init__(self, tables: list[Typing.NamespaceTable] = None, **kwargs) -> None: """ Creates a new session to work with the database. :param tables: List of tables to be created during session initialization :param kwargs: Other options for opening a database [ More details in `sqlite3.connect(...)` ] """ self._connection = DatabaseConnectionManager()._get_connection() self._database = sqlite3.connect(self._connection.get_path(), **kwargs) self._tables = tables or [] for table in self._tables: self.create(table) def create(self, table: Typing.NamespaceTable) -> None: """ Creates a new table in the database. :param table: Table or dynamic table :return: Nothing """ self._database.execute(f"CREATE TABLE IF NOT EXISTS {table.__tablename__} " f"({', '.join([column.serialize() for column in table.columns().values()])})") self._database.commit() def clear(self, table: Typing.NamespaceTable) -> None: """ Clears the selected table. :param table: Table or dynamic table :return: Nothing """ self._database.execute( f"DELETE FROM {table.__tablename__}" ) self._database.commit() def drop(self, table: Typing.NamespaceTable) -> None: """ Completely removes the table from the database. :param table: Table or dynamic table :return: Nothing """ self._database.execute( f"DROP TABLE IF EXISTS {table.__tablename__}" ) self._database.commit() def insert(self, table: Table, replace: bool = False) -> bool: """ Adds a new row to the table. :param table: Initialized table object :param replace: Will replace an existing row :return: True if the row was inserted, False otherwise """ values = table.values try: self._database.execute( f"INSERT {'OR REPLACE' if replace else ''} INTO {table.__tablename__} ({', '.join(values.keys())}) " f"VALUES ({', '.join(['?'] * len(values))})", list(values.values()) ) self._database.commit() return True except Exception: return False def update(self, data: Typing.AnyTable, table: Table) -> None: """ Updates the selected rows in the table. :param data: Any type of table or magic filter selecting the rows to update :param table: Initialized table object holding the new values :return: Nothing """ if not isinstance(data, (MagicFilter, DynamicTable, Table, type(Table))):
class Typing(object): """ Namespace with type hints. """ AnyTable = Union[MagicFilter, DynamicTable, Table, Type[Table]] NamespaceTable = Union[DynamicTable, Type[Table]] AnyColumn = Union[Column, ColumnType] class Sqlite3: def __init__(self, path: str) -> None: self.path = path def get_path(self) -> str: return self.path def __str__(self) -> str: return self.path class Session(object): def __init__(self, tables: list[Typing.NamespaceTable] = None, **kwargs) -> None: """ Creates a new session to work with the database. :param tables: List of tables to be created during session initialization :param kwargs: Other options for opening a database [ More details in `sqlite3.connect(...)` ] """ self._connection = DatabaseConnectionManager()._get_connection() self._database = sqlite3.connect(self._connection.get_path(), **kwargs) self._tables = tables or [] for table in self._tables: self.create(table) def create(self, table: Typing.NamespaceTable) -> None: """ Creates a new table in the database. :param table: Table or dynamic table :return: Nothing """ self._database.execute(f"CREATE TABLE IF NOT EXISTS {table.__tablename__} " f"({', '.join([column.serialize() for column in table.columns().values()])})") self._database.commit() def clear(self, table: Typing.NamespaceTable) -> None: """ Clears the selected table. :param table: Table or dynamic table :return: Nothing """ self._database.execute( f"DELETE FROM {table.__tablename__}" ) self._database.commit() def drop(self, table: Typing.NamespaceTable) -> None: """ Completely removes the table from the database. :param table: Table or dynamic table :return: Nothing """ self._database.execute( f"DROP TABLE IF EXISTS {table.__tablename__}" ) self._database.commit() def insert(self, table: Table, replace: bool = False) -> bool: """ Adds a new row to the table. :param table: Initialized table object :param replace: Will replace an existing row :return: True if the row was inserted, False otherwise """ values = table.values try: self._database.execute( f"INSERT {'OR REPLACE' if replace else ''} INTO {table.__tablename__} ({', '.join(values.keys())}) " f"VALUES ({', '.join(['?'] * len(values))})", list(values.values()) ) self._database.commit() return True except Exception: return False def update(self, data: Typing.AnyTable, table: Table) -> None: """ Updates the selected rows in the table. :param data: Any type of table or magic filter selecting the rows to update :param table: Initialized table object holding the new values :return: Nothing """ if not isinstance(data, (MagicFilter, DynamicTable, Table, type(Table))):
raise SessionExecuteError("The data is not a successor of MagicFilterData or Table!")
4
2023-11-16 08:26:02+00:00
8k
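The record above centers on the MagicFilter class quoted in its context field: comparison and boolean operators accumulate a parameterized SQL string plus a variables tuple. A minimal sketch follows, assuming that quoted MagicFilter class is in scope (the column name "users.age" is illustrative, not from the record).

age = MagicFilter("users.age")   # filter wrapping a raw column reference
expr = (age >= 18) & (age < 65)  # operators compose a parameterized query
print(expr.query)      # (users.age >= ?) AND (users.age < ?)
print(expr.variables)  # (18, 65)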
IBM/oper8
tests/test_session.py
[ { "identifier": "STRATEGIC_MERGE_PATCH", "path": "oper8/patch.py", "snippet": "STRATEGIC_MERGE_PATCH = \"patchStrategicMerge\"" }, { "identifier": "MAX_NAME_LEN", "path": "oper8/session.py", "snippet": "MAX_NAME_LEN = 63" }, { "identifier": "Session", "path": "oper8/session.py", "snippet": "class Session: # pylint: disable=too-many-instance-attributes,too-many-public-methods\n \"\"\"A session is the core context manager for the state of an in-progress\n reconciliation\n \"\"\"\n\n # We strictly define the set of attributes that a Session can have to\n # disallow arbitrary assignment\n __slots__ = [\n \"__components\",\n \"__component_dependencies\",\n \"__enabled_components\",\n \"__disabled_components\",\n \"__id\",\n \"__cr_manifest\",\n \"__config\",\n \"__temporary_patches\",\n \"__deploy_manager\",\n \"__status\",\n \"__current_version\",\n \"__graph\",\n # _app is retained for backwards compatibility\n \"_app\",\n ]\n\n def __init__( # pylint: disable=too-many-arguments\n self,\n reconciliation_id: str,\n cr_manifest: aconfig.Config,\n config: aconfig.Config,\n deploy_manager: DeployManagerBase,\n temporary_patches: Optional[List[dict]] = None,\n ):\n \"\"\"Construct a session object to hold the state for a reconciliation\n\n Args:\n reconciliation_id: str\n The unique ID for this reconciliation\n cr_manifest: aconfig.Config\n The full value of the CR mainfest that triggered this\n reconciliation\n config: aconfig.Config\n The compiled backend config for this reconciliation\n deploy_manager: DeployManagerBase\n The preconfigured DeployManager in charge of running the actual\n deploy operations for this deployment\n temporary_patches: list(dict)\n List of temporary patch object to apply to resources managed by\n this rollout\n \"\"\"\n\n ##################################################################\n # Private Members: These members will be hidden from client code #\n ##################################################################\n\n # Mapping from component name to Component instance\n self.__graph = Graph()\n\n ###################################################\n # Properties: These properties will be exposed as #\n # @property members to be used by client code #\n ###################################################\n\n self.__id = reconciliation_id\n if not isinstance(cr_manifest, aconfig.Config):\n cr_manifest = aconfig.Config(cr_manifest, override_env_vars=False)\n self._validate_cr(cr_manifest)\n self.__cr_manifest = cr_manifest\n if not isinstance(config, aconfig.Config):\n config = aconfig.Config(config, override_env_vars=False)\n self.__config = config\n self.__temporary_patches = temporary_patches or []\n\n # The deploy manager that will be used to manage interactions with the\n # cluster\n self.__deploy_manager = deploy_manager\n\n # Get the current status and version so that it can be referenced by the\n # Application and Components that use it\n self.__status = self.get_status()\n self.__current_version = get_version(self.status)\n\n ## Properties ##############################################################\n\n @property\n def id(self) -> str: # pylint: disable=invalid-name\n \"\"\"The unique reconciliation ID\"\"\"\n return self.__id\n\n @property\n def cr_manifest(self) -> aconfig.Config:\n \"\"\"The full CR manifest that triggered this reconciliation\"\"\"\n return self.__cr_manifest\n\n @property\n def spec(self) -> aconfig.Config:\n \"\"\"The spec section of the CR manifest\"\"\"\n return self.cr_manifest.get(\"spec\", aconfig.Config({}))\n\n 
@property\n def version(self) -> str:\n \"\"\"The spec.version for this CR\"\"\"\n return get_manifest_version(self.cr_manifest)\n\n @property\n def metadata(self) -> aconfig.Config:\n \"\"\"The metadata for this CR\"\"\"\n return self.cr_manifest.metadata\n\n @property\n def kind(self) -> str:\n \"\"\"The kind of the operand for this CR\"\"\"\n return self.cr_manifest.kind\n\n @property\n def api_version(self) -> str:\n \"\"\"The api version of the operand for this CR\"\"\"\n return self.cr_manifest.apiVersion\n\n @property\n def name(self) -> str:\n \"\"\"The metadata.name for this CR\"\"\"\n return self.metadata.name\n\n @property\n def namespace(self) -> str:\n \"\"\"The metadata.namespace for this CR\"\"\"\n return self.metadata.namespace\n\n @property\n def finalizers(self) -> str:\n \"\"\"The metadata.namespace for this CR\"\"\"\n\n # Manually create finalizer list if it doesn't exist so its\n # editable\n if \"finalizers\" not in self.metadata:\n self.metadata[\"finalizers\"] = []\n\n return self.metadata.get(\"finalizers\")\n\n @property\n def config(self) -> aconfig.Config:\n \"\"\"The backend config for this reconciliation\"\"\"\n return self.__config\n\n @property\n def temporary_patches(self) -> List[aconfig.Config]:\n \"\"\"Ordered list of temporary patches that apply to the operand being\n reconciled\n \"\"\"\n return self.__temporary_patches\n\n @property\n def status(self) -> aconfig.Config:\n \"\"\"The operand status\"\"\"\n return self.__status\n\n @property\n def current_version(self) -> aconfig.Config:\n \"\"\"The most recently reconciled version of the operand\"\"\"\n return self.__current_version\n\n @property\n def deploy_manager(self) -> DeployManagerBase:\n \"\"\"Allow read access to the deploy manager\"\"\"\n return self.__deploy_manager\n\n @property\n def graph(self) -> str: # pylint: disable=invalid-name\n \"\"\"The component graph\"\"\"\n return self.__graph\n\n ## State Management ########################################################\n #\n # These functions are used by derived controllers in their setup_components\n # implementations\n ##\n\n @alog.logged_function(log.debug2)\n def add_component(self, component: COMPONENT_INSTANCE_TYPE):\n \"\"\"Add a component to this deploy associated with a specfic application\n\n Args:\n component: Component\n The component to add to this deploy\n disabled: bool\n Whether or not the component is disabled in this deploy\n \"\"\"\n self.graph.add_node(component)\n\n def add_component_dependency(\n self,\n component: Union[str, COMPONENT_INSTANCE_TYPE],\n upstream_component: Union[str, COMPONENT_INSTANCE_TYPE],\n verify_function: Optional[VERIFY_FUNCTION] = None,\n ):\n \"\"\"Add a dependency indicating that one component requires an upstream\n component to be deployed before it can be deployed.\n\n Args:\n component: str or Component\n The component or name of component in the deploy that must wait for the upstream\n upstream_component: str or Component\n The upstream component or name of upstream that must be deployed before component\n verify_function: callable\n A callable function of the form `def verify(session) -> bool:`\n to use to verify that the dependency has been satisified. 
This\n will be used to block deployment of the component beyond\n requiring that the upstream has been deployed successfully.\n \"\"\"\n # Get component obj if name was provided\n component_node = component\n if isinstance(component, str):\n component_node = self.get_component(component)\n\n upstream_component_node = upstream_component\n if isinstance(upstream_component, str):\n upstream_component_node = self.get_component(upstream_component)\n\n if not component_node or not upstream_component_node:\n raise ValueError(\n f\"Cannot add dependency [{component} -> {upstream_component}]\",\n \" for unknown component(s)\",\n )\n\n if component_node.disabled or upstream_component_node.disabled:\n raise ValueError(\n f\"Cannot add dependency [{component} -> {upstream_component}]\",\n \" for with disabled component(s)\",\n )\n\n # Add session parameter to verify function if one was provided\n if verify_function:\n verify_function = partial(verify_function, self)\n self.graph.add_node_dependency(\n component_node, upstream_component_node, verify_function\n )\n\n ## Utilities ###############################################################\n #\n # These utilities may be used anywhere in client code to perform common\n # operations based on the state of the session.\n ##\n def get_component(\n self, name: str, disabled: Optional[bool] = None\n ) -> Optional[COMPONENT_INSTANCE_TYPE]:\n \"\"\"Get an individual component by name\n\n Args:\n name: str\n Name of component to return\n disabled: Optional[bool]\n Option on wether to return disabled components. If this option is not supplied then\n the referenced component will be returned irregardless whether its disabled\n or enabled\n\n Returns:\n component: Optional[Component]\n The component with the given name or None if component does not exit or does\n not match disabled arg\n \"\"\"\n comp = self.graph.get_node(name)\n\n # Only filter disabled/enabled components if the option was passed in.\n if isinstance(disabled, bool):\n if disabled:\n return comp if comp.disabled else None\n return comp if not comp.disabled else None\n\n return comp\n\n def get_components(self, disabled: bool = False) -> List[COMPONENT_INSTANCE_TYPE]:\n \"\"\"Get all components associated with an application\n\n Args:\n disabled: bool\n Whether to return disabled or enabled components\n\n Returns:\n components: list(Component)\n The list of Component objects associated with the given\n application\n \"\"\"\n assert isinstance(\n disabled, bool\n ), \"Disabled flag must be a bool. You may be using the old function signature!\"\n\n # Get list of all components.\n comp_list = self.graph.get_all_nodes()\n\n # Filter out disabled/enabled components using get_component\n filtered_list = [\n comp for comp in comp_list if self.get_component(comp.get_name(), disabled)\n ]\n\n return filtered_list\n\n def get_component_dependencies(\n self,\n component: Union[str, COMPONENT_INSTANCE_TYPE],\n ) -> List[Tuple[COMPONENT_INSTANCE_TYPE, Optional[VERIFY_FUNCTION]]]:\n \"\"\"Get the list of (upstream_name, verify_function) tuples for a given\n component.\n\n NOTE: This is primarily for use inside of the RolloutManager. 
Do not use\n this method in user code unless you know what you're doing!\n\n Args:\n component_name: str\n The name of the component to lookup dependencies for\n\n Returns:\n upstreams: List[Tuple[str, Optional[VERIFY_FUNCTION]]]\n The list of upstream (name, verify_fn) pairs\n \"\"\"\n component_node = component\n if isinstance(component, str):\n component_node = self.get_component(component)\n\n return component_node.get_children()\n\n def get_scoped_name(self, name: str) -> str:\n \"\"\"Get a name that is scoped to the application instance\n\n Args:\n name: str\n The name of a resource that will be managed by this operator\n which should have instance name scoping applied\n\n Returns:\n scoped_name: str\n The scoped and truncated version of the input name\n \"\"\"\n scoped_name = self.get_truncated_name(f\"{self.name}-{name}\")\n log.debug3(\"Scoped name [%s] -> [%s]\", name, scoped_name)\n return scoped_name\n\n @staticmethod\n def get_truncated_name(name: str) -> str:\n \"\"\"Perform truncation on a cluster name to make it conform to kubernetes\n limits while remaining unique.\n\n Args:\n name: str\n The name of the resource that should be truncated and made\n unique\n\n Returns:\n truncated_name: str\n A version of name that has been truncated and made unique\n \"\"\"\n if len(name) > MAX_NAME_LEN:\n sha = hashlib.sha256()\n sha.update(name.encode(\"utf-8\"))\n trunc_name = name[: MAX_NAME_LEN - 4] + sha.hexdigest()[:4]\n log.debug2(\"Truncated name [%s] -> [%s]\", name, trunc_name)\n name = trunc_name\n return name\n\n def get_object_current_state(\n self,\n kind: str,\n name: str,\n api_version: Optional[str] = None,\n namespace: Optional[str] = _SESSION_NAMESPACE,\n ) -> Tuple[bool, Optional[dict]]:\n \"\"\"Get the current state of the given object in the namespace of this\n session\n\n Args:\n kind: str\n The kind of the object to fetch\n name: str\n The full name of the object to fetch\n api_version: str\n The api_version of the resource kind to fetch\n\n Returns:\n success: bool\n Whether or not the state fetch operation succeeded\n current_state: dict or None\n The dict representation of the current object's configuration,\n or None if not present\n \"\"\"\n namespace = namespace if namespace != _SESSION_NAMESPACE else self.namespace\n return self.deploy_manager.get_object_current_state(\n kind=kind,\n name=name,\n namespace=namespace,\n api_version=api_version,\n )\n\n def filter_objects_current_state( # pylint: disable=too-many-arguments\n self,\n kind: str,\n api_version: Optional[str] = None,\n label_selector: Optional[str] = None,\n field_selector: Optional[str] = None,\n namespace: Optional[str] = _SESSION_NAMESPACE,\n ) -> Tuple[bool, List[dict]]:\n \"\"\"Get the current state of the given object in the namespace of this\n session\n\n Args:\n kind: str\n The kind of the object to fetch\n label_selector: str\n The label selector to filter the results by\n field_selector: str\n The field selector to filter the results by\n api_version: str\n The api_version of the resource kind to fetch\n\n Returns:\n success: bool\n Whether or not the state fetch operation succeeded\n current_state: List[Dict]\n The list of resources in dict representation,\n or [] if none match\n \"\"\"\n namespace = namespace if namespace != _SESSION_NAMESPACE else self.namespace\n return self.deploy_manager.filter_objects_current_state(\n kind=kind,\n namespace=namespace,\n api_version=api_version,\n label_selector=label_selector,\n field_selector=field_selector,\n )\n\n 
@alog.logged_function(log.debug2)\n @alog.timed_function(log.debug2)\n def get_status(self) -> dict:\n \"\"\"Get the status of the resource being managed by this session or an\n empty dict if not available\n\n Returns:\n current_status: dict\n The dict representation of the status subresource for the CR\n being managed by this session\n \"\"\"\n\n # Pull the kind, name, and namespace\n kind = self.cr_manifest.get(\"kind\")\n name = self.name\n api_version = self.api_version\n log.debug3(\"Getting status for %s.%s/%s\", api_version, kind, name)\n\n # Fetch the current status\n success, content = self.get_object_current_state(\n kind=kind,\n name=name,\n api_version=api_version,\n )\n assert_cluster(\n success, f\"Failed to fetch status for [{api_version}/{kind}/{name}]\"\n )\n if content:\n return content.get(\"status\", {})\n return {}\n\n ## Implementation Details ##################################################\n\n @staticmethod\n def _validate_cr(cr_manifest: aconfig.Config):\n \"\"\"Ensure that all expected elements of the CR are present. Expected\n elements are those that are guaranteed to be present by the kube API.\n \"\"\"\n assert \"kind\" in cr_manifest, \"CR missing required section ['kind']\"\n assert \"apiVersion\" in cr_manifest, \"CR missing required section ['apiVersion']\"\n assert \"metadata\" in cr_manifest, \"CR missing required section ['metadata']\"\n assert (\n \"name\" in cr_manifest.metadata\n ), \"CR missing required section ['metadata.name']\"\n assert (\n \"namespace\" in cr_manifest.metadata\n ), \"CR missing required section ['metadata.namespace']\"" }, { "identifier": "make_application_status", "path": "oper8/status.py", "snippet": "def make_application_status( # pylint: disable=too-many-arguments,too-many-locals\n ready_reason: Optional[Union[ReadyReason, str]] = None,\n ready_message: str = \"\",\n updating_reason: Optional[Union[UpdatingReason, str]] = None,\n updating_message: str = \"\",\n component_state: Optional[CompletionState] = None,\n external_conditions: Optional[List[dict]] = None,\n external_status: Optional[dict] = None,\n version: Optional[str] = None,\n supported_versions: Optional[List[str]] = None,\n operator_version: Optional[str] = None,\n kind: Optional[str] = None,\n) -> dict:\n \"\"\"Create a full status object for an application\n\n Args:\n ready_reason: Optional[ReadyReason or str]\n The reason enum for the Ready condition\n ready_message: str\n Plain-text message explaining the Ready condition value\n updating_reason: Optional[UpdatingReason or str]\n The reason enum for the Updating condition\n updating_message: str\n Plain-text message explaining the Updating condition value\n component_state: Optional[CompletionState]\n The terminal state of components in the latest rollout\n external_conditions: Optional[List[dict]]\n Additional conditions to include in the update\n external_status: Optional[dict]\n Additional key/value status elements besides \"conditions\" that\n should be preserved through the update\n version: Optional[str]\n The verified version of the application\n supported_versions: Optional[List[str]]\n The list of supported versions for this application\n operator_version: Optional[str]\n The operator version for this application\n kind: Optional[str]\n The kind of reconciliing CR. 
If specified, this function adds\n service status field which is compliant with IBM Cloud Pak\n requirements.\n\n Returns:\n current_status: dict\n Dict representation of the status for the application\n \"\"\"\n now = datetime.now()\n conditions = []\n if ready_reason is not None:\n conditions.append(_make_ready_condition(ready_reason, ready_message, now))\n if updating_reason is not None:\n conditions.append(\n _make_updating_condition(updating_reason, updating_message, now)\n )\n conditions.extend(external_conditions or [])\n status = external_status or {}\n status[\"conditions\"] = conditions\n\n # If a component_state is given, create the top-level status elements to\n # track which components have deployed and verified\n if component_state is not None:\n log.debug2(\"Adding component state to status\")\n status[COMPONENT_STATUS] = _make_component_state(component_state)\n log.debug3(status[COMPONENT_STATUS])\n\n # Create the versions section\n if version is not None:\n nested_set(status, VERSIONS_FIELD_CURRENT_VERSION, version)\n if supported_versions is not None:\n nested_set(\n status,\n VERSIONS_FIELD_AVAILABLE_VERSIONS,\n [_make_available_version(version) for version in supported_versions],\n )\n if operator_version is not None:\n nested_set(status, OPERATOR_VERSION, operator_version)\n\n # Create service status section\n if kind:\n # make field name follow k8s naming convention\n service_status_field = kind[0].lower()\n if len(kind) > 1:\n service_status_field += kind[1:]\n service_status_field += \"Status\"\n\n # Only update service status if the current value is set by oper8. This\n # allows services to override the service status section\n current_service_status = status.get(service_status_field)\n managed_service_values = [status.value for status in ServiceStatus]\n if (\n not current_service_status\n or current_service_status in managed_service_values\n ):\n current_service_status = _make_service_status(\n ready_reason, updating_reason\n ).value\n\n status[service_status_field] = current_service_status\n\n return status" }, { "identifier": "DummyNodeComponent", "path": "oper8/test_helpers/helpers.py", "snippet": "class DummyNodeComponent(DummyComponentBase):\n \"\"\"\n Configurable dummy component which will create an abritrary set of\n resource node instances.\n \"\"\"\n\n def __init__(self, session, *args, **kwargs):\n \"\"\"Construct with the additional option to fail build_chart\"\"\"\n super().__init__(*args, session=session, **kwargs)\n self._add_resources(self, session)" }, { "identifier": "MockDeployManager", "path": "oper8/test_helpers/helpers.py", "snippet": "class MockDeployManager(DryRunDeployManager):\n \"\"\"The MockDeployManager wraps a standard DryRunDeployManager and adds\n configuration options to simulate failures in each of its operations.\n \"\"\"\n\n def __init__(\n self,\n deploy_fail=False,\n deploy_raise=False,\n disable_fail=False,\n disable_raise=False,\n get_state_fail=False,\n get_state_raise=False,\n set_status_fail=False,\n set_status_raise=False,\n auto_enable=True,\n resources=None,\n resource_dir=None,\n **kwargs,\n ):\n \"\"\"This DeployManager can be configured to have various failure cases\n and will mock the state of the cluster so that get_object_current_state\n will pull its information from the local dict.\n \"\"\"\n\n # Add apiVersion to resources that are missing it, then initialize the\n # dry run manager\n\n resources = resources or []\n # Parse pre-populated resources if needed\n resources = resources + 
(RunOperatorCmd._parse_resource_dir(resource_dir))\n\n for resource in resources:\n resource.setdefault(\"apiVersion\", \"v1\")\n super().__init__(resources, **kwargs)\n\n self.deploy_fail = \"assert\" if deploy_raise else deploy_fail\n self.disable_fail = \"assert\" if disable_raise else disable_fail\n self.get_state_fail = \"assert\" if get_state_raise else get_state_fail\n self.set_status_fail = \"assert\" if set_status_raise else set_status_fail\n\n # If auto-enabling, turn the mocks on now\n if auto_enable:\n self.enable_mocks()\n\n #######################\n ## Helpers for Tests ##\n #######################\n\n def enable_mocks(self):\n \"\"\"Turn the mocks on\"\"\"\n self.deploy = mock.Mock(\n side_effect=get_failable_method(\n self.deploy_fail, super().deploy, (False, False)\n )\n )\n self.disable = mock.Mock(\n side_effect=get_failable_method(\n self.disable_fail, super().disable, (False, False)\n )\n )\n self.get_object_current_state = mock.Mock(\n side_effect=get_failable_method(\n self.get_state_fail, super().get_object_current_state, (False, None)\n )\n )\n self.set_status = mock.Mock(\n side_effect=get_failable_method(\n self.set_status_fail, super().set_status, (False, False)\n )\n )\n\n def get_obj(self, kind, name, namespace=None, api_version=None):\n return self.get_object_current_state(kind, name, namespace, api_version)[1]\n\n def has_obj(self, *args, **kwargs):\n return self.get_obj(*args, **kwargs) is not None" }, { "identifier": "make_patch", "path": "oper8/test_helpers/helpers.py", "snippet": "def make_patch(\n patch_type,\n body,\n name=\"test\",\n target=None,\n namespace=TEST_NAMESPACE,\n api_version=\"org.oper8/v1\",\n kind=\"TemporaryPatch\",\n):\n \"\"\"Make a sample TemporaryPatch resource body\"\"\"\n target = target or {}\n patch_obj = {\n \"apiVersion\": api_version,\n \"kind\": kind,\n \"metadata\": {\"name\": name},\n \"spec\": {\n \"apiVersion\": target.get(\"apiVersion\", \"fake\"),\n \"kind\": target.get(\"kind\", \"fake\"),\n \"name\": target.get(\"metadata\", {}).get(\"name\", \"fake\"),\n \"patchType\": patch_type,\n \"patch\": body,\n },\n }\n if namespace is not None:\n patch_obj[\"metadata\"][\"namespace\"] = namespace\n return aconfig.Config(\n patch_obj,\n override_env_vars=False,\n )" }, { "identifier": "setup_cr", "path": "oper8/test_helpers/helpers.py", "snippet": "def setup_cr(\n kind=\"Widget\",\n api_version=\"foo.bar.com/v123\",\n deploy_config=None,\n version=\"1.2.3\",\n name=TEST_INSTANCE_NAME,\n namespace=TEST_NAMESPACE,\n **kwargs,\n):\n deploy_config = deploy_config or {}\n cr_dict = kwargs or {}\n cr_dict.setdefault(\"kind\", kind)\n cr_dict.setdefault(\"apiVersion\", api_version)\n cr_dict.setdefault(\"metadata\", {}).setdefault(\"name\", name)\n cr_dict.setdefault(\"metadata\", {}).setdefault(\"namespace\", namespace)\n cr_dict.setdefault(\"metadata\", {}).setdefault(\"uid\", TEST_INSTANCE_UID)\n cr_dict.setdefault(\"spec\", {}).update(copy.deepcopy(deploy_config))\n cr_dict[\"spec\"].setdefault(\"version\", version)\n return aconfig.Config(cr_dict)" } ]
import pytest import aconfig from oper8.patch import STRATEGIC_MERGE_PATCH from oper8.session import MAX_NAME_LEN, Session from oper8.status import make_application_status from oper8.test_helpers.helpers import ( DummyNodeComponent, MockDeployManager, make_patch, setup_cr, )
6,983
api_version="foo.bar/v1", name="foo", namespace="testit", ): return aconfig.Config( { "kind": kind, "apiVersion": api_version, "metadata": { "name": name, "namespace": namespace, "labels": {"app": "test", "run": name}, }, }, override_env_vars=False, ) ## Tests ####################################################################### ############### ## Prperties ## ############### def test_constructed_properties(): """Make sure all properties derived from the constructor args are populated correctly """ rec_id = "1ab" cr = setup_cr() cfg = aconfig.Config({"foo": "bar"}, override_env_vars=False) dm = MockDeployManager() patches = [make_patch(STRATEGIC_MERGE_PATCH, {})] session = Session(rec_id, cr, cfg, dm, patches) assert session.id == rec_id assert session.cr_manifest == cr assert session.config == cfg assert session.deploy_manager == dm assert session.temporary_patches == patches def test_cr_properties(): """Make sure all properties derived from the CR manifest are populated correctly """ version = "develop.1.2.3" namespace = "wingbat" name = "wombat" api_version = "critters.bats/v23" kind = "Critter" spec = {"key": "value"} cr = setup_cr( api_version=api_version, kind=kind, namespace=namespace, name=name, version=version, spec=spec, ) session = Session("1ab", cr, {}, MockDeployManager()) assert session.version == version assert session.namespace == namespace assert session.name == name assert session.kind == kind assert session.api_version == api_version assert session.spec == cr.spec assert session.metadata == cr.metadata @pytest.mark.parametrize( "field", ["kind", "apiVersion", "metadata", "metadata.name", "metadata.namespace"], ) def test_missing_cr_required_fields(field): """Make sure that required fields missing from the CR correctly raise validation errors """ cr = setup_cr() field_parts = field.split(".") dct = cr while len(field_parts) > 1: dct = dct[field_parts[0]] field_parts = field_parts[1:] del dct[field_parts[0]] with pytest.raises(AssertionError): session = Session("1ab", cr, {}, MockDeployManager()) @pytest.mark.parametrize( "field,expected", [("spec", aconfig.Config({})), ("spec.version", None)], ) def test_missing_cr_optional_fields(field, expected): """Make sure that optional fields in the CR that are accessed via properties do not raise errors """ cr = setup_cr() field_parts = field.split(".") dct = cr while len(field_parts) > 1: dct = dct[field_parts[0]] field_parts = field_parts[1:] del dct[field_parts[0]] session = Session("1ab", cr, {}, MockDeployManager()) assert getattr(session, field.split(".")[-1]) == expected def test_current_version(): """Make sure that retrieving the current_version works when it's present in the deploy manager """ # Make sure that current_version is not set when it hasn't been deployed cr = setup_cr() dm = MockDeployManager() session = Session("1ab", cr, {}, dm) assert session.current_version is None # Make sure that current_version is set when it's been deployed before current_version = "some-version"
""" Tests for all functionality of the Session object """ # Third Party # First Party # Local ## Helpers ##################################################################### def make_component_class(comp_name): class DerivedComponent(DummyNodeComponent): name = comp_name return DerivedComponent def make_api_obj( kind="Foo", api_version="foo.bar/v1", name="foo", namespace="testit", ): return aconfig.Config( { "kind": kind, "apiVersion": api_version, "metadata": { "name": name, "namespace": namespace, "labels": {"app": "test", "run": name}, }, }, override_env_vars=False, ) ## Tests ####################################################################### ############### ## Prperties ## ############### def test_constructed_properties(): """Make sure all properties derived from the constructor args are populated correctly """ rec_id = "1ab" cr = setup_cr() cfg = aconfig.Config({"foo": "bar"}, override_env_vars=False) dm = MockDeployManager() patches = [make_patch(STRATEGIC_MERGE_PATCH, {})] session = Session(rec_id, cr, cfg, dm, patches) assert session.id == rec_id assert session.cr_manifest == cr assert session.config == cfg assert session.deploy_manager == dm assert session.temporary_patches == patches def test_cr_properties(): """Make sure all properties derived from the CR manifest are populated correctly """ version = "develop.1.2.3" namespace = "wingbat" name = "wombat" api_version = "critters.bats/v23" kind = "Critter" spec = {"key": "value"} cr = setup_cr( api_version=api_version, kind=kind, namespace=namespace, name=name, version=version, spec=spec, ) session = Session("1ab", cr, {}, MockDeployManager()) assert session.version == version assert session.namespace == namespace assert session.name == name assert session.kind == kind assert session.api_version == api_version assert session.spec == cr.spec assert session.metadata == cr.metadata @pytest.mark.parametrize( "field", ["kind", "apiVersion", "metadata", "metadata.name", "metadata.namespace"], ) def test_missing_cr_required_fields(field): """Make sure that required fields missing from the CR correctly raise validation errors """ cr = setup_cr() field_parts = field.split(".") dct = cr while len(field_parts) > 1: dct = dct[field_parts[0]] field_parts = field_parts[1:] del dct[field_parts[0]] with pytest.raises(AssertionError): session = Session("1ab", cr, {}, MockDeployManager()) @pytest.mark.parametrize( "field,expected", [("spec", aconfig.Config({})), ("spec.version", None)], ) def test_missing_cr_optional_fields(field, expected): """Make sure that optional fields in the CR that are accessed via properties do not raise errors """ cr = setup_cr() field_parts = field.split(".") dct = cr while len(field_parts) > 1: dct = dct[field_parts[0]] field_parts = field_parts[1:] del dct[field_parts[0]] session = Session("1ab", cr, {}, MockDeployManager()) assert getattr(session, field.split(".")[-1]) == expected def test_current_version(): """Make sure that retrieving the current_version works when it's present in the deploy manager """ # Make sure that current_version is not set when it hasn't been deployed cr = setup_cr() dm = MockDeployManager() session = Session("1ab", cr, {}, dm) assert session.current_version is None # Make sure that current_version is set when it's been deployed before current_version = "some-version"
cr.status = make_application_status(version=current_version)
3
2023-11-15 16:43:29+00:00
8k
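The `make_application_status` snippet in the oper8 record above fills its versions section through a `nested_set(status, path, value)` helper that the record does not define. A plausible minimal version is sketched here, assuming the path is a dotted string and intermediate dicts are created on demand; the real oper8 helper and the field-name constants may differ.

```python
# Hypothetical nested_set; the dotted path below is illustrative, not the
# actual VERSIONS_FIELD_CURRENT_VERSION constant from oper8.
def nested_set(dct: dict, path: str, value) -> None:
    keys = path.split(".")
    for key in keys[:-1]:
        dct = dct.setdefault(key, {})  # descend, creating levels as needed
    dct[keys[-1]] = value

status = {}
nested_set(status, "versions.reconciled", "1.2.3")
assert status == {"versions": {"reconciled": "1.2.3"}}
```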
smrfeld/tsmixer-pytorch
main.py
[ { "identifier": "plot_preds", "path": "utils/plotting.py", "snippet": "def plot_preds(preds: List[List[float]], preds_gt: List[List[float]], no_feats_plot: int, fname_save: Optional[str] = None, inputs: Optional[List[List[float]]] = None, show: bool = True):\n \"\"\"Plot predictions\n\n Args:\n preds (List[List[float]]): Predictions of shape (no_samples, no_feats)\n preds_gt (List[List[float]]): Predictions of shape (no_samples, no_feats)\n no_feats_plot (int): Number of features to plot\n fname_save (Optional[str], optional): File name to save the plot. Defaults to None.\n inputs (Optional[List[List[float]]], optional): Input of shape (no_samples, no_feats)\n show (bool): Show the plot\n \"\"\" \n import plotly.graph_objects as go\n from plotly.subplots import make_subplots\n\n no_feats = len(preds[0])\n if no_feats_plot > no_feats:\n logger.warning(f\"no_feats_plot ({no_feats_plot}) is larger than no_feats ({no_feats}). Setting no_feats_plot to no_feats\")\n no_feats_plot = no_feats\n\n no_cols = 3\n no_rows = int(no_feats_plot / no_cols)\n if no_feats_plot % no_cols != 0:\n no_rows += 1\n\n fig = make_subplots(rows=no_rows, cols=no_cols, subplot_titles=[f\"Feature {ifeat}\" for ifeat in range(no_feats_plot)])\n\n no_inputs = len(inputs) if inputs is not None else 0\n x_preds = list(range(no_inputs, no_inputs + len(preds)))\n for ifeat in range(no_feats_plot):\n row = int(ifeat / no_cols) + 1\n col = (ifeat % no_cols) + 1\n\n if inputs is not None:\n x_inputs = list(range(len(inputs)))\n fig.add_trace(go.Scatter(x=x_inputs, y=[in_y[ifeat] for in_y in inputs], mode=\"lines\", name=f\"Inputs\", line=dict(color=\"black\"), showlegend=ifeat==0), row=row, col=col)\n\n fig.add_trace(go.Scatter(x=x_preds, y=[pred[ifeat] for pred in preds_gt], mode=\"lines\", name=f\"Ground truth\", line=dict(color=\"red\"), showlegend=ifeat==0), row=row, col=col)\n fig.add_trace(go.Scatter(x=x_preds, y=[pred[ifeat] for pred in preds], mode=\"lines\", name=f\"Model\", line=dict(color=\"blue\"), showlegend=ifeat==0), row=row, col=col)\n\n fig.update_layout(\n height=300*no_rows, \n width=400*no_cols, \n title_text=\"Predictions\",\n font=dict(size=18),\n xaxis_title_text=\"Time\",\n yaxis_title_text=\"Signal\",\n )\n\n if fname_save is not None:\n fig.write_image(fname_save)\n logger.info(f\"Saved plot to {fname_save}\")\n\n if show:\n fig.show()\n\n return fig" }, { "identifier": "plot_loss", "path": "utils/plotting.py", "snippet": "def plot_loss(train_data: TrainingMetadata, fname_save: Optional[str] = None, show: bool = True):\n \"\"\"Plot loss\n\n Args:\n train_data (TSMixer.TrainingMetadata): Training metadata\n fname_save (Optional[str], optional): File name to save the plot. Defaults to None.\n show (bool): Show the plot\n \"\"\" \n import plotly.graph_objects as go\n\n fig = go.Figure()\n x = [ epoch for epoch in train_data.epoch_to_data.keys() ]\n y = [ data.val_loss for data in train_data.epoch_to_data.values() ]\n fig.add_trace(go.Scatter(x=x, y=y, mode=\"lines\", name=\"Val. 
loss\"))\n y = [ data.train_loss for data in train_data.epoch_to_data.values() ]\n fig.add_trace(go.Scatter(x=x, y=y, mode=\"lines\", name=\"Train loss\"))\n\n fig.update_layout(\n height=500, \n width=700, \n title_text=\"Loss during training\",\n xaxis_title_text=\"Epoch\",\n yaxis_title_text=\"Loss\",\n font=dict(size=18),\n )\n\n if fname_save is not None:\n fig.write_image(fname_save)\n logger.info(f\"Saved plot to {fname_save}\")\n\n if show:\n fig.show()\n\n return fig" }, { "identifier": "TSMixerConf", "path": "utils/tsmixer_conf.py", "snippet": "class TSMixerConf(DataClassDictMixin):\n\n class Initialize(Enum):\n FROM_LATEST_CHECKPOINT = \"from-latest-checkpoint\"\n \"Load the model from the latest checkpoint\"\n\n FROM_BEST_CHECKPOINT = \"from-best-checkpoint\"\n \"Load the model from the best checkpoint\"\n\n FROM_SCRATCH = \"from-scratch\"\n \"Initialize the model from scratch\"\n\n class DataSrc(Enum):\n\n CSV_FILE = \"csv-file\"\n \"Load the dataset from a CSV file\"\n\n class ValidationSplit(Enum):\n \n TEMPORAL_HOLDOUT = \"temporal-holdout\"\n \"Reserve the last portion (e.g., 10-20%) of your time-ordered data for validation, and use the remaining data for training. This is a simple and widely used approach.\"\n\n output_dir: str\n \"Directory where to save checkpoints and generated images\"\n\n input_length: int\n \"Number of time steps to use as input\"\n\n no_features: int\n \"Number of features in the dataset\"\n\n no_mixer_layers: int\n \"Number of mixer layers\"\n\n prediction_length: int\n \"Number of time steps to predict\"\n\n data_src: DataSrc\n \"Where to load the dataset from\"\n\n device: str = \"mps\"\n \"Device to use for training\"\n\n data_src_csv: Optional[str] = None\n \"Path to the CSV file to load the dataset from. Only used if data_src is CSV_FILE\"\n\n batch_size: int = 64\n \"Batch size\"\n\n shuffle: bool = True\n \"Shuffle the data\"\n\n num_epochs: int = 10\n \"Number of epochs to train for\"\n\n learning_rate: float = 0.001\n \"Learning rate\"\n\n optimizer: str = \"Adam\"\n \"Optimizer to use\"\n\n random_seed: int = 42\n \"Random seed for reproducibility\"\n\n validation_split: ValidationSplit = ValidationSplit.TEMPORAL_HOLDOUT\n \"How to split the data into training and validation\"\n\n validation_split_holdout: float = 0.2\n \"Use the last X% of the data for validation. Only used for TEMPORAL_HOLDOUT\"\n\n initialize: Initialize = Initialize.FROM_SCRATCH\n \"How to initialize the model\"\n\n dropout: float = 0.5\n \"Dropout\"\n\n feat_mixing_hidden_channels: Optional[int] = None\n \"Number of hidden channels in the feature mixing MLP. If None, uses same as input features.\"\n\n early_stopping_patience: Optional[int] = 5\n \"Early stopping patience. If the validation loss does not improve over this many epochs, stop early. 
If None, no early stopping is used.\"\n\n @property\n def image_dir(self):\n makedirs(self.output_dir)\n makedirs(os.path.join(self.output_dir, \"images\"))\n return os.path.join(self.output_dir, \"images\")\n\n @property\n def checkpoint_init(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"init.pth\")\n\n @property\n def checkpoint_best(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"best.pth\")\n\n @property\n def checkpoint_latest(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"latest.pth\")\n\n @property\n def train_progress_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"loss.json\")\n\n @property\n def pred_val_dataset_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"pred_val_dataset.json\")\n\n @property\n def data_norm_json(self):\n makedirs(self.output_dir)\n return os.path.join(self.output_dir, \"data_norm.json\")\n\n def check_valid(self):\n assert 0 <= self.validation_split_holdout <= 1, \"validation_split_holdout must be between 0 and 1\"\n\n # Check device exists\n import torch\n assert self.device in [\"cpu\", \"cuda\", \"cuda:0\", \"cuda:1\", \"cuda:2\", \"cuda:3\", \"mps\"], f\"Device {self.device} not supported\"\n if self.device == \"cuda\":\n assert torch.cuda.is_available(), \"CUDA is not available\"\n assert torch.cuda.device_count() > 1, \"Must have more than one CUDA device to use MPS\"\n elif self.device == \"mps\":\n assert torch.backends.mps.is_available(), \"MPS is not available\"\n \n\n def load_training_metadata_or_new(self, epoch_start: Optional[int] = None) -> \"TrainingMetadata\":\n \"\"\"Load the training progress from a JSON file, or create a new one\n\n Args:\n epoch_start (Optional[int], optional): Starting epoch - earlier epochs will be removed if not None. Defaults to None.\n\n Returns:\n TrainProgress: Training metadata\n \"\"\" \n if os.path.exists(self.train_progress_json):\n with open(self.train_progress_json, \"r\") as f:\n tp = TrainingMetadata.from_dict(json.load(f))\n\n # Remove epochs after epoch_start\n if epoch_start is not None:\n tp.epoch_to_data = { epoch: tp.epoch_to_data[epoch] for epoch in tp.epoch_to_data if epoch < epoch_start }\n \n return tp\n else:\n return TrainingMetadata(epoch_to_data={})\n\n\n def write_data_norm(self, data_norm: DataNormalization):\n \"\"\"Write the data normalization to a JSON file\n\n Args:\n data_norm (DataNormalization): Data normalization\n \"\"\" \n with open(self.data_norm_json, \"w\") as f:\n json.dump(data_norm.to_dict(), f, indent=3)\n logger.debug(f\"Saved data normalization to {f.name}\")\n\n\n def write_training_metadata(self, train_data: \"TrainingMetadata\"):\n \"\"\"Write the training progress to a JSON file\n\n Args:\n train_data (TrainingMetadata): _description_\n \"\"\" \n if os.path.dirname(self.train_progress_json) != \"\":\n makedirs(os.path.dirname(self.train_progress_json))\n with open(self.train_progress_json, \"w\") as f:\n json.dump(train_data.to_dict(), f, indent=3)\n\n\n def create_data_loaders_train_val(self, data_norm: Optional[DataNormalization] = None) -> Tuple[DataLoader, DataLoader, DataNormalization]:\n \"\"\"Create the training and validation data loaders\n\n Args:\n data_norm (Optional[DataNormalization], optional): Data normalization to use, otherwise will be calculated. 
Defaults to None.\n\n Returns:\n Tuple[DataLoader, DataLoader, DataNormalization]: Training and validation data loaders\n \"\"\" \n\n if self.data_src == self.DataSrc.CSV_FILE:\n assert self.data_src_csv is not None, \"data_src_csv must be set if data_src is CSV_FILE\"\n\n from .load_csv import load_csv_dataset, ValidationSplit\n return load_csv_dataset(\n csv_file=self.data_src_csv,\n batch_size=self.batch_size,\n input_length=self.input_length,\n prediction_length=self.prediction_length,\n val_split=ValidationSplit(self.validation_split.value),\n val_split_holdout=self.validation_split_holdout,\n shuffle=self.shuffle,\n data_norm_exist=data_norm\n )\n else:\n raise NotImplementedError(f\"data_src {self.data_src} not implemented\")" }, { "identifier": "TSMixerGridSearch", "path": "utils/tsmixer_grid_search_conf.py", "snippet": "class TSMixerGridSearch(DataClassDictMixin):\n \"\"\"Configuration for grid search\n \"\"\" \n\n @dataclass\n class ParamRange(DataClassDictMixin):\n \n learning_rates: List[float]\n \"Learning rates\"\n\n no_mixer_layers: List[int]\n \"Number of mixer layers\"\n\n dropouts: List[float]\n \"Dropout\"\n\n input_lengths: List[int]\n \"Number of time steps to use as input\"\n\n prediction_lengths: List[int]\n \"Number of time steps to predict\"\n\n feat_mixing_hidden_channels: List[Optional[int]] = field(default_factory=lambda: [None])\n \"Number of hidden channels in the feature mixing MLP. If None, uses same as input features.\"\n\n batch_sizes: List[int] = field(default_factory=lambda: [64])\n \"Batch size\"\n\n num_epochs: List[int] = field(default_factory=lambda: [100])\n \"Number of epochs to train for\"\n\n optimizers: List[str] = field(default_factory=lambda: [\"Adam\"])\n \"Optimizer to use\"\n\n @property\n def options_str(self) -> str:\n s = []\n s.append((\"lr\",str(self.learning_rates)))\n s.append((\"nmix\",str(self.no_mixer_layers)))\n s.append((\"drop\",str(self.dropouts)))\n s.append((\"in\",str(self.input_lengths)))\n s.append((\"pred\",str(self.prediction_lengths)))\n s.append((\"hidden\",str(self.feat_mixing_hidden_channels)))\n s.append((\"batch\",str(self.batch_sizes)))\n s.append((\"epochs\",str(self.num_epochs)))\n s.append((\"opt\",str(self.optimizers)))\n\n # Sort by key\n s = sorted(s, key=lambda x: x[0])\n\n return \"_\".join([f\"{k}{v}\" for k,v in s])\n\n param_ranges: List[ParamRange]\n \"Any number of parameter ranges to try\"\n\n output_dir: str\n \"Output directory\"\n\n no_features: int\n \"Number of features in the dataset\"\n\n data_src: TSMixerConf.DataSrc = TSMixerConf.DataSrc.CSV_FILE\n \"Where to load the dataset from\"\n\n data_src_csv: Optional[str] = None\n \"Path to the CSV file to load the dataset from. 
Only used if data_src is CSV_FILE\"\n\n def iterate(self) -> Iterator[TSMixerConf]:\n \"\"\"Iterate over all configurations\n\n Yields:\n Iterator[TSMixerConf]: Configuration for a single run\n \"\"\" \n for idx,param_range in enumerate(self.param_ranges):\n logger.info(\"===========================================\")\n logger.info(f\"Grid search iteration {idx+1}/{len(self.param_ranges)}\")\n logger.info(\"===========================================\")\n\n for learning_rate in param_range.learning_rates:\n for no_mixer_layers in param_range.no_mixer_layers:\n for dropout in param_range.dropouts:\n for feat_mixing_hidden_channels in param_range.feat_mixing_hidden_channels:\n for input_length in param_range.input_lengths:\n for prediction_length in param_range.prediction_lengths:\n for batch_size in param_range.batch_sizes:\n for num_epochs in param_range.num_epochs:\n for optimizer in param_range.optimizers:\n # Output subdir\n output_dir = os.path.join(self.output_dir, param_range.options_str)\n conf = TSMixerConf(\n input_length=input_length,\n prediction_length=prediction_length,\n no_features=self.no_features,\n no_mixer_layers=no_mixer_layers,\n output_dir=output_dir,\n data_src=self.data_src,\n data_src_csv=self.data_src_csv,\n batch_size=batch_size,\n num_epochs=num_epochs,\n learning_rate=learning_rate,\n optimizer=optimizer,\n dropout=dropout,\n feat_mixing_hidden_channels=feat_mixing_hidden_channels\n )\n logger.info(f\"TSMixer config: {conf}\")\n logger.info(f\"Output sub-dir: {output_dir}\")\n yield conf" }, { "identifier": "TSMixer", "path": "utils/tsmixer.py", "snippet": "class TSMixer:\n \"\"\"TSMixer including training and prediction methods\n \"\"\" \n\n\n def __init__(self, conf: TSMixerConf):\n \"\"\"Constructor for TSMixer class\n\n Args:\n conf (TSMixerConf): Configuration\n \"\"\" \n conf.check_valid()\n self.conf = conf\n\n # Create the model\n self.model = TSMixerModel(\n input_length=self.conf.input_length,\n forecast_length=self.conf.prediction_length,\n no_feats=self.conf.no_features,\n feat_mixing_hidden_channels=self.conf.feat_mixing_hidden_channels or self.conf.no_features,\n no_mixer_layers=self.conf.no_mixer_layers,\n dropout=self.conf.dropout\n )\n\n # Move to device\n self.model.to(self.conf.device)\n\n # Load the model\n if self.conf.initialize == self.conf.Initialize.FROM_LATEST_CHECKPOINT:\n self.load_checkpoint(fname=self.conf.checkpoint_latest)\n elif self.conf.initialize == self.conf.Initialize.FROM_BEST_CHECKPOINT:\n self.load_checkpoint(fname=self.conf.checkpoint_best)\n elif self.conf.initialize == self.conf.Initialize.FROM_SCRATCH:\n pass\n else:\n raise NotImplementedError(f\"Initialize {self.conf.initialize} not implemented\")\n\n\n def load_checkpoint(self, fname: str, optimizer: Optional[torch.optim.Optimizer] = None) -> Tuple[int,float]:\n \"\"\"Load a checkpoint, optionally including the optimizer state\n\n Args:\n fname (str): File name\n optimizer (Optional[torch.optim.Optimizer], optional): Optimizer to update from checkpoint. 
Defaults to None.\n\n Returns:\n Tuple[int,float]: Epoch and loss\n \"\"\" \n logger.debug(f\"Loading model weights from {fname}\")\n checkpoint = torch.load(fname)\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n if optimizer is not None:\n logger.debug(f\"Loading optimizer state from {fname}\")\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n epoch = checkpoint['epoch']\n loss = checkpoint['loss']\n logger.info(f\"Loaded optimizer state from epoch {epoch} with loss {loss}\")\n return epoch, loss\n\n\n def predict(self, batch_input: torch.Tensor) -> torch.Tensor:\n \"\"\"Predict the output for a batch of input data\n\n Args:\n batch_input (torch.Tensor): Input data of shape (batch_size, input_length (time), no_features)\n\n Returns:\n torch.Tensor: Predicted output of shape (batch_size, prediction_length (time), no_features)\n \"\"\" \n self.model.eval()\n\n # Check size\n assert batch_input.shape[1] == self.conf.input_length, f\"Input length {batch_input.shape[1]} does not match configuration {self.conf.input_length}\"\n assert batch_input.shape[2] == self.conf.no_features, f\"Number of features {batch_input.shape[2]} does not match configuration {self.conf.no_features}\"\n\n # Predict\n batch_input = batch_input.to(self.conf.device)\n with torch.no_grad():\n batch_pred_hat = self.model(batch_input)\n return batch_pred_hat\n\n\n def load_data_norm(self) -> Optional[DataNormalization]:\n \"\"\"Load the data normalization from a JSON file\n\n Returns:\n Optional[DataNormalization]: Data normalization, or None if the file does not exist\n \"\"\" \n\n if os.path.exists(self.conf.data_norm_json):\n logger.debug(f\"Loading data normalization from {self.conf.data_norm_json}\")\n with open(self.conf.data_norm_json, \"r\") as f:\n return DataNormalization.from_dict(json.load(f))\n else:\n return None\n\n\n @dataclass\n class PredData(DataClassDictMixin):\n \"\"\"Prediction data\n \"\"\" \n\n pred_gt: List[List[float]]\n \"Ground truth prediction\"\n\n pred: List[List[float]]\n \"Model prediction\"\n\n inputs: Optional[List[List[float]]] = None\n \"Inputs\"\n\n\n def predict_val_dataset(self, max_samples: Optional[int] = None, save_inputs: bool = False) -> List[PredData]:\n \"\"\"Predict on the validation dataset\n\n Args:\n max_samples (Optional[int], optional): Maximum number of samples to predict from the validation dataset. Defaults to None.\n save_inputs (bool, optional): Save the inputs as well as the predictions. 
Defaults to False.\n\n Returns:\n List[PredData]: List of predictions\n \"\"\" \n\n # Change batch size to 1 and not shuffle data for consistency\n batch_size_save = self.conf.batch_size\n shuffle_save = self.conf.shuffle\n self.conf.batch_size = 1\n self.conf.shuffle = False\n\n # Load the data normalization if it exists and use it\n data_norm = self.load_data_norm()\n\n # Create the loaders\n _, loader_val, _ = self.conf.create_data_loaders_train_val(data_norm)\n \n # Predict\n data_list: List[TSMixer.PredData] = []\n for _ in tqdm(range(max_samples or len(loader_val)), desc=\"Predicting\"):\n batch_input, batch_pred = next(iter(loader_val))\n batch_pred_hat = self.predict(batch_input)\n data = TSMixer.PredData(\n pred_gt=batch_pred.tolist()[0],\n pred=batch_pred_hat.tolist()[0],\n inputs=batch_input.tolist()[0] if save_inputs else None\n )\n data_list.append(data) \n\n # Save data to json\n with open(self.conf.pred_val_dataset_json, \"w\") as f:\n json.dump([ d.to_dict() for d in data_list ], f)\n logger.info(f\"Saved data to {f.name}\")\n\n # Reset options\n self.conf.batch_size = batch_size_save\n self.conf.shuffle = shuffle_save\n\n return data_list\n\n\n def train(self):\n \"\"\"Train the model\n \"\"\" \n\n # Create the optimizer\n optimizer_cls = getattr(torch.optim, self.conf.optimizer)\n optimizer = optimizer_cls(self.model.parameters(), lr=self.conf.learning_rate)\n\n # Load if needed\n if self.conf.initialize == self.conf.Initialize.FROM_LATEST_CHECKPOINT:\n epoch_start, val_loss_best = self.load_checkpoint(fname=self.conf.checkpoint_latest, optimizer=optimizer)\n data_norm = self.load_data_norm()\n elif self.conf.initialize == self.conf.Initialize.FROM_BEST_CHECKPOINT:\n epoch_start, val_loss_best = self.load_checkpoint(fname=self.conf.checkpoint_best, optimizer=optimizer)\n data_norm = self.load_data_norm()\n elif self.conf.initialize == self.conf.Initialize.FROM_SCRATCH:\n epoch_start, val_loss_best = 0, float(\"inf\")\n\n # Clear the output directory\n if os.path.exists(self.conf.output_dir):\n logger.warning(f\"Output directory {self.conf.output_dir} already exists. Deleting it to start over. 
You have 8 seconds.\")\n for _ in range(8):\n print(\".\", end=\"\", flush=True)\n time.sleep(1)\n print(\"\")\n shutil.rmtree(self.conf.output_dir)\n makedirs(self.conf.output_dir)\n\n # Save initial weights\n self._save_checkpoint(epoch=epoch_start, optimizer=optimizer, loss=val_loss_best, fname=self.conf.checkpoint_init)\n data_norm = None\n\n # Copy the config to the output directory for reference\n fname_conf = os.path.join(self.conf.output_dir, \"conf.yml\")\n makedirs(self.conf.output_dir)\n with open(fname_conf, \"w\") as f:\n yaml.dump(self.conf.to_dict(), f, indent=3)\n logger.info(f\"Saved configuration to {f.name}\")\n \n else:\n raise NotImplementedError(f\"Initialize {self.conf.initialize} not implemented\")\n train_data = self.conf.load_training_metadata_or_new(epoch_start)\n\n # Create the loaders\n loader_train, loader_val, data_norm = self.conf.create_data_loaders_train_val(data_norm)\n\n # Write data normalization\n self.conf.write_data_norm(data_norm)\n\n # Train\n epoch_last_improvement = None\n for epoch in range(epoch_start, self.conf.num_epochs):\n logger.info(f\"Epoch {epoch+1}/{self.conf.num_epochs}\")\n t0 = time.time()\n\n # Training\n train_loss = 0\n for batch_input, batch_pred in tqdm(loader_train, desc=\"Training batches\"):\n batch_input, batch_pred = batch_input.to(self.conf.device), batch_pred.to(self.conf.device)\n train_loss += self._train_step(batch_input, batch_pred, optimizer)\n\n # Validation loss\n self.model.eval()\n with torch.no_grad():\n val_loss = 0\n for batch_input, batch_pred in tqdm(loader_val, desc=\"Validation batches\"):\n batch_input, batch_pred = batch_input.to(self.conf.device), batch_pred.to(self.conf.device)\n val_loss += self._compute_loss(batch_input, batch_pred).item()\n\n # Log\n train_loss /= len(loader_train)\n val_loss /= len(loader_val)\n dur = time.time() - t0\n logger.info(f\"Training loss: {train_loss:.5f} val: {val_loss:.5f} duration: {dur:.2f}s\")\n\n # Store metadata about training\n train_data.epoch_to_data[epoch] = TrainingMetadata.EpochData(epoch=epoch, train_loss=train_loss, val_loss=val_loss, duration_seconds=dur)\n\n # Save checkpoint\n if val_loss < val_loss_best:\n logger.info(f\"New best validation loss: {val_loss:.5f}\")\n self._save_checkpoint(epoch=epoch, optimizer=optimizer, loss=val_loss, fname=self.conf.checkpoint_best)\n val_loss_best = val_loss\n epoch_last_improvement = epoch\n self._save_checkpoint(epoch=epoch, optimizer=optimizer, loss=val_loss, fname=self.conf.checkpoint_latest)\n self.conf.write_training_metadata(train_data)\n\n # Early stopping\n if epoch_last_improvement is not None and self.conf.early_stopping_patience is not None and epoch - epoch_last_improvement >= self.conf.early_stopping_patience:\n logger.info(f\"Stopping early after {epoch - epoch_last_improvement} epochs without improvement in validation loss.\")\n break\n\n\n def _save_checkpoint(self, epoch: int, optimizer: torch.optim.Optimizer, loss: float, fname: str):\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': self.model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': loss,\n }, fname)\n\n\n def _compute_loss(self, batch_input: torch.Tensor, batch_pred: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute the loss\n\n Args:\n batch_input (torch.Tensor): Batch input of shape (batch_size, input_length (time), no_features)\n batch_pred (torch.Tensor): Batch prediction of shape (batch_size, prediction_length (time), no_features)\n\n Returns:\n torch.Tensor: Loss (MSE)\n \"\"\" \n\n # Forward pass\n 
batch_pred_hat = self.model(batch_input)\n\n # Compute MSE loss\n loss = torch.nn.functional.mse_loss(batch_pred_hat, batch_pred)\n\n # Normalize the loss by the batch size\n # batch_size = batch_input.size(0)\n # loss /= batch_size\n\n return loss\n\n\n def _train_step(self, batch_input: torch.Tensor, batch_pred: torch.Tensor, optimizer: torch.optim.Optimizer) -> float:\n \"\"\"Training step\n\n Args:\n batch_input (torch.Tensor): Input data of shape (batch_size, input_length (time), no_features)\n batch_pred (torch.Tensor): Prediction data of shape (batch_size, prediction_length (time), no_features)\n optimizer (torch.optim.Optimizer): Optimizer\n\n Returns:\n float: Loss (MSE)\n \"\"\" \n optimizer.zero_grad()\n\n # Train mode\n self.model.train()\n\n # Loss\n loss = self._compute_loss(batch_input, batch_pred)\n\n # Backward pass\n loss.backward()\n\n # Update parameters\n optimizer.step()\n\n return loss.item()" } ]
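The `_save_checkpoint` / `load_checkpoint` pair in the TSMixer snippet above uses a plain dict checkpoint with `epoch`, `model_state_dict`, `optimizer_state_dict`, and `loss` keys. A self-contained round-trip of that exact format; the toy model and file name are illustrative stand-ins.

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # stand-in for TSMixerModel
opt = torch.optim.Adam(model.parameters(), lr=1e-3)

torch.save({
    "epoch": 3,
    "model_state_dict": model.state_dict(),
    "optimizer_state_dict": opt.state_dict(),
    "loss": 0.123,
}, "latest.pth")

ckpt = torch.load("latest.pth")
model.load_state_dict(ckpt["model_state_dict"])
opt.load_state_dict(ckpt["optimizer_state_dict"])
print(ckpt["epoch"], ckpt["loss"])  # resume bookkeeping as in TSMixer.train
```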
from utils import TSMixer, plot_preds, plot_loss, TSMixerConf, TSMixerGridSearch import argparse import yaml import os
6,955
if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run") parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file") parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot") parser.add_argument("--show", action="store_true", required=False, help="Show plots") args = parser.parse_args() if args.command == "train": # Load configuration assert args.conf is not None, "Must provide a configuration file" with open(args.conf, "r") as f:
if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--command", type=str, required=True, choices=["train", "predict", "loss", "grid-search"], help="Command to run") parser.add_argument("--conf", type=str, required=False, help="Path to the configuration file") parser.add_argument("--no-feats-plot", type=int, required=False, default=6, help="Number of features to plot") parser.add_argument("--show", action="store_true", required=False, help="Show plots") args = parser.parse_args() if args.command == "train": # Load configuration assert args.conf is not None, "Must provide a configuration file" with open(args.conf, "r") as f:
conf = TSMixerConf.from_dict(yaml.safe_load(f))
2
2023-11-18 19:56:18+00:00
8k
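TSMixerConf in the record above documents TEMPORAL_HOLDOUT as reserving the last portion (default 20%) of the time-ordered data for validation. A minimal sketch of that split; the function name and array-based interface are illustrative, not the repo's `load_csv_dataset` implementation.

```python
import numpy as np

def temporal_holdout(series: np.ndarray, holdout: float = 0.2):
    """series: (time, features). Return (train, val) preserving time order."""
    assert 0.0 < holdout < 1.0
    split = int(len(series) * (1.0 - holdout))
    return series[:split], series[split:]

data = np.random.randn(100, 3)       # 100 time steps, 3 features
train, val = temporal_holdout(data)  # last 20 steps held out
assert len(train) == 80 and len(val) == 20
```

Keeping the holdout at the end of the series, rather than shuffling, avoids leaking future values into the training windows.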
Jisencc/yolov5_dual_weighting
models/yolo.py
[ { "identifier": "check_anchor_order", "path": "utils/autoanchor.py", "snippet": "def check_anchor_order(m):\n # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer\n da = a[-1] - a[0] # delta a\n ds = m.stride[-1] - m.stride[0] # delta s\n if da and (da.sign() != ds.sign()): # same order\n LOGGER.info(f'{PREFIX}Reversing anchor order')\n m.anchors[:] = m.anchors.flip(0)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_version", "path": "utils/general.py", "snippet": "def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n # Check version vs. required version\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string\n if hard:\n assert result, emojis(s) # assert min requirements met\n if verbose and not result:\n LOGGER.warning(s)\n return result" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "make_divisible", "path": "utils/general.py", "snippet": "def make_divisible(x, divisor):\n # Returns nearest x divisible by divisor\n if isinstance(divisor, torch.Tensor):\n divisor = int(divisor.max()) # to int\n return math.ceil(x / divisor) * divisor" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(args: Optional[dict] = None, show_file=True, show_func=False):\n # Print function arguments (optional args dict)\n x = inspect.currentframe().f_back # previous frame\n file, _, func, _, _ = inspect.getframeinfo(x)\n if args is None: # get args automatically\n args, _, _, frm = inspect.getargvalues(x)\n args = {k: v for k, v in frm.items() if k in args}\n try:\n file = Path(file).resolve().relative_to(ROOT).with_suffix('')\n except ValueError:\n file = Path(file).stem\n s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')\n LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))" }, { "identifier": "feature_visualization", "path": "utils/plots.py", "snippet": "def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\n \"\"\"\n x: Features to be visualized\n module_type: Module type\n stage: Module stage within model\n n: Maximum number of feature maps to plot\n save_dir: Directory to save results\n \"\"\"\n if 'Detect' not in module_type:\n batch, channels, height, width = x.shape # batch, channels, height, width\n if height > 1 and width > 1:\n f = save_dir / f\"stage{stage}_{module_type.split('.')[-1]}_features.png\" # filename\n\n blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels\n n = min(n, channels) # number of plots\n fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols\n ax = ax.ravel()\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n for i in range(n):\n ax[i].imshow(blocks[i].squeeze()) # cmap='gray'\n ax[i].axis('off')\n\n LOGGER.info(f'Saving {f}... 
({n}/{channels})')\n plt.savefig(f, dpi=300, bbox_inches='tight')\n plt.close()\n np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save" }, { "identifier": "fuse_conv_and_bn", "path": "utils/torch_utils.py", "snippet": "def fuse_conv_and_bn(conv, bn):\n # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True).requires_grad_(False).to(conv.weight.device)\n\n # Prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n # Prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv" }, { "identifier": "initialize_weights", "path": "utils/torch_utils.py", "snippet": "def initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True" }, { "identifier": "model_info", "path": "utils/torch_utils.py", "snippet": "def model_info(model, verbose=False, imgsz=640):\n # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print(f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPs\n p = next(model.parameters())\n stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride\n im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format\n flops = thop.profile(deepcopy(model), inputs=(im, ), verbose=False)[0] / 1E9 * 2 # stride GFLOPs\n imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float\n fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs\n except Exception:\n fs = ''\n\n name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'\n LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}')" }, { "identifier": "profile", "path": "utils/torch_utils.py", "snippet": "def profile(input, ops, n=10, device=None):\n \"\"\" YOLOv5 speed/memory/FLOPs profiler\n Usage:\n input = torch.randn(16, 3, 640, 640)\n m1 = lambda x: x * torch.sigmoid(x)\n m2 = nn.SiLU()\n profile(input, [m1, m2], n=100) # profile over 100 iterations\n \"\"\"\n results = []\n if not isinstance(device, torch.device):\n device = select_device(device)\n print(f\"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem 
(GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}\"\n f\"{'input':>24s}{'output':>24s}\")\n\n for x in input if isinstance(input, list) else [input]:\n x = x.to(device)\n x.requires_grad = True\n for m in ops if isinstance(ops, list) else [ops]:\n m = m.to(device) if hasattr(m, 'to') else m # device\n m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m\n tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward\n try:\n flops = thop.profile(m, inputs=(x, ), verbose=False)[0] / 1E9 * 2 # GFLOPs\n except Exception:\n flops = 0\n\n try:\n for _ in range(n):\n t[0] = time_sync()\n y = m(x)\n t[1] = time_sync()\n try:\n _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()\n t[2] = time_sync()\n except Exception: # no backward method\n # print(e) # for debug\n t[2] = float('nan')\n tf += (t[1] - t[0]) * 1000 / n # ms per op forward\n tb += (t[2] - t[1]) * 1000 / n # ms per op backward\n mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)\n s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes\n p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters\n print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')\n results.append([p, flops, mem, tf, tb, s_in, s_out])\n except Exception as e:\n print(e)\n results.append(None)\n torch.cuda.empty_cache()\n return results" }, { "identifier": "scale_img", "path": "utils/torch_utils.py", "snippet": "def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)\n # Scales img(bs,3,y,x) by ratio constrained to gs-multiple\n if ratio == 1.0:\n return img\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # PyTorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
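The `Detect._make_grid` method in the cropped code below builds its coordinate grid with `torch.meshgrid(..., indexing='ij')`, stacks (x, y), expands to the anchor layout, and subtracts 0.5 so decoded centers `(xy * 2 + grid) * stride` cover the intended range. A standalone sketch of just that grid construction, with small shapes chosen for inspection:

```python
import torch

ny, nx, na = 4, 4, 3  # grid height/width and anchors per cell (illustrative)
y = torch.arange(ny, dtype=torch.float32)
x = torch.arange(nx, dtype=torch.float32)
yv, xv = torch.meshgrid(y, x, indexing="ij")  # torch>=1.10 signature
grid = torch.stack((xv, yv), 2).expand(1, na, ny, nx, 2) - 0.5
print(grid.shape)         # torch.Size([1, 3, 4, 4, 2])
print(grid[0, 0, 0, :2])  # first two cell offsets: (-0.5, -0.5), (0.5, -0.5)
```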
import argparse import contextlib import os import platform import sys import thop # for FLOPs computation import yaml # for torch hub from copy import deepcopy from pathlib import Path from models.common import * # noqa from models.experimental import * # noqa from utils.autoanchor import check_anchor_order from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args from utils.plots import feature_visualization from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, time_sync)
5,570
try:
    import thop  # for FLOPs computation
except ImportError:
    thop = None


class Detect(nn.Module):
    # YOLOv5 Detect head for detection models
    stride = None  # strides computed during build
    dynamic = False  # force grid reconstruction
    export = False  # export mode

    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.inplace = inplace  # use inplace ops (e.g. slice assignment)

    def forward(self, x):
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)

                if isinstance(self, Segment):  # (boxes + masks)
                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
                else:  # Detect (boxes only)
                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, self.na * nx * ny, self.no))

        return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x)

    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
        d = self.anchors[i].device
        t = self.anchors[i].dtype
        shape = 1, self.na, ny, nx, 2  # grid shape
        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
        yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # torch>=0.7 compatibility
        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e.
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Segment(Detect): # YOLOv5 Segment head for segmentation models def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): super().__init__(nc, anchors, ch, inplace) self.nm = nm # number of masks self.npr = npr # number of protos self.no = 5 + nc + self.nm # number of outputs per anchor self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.proto = Proto(ch[0], self.npr, self.nm) # protos self.detect = Detect.forward def forward(self, x): p = self.proto(x[0]) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output if visualize: feature_visualization(x, m.type, m.i, save_dir=visualize) return x def _profile_one_layer(self, m, x, dt): c = m == self.model[-1] # is final layer, copy input as inplace fix o = thop.profile(m, inputs=(x.copy() if c else x, ), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') if c: LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... ') for m in self.model.modules(): if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm m.forward = m.forward_fuse # update forward self.info() return self def info(self, verbose=False, img_size=640): # print model information
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
YOLO-specific modules

Usage:
    $ python models/yolo.py --cfg yolov5s.yaml
"""

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
if platform.system() != 'Windows':
    ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative

try:
    import thop  # for FLOPs computation
except ImportError:
    thop = None


class Detect(nn.Module):
    # YOLOv5 Detect head for detection models
    stride = None  # strides computed during build
    dynamic = False  # force grid reconstruction
    export = False  # export mode

    def __init__(self, nc=80, anchors=(), ch=(), inplace=True):  # detection layer
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [torch.empty(0) for _ in range(self.nl)]  # init grid
        self.anchor_grid = [torch.empty(0) for _ in range(self.nl)]  # init anchor grid
        self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2))  # shape(nl,na,2)
        self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)  # output conv
        self.inplace = inplace  # use inplace ops (e.g. slice assignment)

    def forward(self, x):
        z = []  # inference output
        for i in range(self.nl):
            x[i] = self.m[i](x[i])  # conv
            bs, _, ny, nx = x[i].shape  # x(bs,255,20,20) to x(bs,3,20,20,85)
            x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()

            if not self.training:  # inference
                if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
                    self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)

                if isinstance(self, Segment):  # (boxes + masks)
                    xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
                    xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
                else:  # Detect (boxes only)
                    xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
                    xy = (xy * 2 + self.grid[i]) * self.stride[i]  # xy
                    wh = (wh * 2) ** 2 * self.anchor_grid[i]  # wh
                    y = torch.cat((xy, wh, conf), 4)
                z.append(y.view(bs, self.na * nx * ny, self.no))

        return x if self.training else (torch.cat(z, 1), ) if self.export else (torch.cat(z, 1), x)

    def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
        d = self.anchors[i].device
        t = self.anchors[i].dtype
        shape = 1, self.na, ny, nx, 2  # grid shape
        y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
        yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x)  # torch>=0.7 compatibility
        grid = torch.stack((xv, yv), 2).expand(shape) - 0.5  # add grid offset, i.e.
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Segment(Detect): # YOLOv5 Segment head for segmentation models def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): super().__init__(nc, anchors, ch, inplace) self.nm = nm # number of masks self.npr = npr # number of protos self.no = 5 + nc + self.nm # number of outputs per anchor self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.proto = Proto(ch[0], self.npr, self.nm) # protos self.detect = Detect.forward def forward(self, x): p = self.proto(x[0]) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output if visualize: feature_visualization(x, m.type, m.i, save_dir=visualize) return x def _profile_one_layer(self, m, x, dt): c = m == self.model[-1] # is final layer, copy input as inplace fix o = thop.profile(m, inputs=(x.copy() if c else x, ), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs t = time_sync() for _ in range(10): m(x.copy() if c else x) dt.append((time_sync() - t) * 100) if m == self.model[0]: LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') if c: LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers LOGGER.info('Fusing layers... ') for m in self.model.modules(): if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv delattr(m, 'bn') # remove batchnorm m.forward = m.forward_fuse # update forward self.info() return self def info(self, verbose=False, img_size=640): # print model information
model_info(self, verbose, img_size)
9
2023-11-12 13:28:26+00:00
8k
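`fuse_conv_and_bn` in the record above folds BatchNorm statistics into the preceding convolution. A quick numerical check of that folding, applying the same formulas to a toy layer pair and confirming both paths agree in eval mode; the layer sizes are arbitrary.

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, 3, padding=1, bias=False)
bn = nn.BatchNorm2d(8)
with torch.no_grad():  # give BN non-trivial statistics so the test is meaningful
    bn.running_mean.uniform_(-1, 1)
    bn.running_var.uniform_(0.5, 1.5)
    bn.weight.uniform_(0.5, 1.5)
    bn.bias.uniform_(-1, 1)
bn.eval()

# Fold BN into a new conv, per the formulas in the snippet
fused = nn.Conv2d(3, 8, 3, padding=1, bias=True)
w_conv = conv.weight.clone().view(8, -1)
w_bn = torch.diag(bn.weight / torch.sqrt(bn.eps + bn.running_var))
b_bn = bn.bias - bn.weight * bn.running_mean / torch.sqrt(bn.running_var + bn.eps)
with torch.no_grad():
    fused.weight.copy_((w_bn @ w_conv).view(fused.weight.shape))
    fused.bias.copy_(b_bn)  # conv has no bias, so the w_bn @ b_conv term vanishes

x = torch.randn(1, 3, 16, 16)
with torch.no_grad():
    assert torch.allclose(bn(conv(x)), fused(x), atol=1e-5)
```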
giu-guarino/PCA-Z-PNN
test.py
[ { "identifier": "PCA_Z_PNN_model", "path": "network.py", "snippet": "class PCA_Z_PNN_model(nn.Module):\n def __init__(self, nbands, padding='same', padding_mode='reflect', bias=True) -> None:\n super(PCA_Z_PNN_model, self).__init__()\n self.conv1 = nn.Conv2d(nbands + 1, 48, 7, padding=padding, padding_mode=padding_mode, bias=bias)\n self.conv2 = nn.Conv2d(48, 32, 5, padding=padding, padding_mode=padding_mode, bias=bias)\n self.conv3 = nn.Conv2d(32, nbands, 3, padding=padding, padding_mode=padding_mode, bias=bias)\n\n\n def forward(self, input):\n x = func.relu(self.conv1(input))\n x = func.relu(self.conv2(x))\n x = self.conv3(x)\n x = x + input[:, :-1, :, :]\n return x" }, { "identifier": "SpectralLoss", "path": "loss.py", "snippet": "class SpectralLoss(nn.Module):\n def __init__(self, mtf, ratio, device):\n\n # Class initialization\n super(SpectralLoss, self).__init__()\n kernel = mtf\n # Parameters definition\n self.nbands = kernel.shape[-1]\n self.device = device\n self.ratio = ratio\n\n # Conversion of filters in Tensor\n self.pad = floor((kernel.shape[0] - 1) / 2)\n\n self.cut_border = kernel.shape[0] // 2 // ratio\n\n kernel = np.moveaxis(kernel, -1, 0)\n kernel = np.expand_dims(kernel, axis=1)\n\n kernel = torch.from_numpy(kernel).type(torch.float32)\n\n # DepthWise-Conv2d definition\n self.depthconv = nn.Conv2d(in_channels=self.nbands,\n out_channels=self.nbands,\n groups=self.nbands,\n kernel_size=kernel.shape,\n bias=False)\n\n self.depthconv.weight.data = kernel\n self.depthconv.weight.requires_grad = False\n\n self.loss = nn.L1Loss(reduction='mean')\n\n def forward(self, outputs, labels):\n\n outputs = self.depthconv(outputs)\n outputs = outputs[:, :, 2::self.ratio, 2::self.ratio]\n\n loss_value = self.loss(outputs, labels[:, :, self.cut_border:-self.cut_border, self.cut_border:-self.cut_border])\n\n return loss_value" }, { "identifier": "StructuralLoss", "path": "loss.py", "snippet": "class StructuralLoss(nn.Module):\n\n def __init__(self, sigma):\n # Class initialization\n super(StructuralLoss, self).__init__()\n\n # Parameters definition:\n self.scale = ceil(sigma / 2)\n\n def forward(self, outputs, labels, xcorr_thr):\n x_corr = torch.clamp(ccorr(outputs, labels, self.scale), min=-1)\n x = 1.0 - x_corr\n\n with torch.no_grad():\n loss_cross_corr_wo_thr = torch.mean(x)\n\n worst = x.gt(xcorr_thr)\n y = x * worst\n loss_cross_corr = torch.mean(y)\n\n return loss_cross_corr, loss_cross_corr_wo_thr.item()" }, { "identifier": "gen_mtf", "path": "tools/spectral_tools.py", "snippet": "def gen_mtf(ratio, sensor='none', kernel_size=41, nbands=3):\r\n \"\"\"\r\n Compute the estimated MTF filter kernels for the supported satellites.\r\n Parameters\r\n ----------\r\n ratio : int\r\n The resolution scale which elapses between MS and PAN.\r\n sensor : str\r\n The name of the satellites which has provided the images.\r\n kernel_size : int\r\n The size of the kernel (Only squared kernels have been implemented).\r\n Return\r\n ------\r\n kernel : Numpy array\r\n The filter based on Modulation Transfer Function for the desired satellite.\r\n \"\"\"\r\n GNyq = []\r\n\r\n if sensor == 'S2-10':\r\n GNyq = [0.275, 0.28, 0.25, 0.24]\r\n elif sensor == 'S2-10-PAN':\r\n GNyq = [0.26125] * nbands\r\n elif sensor == 'S2-20':\r\n GNyq = [0.365, 0.33, 0.34, 0.32, 0.205, 0.235]\r\n elif sensor == 'S2-60':\r\n GNyq = [0.3175, 0.295, 0.30]\r\n elif sensor == 'S2-60_bis':\r\n GNyq = [0.3175, 0.295]\r\n elif sensor == 'WV3':\r\n GNyq = [0.325, 0.355, 0.360, 0.350, 0.365, 0.360, 0.335, 0.315] ## TO 
REMOVE\r\n else:\r\n GNyq = [0.3] * nbands\r\n\r\n h = nyquist_filter_generator(GNyq, ratio, kernel_size)\r\n\r\n return h\r" }, { "identifier": "normalize_prisma", "path": "tools/spectral_tools.py", "snippet": "def normalize_prisma(img, nbits, nbands):\r\n return img / (np.sqrt(nbands)*(2**nbits))\r" }, { "identifier": "denormalize_prisma", "path": "tools/spectral_tools.py", "snippet": "def denormalize_prisma(img, nbits, nbands):\r\n return img * (np.sqrt(nbands)*(2**nbits))\r" }, { "identifier": "open_mat", "path": "dataset.py", "snippet": "def open_mat(path):\n # Open .mat file\n dic_file = io.loadmat(path)\n\n # Extract fields and convert them in float32 numpy arrays\n pan_np = dic_file['I_PAN'].astype(np.float32)\n ms_lr_np = dic_file['I_MS_LR'].astype(np.float32)\n ms_np = dic_file['I_MS'].astype(np.float32)\n\n if 'I_GT' in dic_file.keys():\n gt_np = dic_file['I_GT'].astype(np.float32)\n gt = torch.from_numpy(np.moveaxis(gt_np, -1, 0)[None, :, :, :])\n else:\n gt = None\n\n # Convert numpy arrays to torch tensors\n ms_lr = torch.from_numpy(np.moveaxis(ms_lr_np, -1, 0)[None, :, :, :])\n pan = torch.from_numpy(pan_np[None, None, :, :])\n ms = torch.from_numpy(np.moveaxis(ms_np, -1, 0)[None, :, :, :])\n wavelenghts = torch.from_numpy(dic_file['Wavelengths']).float()\n\n return pan, ms_lr, ms, gt, wavelenghts" }, { "identifier": "config", "path": "config_dict.py", "snippet": "" }, { "identifier": "local_corr_mask", "path": "tools/cross_correlation.py", "snippet": "def local_corr_mask(img_in, ratio, sensor, device, kernel=8):\n \"\"\"\n Compute the threshold mask for the structural loss.\n\n Parameters\n ----------\n img_in : Torch Tensor\n The test image, already normalized and with the MS part upsampled with ideal interpolator.\n ratio : int\n The resolution scale which elapses between MS and PAN.\n sensor : str\n The name of the satellites which has provided the images.\n device : Torch device\n The device on which perform the operation.\n kernel : int\n The semi-width for local cross-correlation computation.\n (See the cross-correlation function for more details)\n\n Return\n ------\n mask : PyTorch Tensor\n Local correlation field stack, composed by each MS and PAN. 
Dimensions: Batch, B, H, W.\n\n \"\"\"\n\n I_PAN = torch.clone(torch.unsqueeze(img_in[:, -1, :, :], dim=1))\n I_MS = torch.clone(img_in[:, :-1, :, :])\n\n MTF_kern = gen_mtf(ratio, sensor)[:, :, 0]\n MTF_kern = np.expand_dims(MTF_kern, axis=(0, 1))\n MTF_kern = torch.from_numpy(MTF_kern).type(torch.float32)\n pad = floor((MTF_kern.shape[-1] - 1) / 2)\n\n padding = nn.ReflectionPad2d(pad)\n\n depthconv = nn.Conv2d(in_channels=1,\n out_channels=1,\n groups=1,\n kernel_size=MTF_kern.shape,\n bias=False)\n\n depthconv.weight.data = MTF_kern\n depthconv.weight.requires_grad = False\n depthconv.to(device)\n I_PAN = padding(I_PAN)\n I_PAN = depthconv(I_PAN)\n mask = xcorr_torch(I_PAN, I_MS, kernel)\n mask = 1.0 - mask\n\n return mask.float()" }, { "identifier": "pca", "path": "tools/pca_tools.py", "snippet": "def pca(ms_lr):\n \"\"\"\n Perform Principal Component Analysis (PCA) on a PyTorch tensor image.\n\n Args:\n ms_lr (torch.Tensor): Input image tensor of shape (1, B, H, W).\n\n Returns:\n pca_image (torch.Tensor): PCA-transformed image tensor with the same shape.\n pca_matrix (torch.Tensor): PCA transformation matrix.\n mean (torch.Tensor): Tensor of mean values.\n \"\"\"\n # Reshape the input tensor to (B, H * W) and mean-center the data\n _, B, H, W = ms_lr.shape\n flattened = torch.reshape(ms_lr, (B, H*W))\n mean = torch.mean(flattened, dim=1).unsqueeze(1)\n centered = flattened - mean\n\n # Compute the covariance matrix\n cov_matrix = torch.matmul(centered, centered.t()) / (H * W - 1)\n\n # Perform PCA using SVD\n U, S, _ = torch.svd(cov_matrix)\n\n # PCA-transformed image\n pca_image = torch.matmul(-U.t(), centered).view(1, B, H, W)\n\n return pca_image, U, mean" }, { "identifier": "inverse_pca", "path": "tools/pca_tools.py", "snippet": "def inverse_pca(pca_image, pca_matrix, mean):\n \"\"\"\n Perform the inverse of Principal Component Analysis (PCA) on a PCA-transformed image.\n\n Args:\n pca_image (torch.Tensor): PCA-transformed image tensor with the same shape as the input image.\n pca_matrix (torch.Tensor): PCA transformation matrix obtained from the 'pca' function.\n mean (torch.Tensor): Tensor of mean values.\n\n Returns:\n original_image (torch.Tensor): Inverse PCA-reconstructed image tensor.\n \"\"\"\n _, B, H, W = pca_image.shape\n flattened_pca = torch.reshape(pca_image, (B, H*W))\n\n flattened_image = torch.matmul(-pca_matrix, flattened_pca) + mean\n\n # Reconstruct the original image\n original_image = flattened_image.view(1, B, H, W)\n\n return original_image" } ]
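The `pca` / `inverse_pca` pair defined in the context above projects the band stack onto the eigenvectors of its covariance matrix and inverts that projection; because the eigenvector matrix U is orthogonal, the two calls form a lossless round trip up to floating-point error. A minimal sketch of that check, assuming the repo's `tools/pca_tools.py` is importable and using an arbitrary random tensor shape:

import torch

from tools.pca_tools import pca, inverse_pca

# (1, B, H, W) band stack; the shape is illustrative, not from the repo.
ms_lr = torch.rand(1, 6, 64, 64)
pca_img, U, mean = pca(ms_lr)          # project bands onto principal axes
recon = inverse_pca(pca_img, U, mean)  # invert the projection

# U @ U.T is the identity, so the reconstruction matches the input
# up to float32 rounding.
print(torch.allclose(recon, ms_lr, atol=1e-4))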
import argparse
import gc
import os
import numpy as np
import scipy.io as io
import torch
from tqdm import tqdm
from network import PCA_Z_PNN_model
from loss import SpectralLoss, StructuralLoss
from tools.spectral_tools import gen_mtf, normalize_prisma, denormalize_prisma
from dataset import open_mat
from config_dict import config
from tools.cross_correlation import local_corr_mask
from tools.pca_tools import pca, inverse_pca
from skimage.transform import rescale
3,645
gpu_number = args.gpu_number use_cpu = args.use_cpu # Training hyperparameters if args.learning_rate != -1: learning_rate = args.learning_rate else: learning_rate = config['learning_rate'] # Satellite configuration sensor = config['satellite'] ratio = config['ratio'] num_blocks = config['num_blocks'] n_components = config['n_components'] last_wl = config['last_wl'] epochs = args.epochs if epochs == -1: epochs = config['epochs'] # Environment Configuration os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_number) # Devices definition device = torch.device("cuda:0" if torch.cuda.is_available() and not use_cpu else "cpu") if sensor == 'PRISMA': normalize = normalize_prisma denormalize = denormalize_prisma else: raise 'Satellite not supported' # Open the image pan, ms_lr, ms, _, wl = open_mat(basepath) pan = normalize(pan, nbits=16, nbands=1).to(device) criterion_spec = SpectralLoss(gen_mtf(ratio, sensor, kernel_size=61, nbands=n_components), ratio, device).to(device) criterion_struct = StructuralLoss(ratio).to(device) history_loss_spec = [] history_loss_struct = [] alpha = config['alpha_1'] fused = [] band_blocks = [] band_rgb = 0 while wl[band_rgb] < last_wl: band_rgb += 1 band_blocks.append(ms_lr[:, :band_rgb + 1, :, :]) band_blocks.append(ms_lr[:, band_rgb:, :, :]) for block_index in range(num_blocks): net = PCA_Z_PNN_model(nbands=n_components).to(device) optim = torch.optim.Adam(net.parameters(), lr=learning_rate, betas=(config['beta_1'], config['beta_2'])) net.train() ms_lr_pca, W, mu = pca(band_blocks[block_index]) ms_pca = torch.tensor(rescale(torch.squeeze(ms_lr_pca).numpy(), ratio, order=3, channel_axis=0))[None, :, :, :] spec_ref_exp = normalize(ms_pca[:, :n_components, :, :], nbands=ms_pca.shape[1], nbits=16).to(device) spec_ref = normalize(ms_lr_pca[:, :n_components, :, :], nbands=ms_pca.shape[1], nbits=16).to(device) min_loss = torch.inf inp = torch.cat([spec_ref_exp, pan], dim=1) threshold = local_corr_mask(inp, ratio, sensor, device, config['semi_width']) if block_index == 1: alpha = config['alpha_2'] print('Block index {} / {}'.format(block_index + 1, num_blocks)) pbar = tqdm(range(epochs)) for epoch in pbar: pbar.set_description('Epoch %d/%d' % (epoch + 1, epochs)) net.train() optim.zero_grad() outputs = net(inp) loss_spec = criterion_spec(outputs, spec_ref) loss_struct, loss_struct_without_threshold = criterion_struct(outputs[:,:1,:,:], pan, threshold[:,:1,:,:]) loss = loss_spec + alpha * loss_struct loss.backward() optim.step() running_loss_spec = loss_spec.item() running_loss_struct = loss_struct_without_threshold history_loss_spec.append(running_loss_spec) history_loss_struct.append(running_loss_struct) if loss.item() < min_loss: min_loss = loss.item() if not os.path.exists('temp'): os.makedirs(os.path.join('temp')) torch.save(net.state_dict(), os.path.join('temp', 'PCA-Z-PNN_best_model.tar')) pbar.set_postfix( {'Spec Loss': running_loss_spec, 'Struct Loss': running_loss_struct}) net.eval() net.load_state_dict(torch.load(os.path.join('temp', 'PCA-Z-PNN_best_model.tar'))) ms_pca[:, :n_components, :, :] = denormalize(net(inp), nbands=ms_pca.shape[1], nbits=16)
def test_pca_z_pnn(args): # Paths and env configuration basepath = args.input method = 'PCA-Z-PNN' out_dir = os.path.join(args.out_dir, method) gpu_number = args.gpu_number use_cpu = args.use_cpu # Training hyperparameters if args.learning_rate != -1: learning_rate = args.learning_rate else: learning_rate = config['learning_rate'] # Satellite configuration sensor = config['satellite'] ratio = config['ratio'] num_blocks = config['num_blocks'] n_components = config['n_components'] last_wl = config['last_wl'] epochs = args.epochs if epochs == -1: epochs = config['epochs'] # Environment Configuration os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_number) # Devices definition device = torch.device("cuda:0" if torch.cuda.is_available() and not use_cpu else "cpu") if sensor == 'PRISMA': normalize = normalize_prisma denormalize = denormalize_prisma else: raise 'Satellite not supported' # Open the image pan, ms_lr, ms, _, wl = open_mat(basepath) pan = normalize(pan, nbits=16, nbands=1).to(device) criterion_spec = SpectralLoss(gen_mtf(ratio, sensor, kernel_size=61, nbands=n_components), ratio, device).to(device) criterion_struct = StructuralLoss(ratio).to(device) history_loss_spec = [] history_loss_struct = [] alpha = config['alpha_1'] fused = [] band_blocks = [] band_rgb = 0 while wl[band_rgb] < last_wl: band_rgb += 1 band_blocks.append(ms_lr[:, :band_rgb + 1, :, :]) band_blocks.append(ms_lr[:, band_rgb:, :, :]) for block_index in range(num_blocks): net = PCA_Z_PNN_model(nbands=n_components).to(device) optim = torch.optim.Adam(net.parameters(), lr=learning_rate, betas=(config['beta_1'], config['beta_2'])) net.train() ms_lr_pca, W, mu = pca(band_blocks[block_index]) ms_pca = torch.tensor(rescale(torch.squeeze(ms_lr_pca).numpy(), ratio, order=3, channel_axis=0))[None, :, :, :] spec_ref_exp = normalize(ms_pca[:, :n_components, :, :], nbands=ms_pca.shape[1], nbits=16).to(device) spec_ref = normalize(ms_lr_pca[:, :n_components, :, :], nbands=ms_pca.shape[1], nbits=16).to(device) min_loss = torch.inf inp = torch.cat([spec_ref_exp, pan], dim=1) threshold = local_corr_mask(inp, ratio, sensor, device, config['semi_width']) if block_index == 1: alpha = config['alpha_2'] print('Block index {} / {}'.format(block_index + 1, num_blocks)) pbar = tqdm(range(epochs)) for epoch in pbar: pbar.set_description('Epoch %d/%d' % (epoch + 1, epochs)) net.train() optim.zero_grad() outputs = net(inp) loss_spec = criterion_spec(outputs, spec_ref) loss_struct, loss_struct_without_threshold = criterion_struct(outputs[:,:1,:,:], pan, threshold[:,:1,:,:]) loss = loss_spec + alpha * loss_struct loss.backward() optim.step() running_loss_spec = loss_spec.item() running_loss_struct = loss_struct_without_threshold history_loss_spec.append(running_loss_spec) history_loss_struct.append(running_loss_struct) if loss.item() < min_loss: min_loss = loss.item() if not os.path.exists('temp'): os.makedirs(os.path.join('temp')) torch.save(net.state_dict(), os.path.join('temp', 'PCA-Z-PNN_best_model.tar')) pbar.set_postfix( {'Spec Loss': running_loss_spec, 'Struct Loss': running_loss_struct}) net.eval() net.load_state_dict(torch.load(os.path.join('temp', 'PCA-Z-PNN_best_model.tar'))) ms_pca[:, :n_components, :, :] = denormalize(net(inp), nbands=ms_pca.shape[1], nbits=16)
fused_block = inverse_pca(ms_pca, W, mu)
10
2023-11-13 10:26:11+00:00
8k
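One detail worth flagging in this record's code: the sensor guard ends with `raise 'Satellite not supported'`, and in Python 3 raising a plain string itself fails with `TypeError: exceptions must derive from BaseException`, so the intended message never reaches the user. A sketch of how that guard would read with a real exception class (the helper name is illustrative, not from the repo):

from tools.spectral_tools import normalize_prisma, denormalize_prisma

def select_normalizers(sensor: str):
    # Same branch as in the record's test.py, but raising an actual
    # exception type instead of a bare string.
    if sensor == "PRISMA":
        return normalize_prisma, denormalize_prisma
    raise ValueError(f"Satellite {sensor!r} not supported")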
airalcorn2/paved2paradise
train.py
[ { "identifier": "config", "path": "config.py", "snippet": "(C, S) = (64, 1)" }, { "identifier": "KITTIDataset", "path": "kitti_dataset.py", "snippet": "class KITTIDataset(Dataset):\n def __init__(\n self,\n dataset,\n jsons_path,\n npys_path,\n labels_path,\n idxs_path,\n backgrounds_path,\n json_fs,\n prepare_pillars,\n augment,\n max_drop_p,\n ):\n super().__init__()\n self.dataset = dataset\n self.jsons_path = jsons_path\n self.npys_path = npys_path\n self.labels_path = labels_path\n self.idxs_path = idxs_path\n self.backgrounds_path = backgrounds_path\n self.json_fs = json_fs\n self.prepare_pillars = prepare_pillars\n self.augment = augment\n self.max_drop_p = max_drop_p\n self.scans_name = \"kitti\"\n\n def __len__(self):\n return len(self.json_fs)\n\n def load_points(self, idx):\n json_f = self.json_fs[idx]\n with open(f\"{self.jsons_path}/{json_f}\") as f:\n metadata = json.load(f)\n\n frame_name = json_f.split(\".json\")[0]\n bbox_points = []\n labels = []\n if self.dataset == \"baseline\":\n bbox_fs = metadata[\"bboxes\"]\n else:\n bbox_fs = [f\"{frame_name}.npy\"]\n\n if self.augment:\n glob_angle = np.random.uniform(-ROT_GLOBAL, ROT_GLOBAL)\n glob_R = Rotation.from_euler(\"Z\", glob_angle).as_matrix()\n glob_t = np.random.normal(T_MU_GLOBAL, T_VAR_GLOBAL**0.5, size=3)\n glob_scale = np.random.uniform(*SCALE_GLOBAL)\n mirror = np.random.random() > 0.5\n\n for bbox_f in bbox_fs:\n points = np.load(f\"{self.npys_path}/{bbox_f}\")\n bbox_labels = np.load(f\"{self.labels_path}/{bbox_f}\")\n if self.augment:\n center = bbox_labels[:3]\n extent = bbox_labels[3:6]\n bbox_R = bbox_labels[6:].reshape(3, 3)\n\n # Apply object augmentations.\n if self.max_drop_p > 0.0:\n drop_p = np.random.uniform(0.0, self.max_drop_p)\n ps = np.random.random(len(points))\n drop = ps < drop_p\n points = points[~drop]\n if len(points) == 0:\n continue\n\n angle = np.random.uniform(-ROT_BOX, ROT_BOX)\n R = Rotation.from_euler(\"Z\", angle).as_matrix()\n points = (R @ (points - center).T).T + center\n bbox_R = R @ bbox_R\n\n t = np.random.normal(T_MU_BOX, T_VAR_BOX**0.5, size=3)\n points = points + t\n center = center + t\n\n # Apply global augmentations.\n points = glob_scale * ((glob_R @ points.T).T + glob_t)\n center = glob_scale * (glob_R @ center + glob_t)\n extent = glob_scale * extent\n bbox_R = glob_R @ bbox_R\n if mirror:\n points[:, 1] = -points[:, 1]\n center[1] = -center[1]\n rotvec = Rotation.from_matrix(bbox_R).as_rotvec()\n rotvec[-1] = -rotvec[-1]\n bbox_R = Rotation.from_rotvec(rotvec).as_matrix()\n\n bbox_labels = np.concatenate([center, extent, bbox_R.flatten()])\n\n bbox_points.append(points)\n labels.append(bbox_labels)\n\n if self.dataset == \"baseline\":\n bg_points = np.load(f\"{self.npys_path}/{frame_name}.npy\")\n\n else:\n bg_f = metadata[\"background\"]\n bg_points = np.load(f\"{self.backgrounds_path}/{bg_f}\")\n occlude_idxs = np.load(f\"{self.idxs_path}/{frame_name}.npy\")\n bg_mask = np.ones(len(bg_points), dtype=\"bool\")\n bg_mask[occlude_idxs] = False\n bg_points = bg_points[bg_mask]\n\n if self.augment:\n # Apply global augmentations.\n bg_points = glob_scale * ((glob_R @ bg_points.T).T + glob_t)\n if mirror:\n bg_points[:, 1] = -bg_points[:, 1]\n\n if len(bbox_points) > 0:\n bbox_points = np.concatenate(bbox_points)\n points = np.concatenate([bbox_points, bg_points])\n labels = np.stack(labels)\n else:\n points = bg_points\n labels = np.full(15, -500)\n\n return (points, labels)\n\n def __getitem__(self, idx):\n (points, labels) = self.load_points(idx)\n\n (pillar_pieces, 
tgt) = self.prepare_pillars(points, labels)\n pillars_buffer = Tensor(pillar_pieces[0])\n pillar_pixels = LongTensor(pillar_pieces[1])\n pillar_avgs = Tensor(pillar_pieces[2])\n\n return {\n \"pillar_buffers\": pillars_buffer,\n \"pillar_pixels\": pillar_pixels,\n \"pillar_avgs\": pillar_avgs,\n \"tgt\": tgt,\n }" }, { "identifier": "KITTIEnv", "path": "kitti_env.py", "snippet": "class KITTIEnv(P2PEnv):\n dataset = \"kitti\"\n\n # Defines the bounding prism (in meters) for the training data. The detection prism\n # is one meter shorter in the x and y dimensions to avoid cropping.\n # See Section 4.2 in Lang et al. (2019).\n x_range = (0, 20)\n y_range = (-10, 10)\n z_range = (-2.5, 2)\n\n # LiDAR sensor configurations.\n # Velodyne settings. See: https://hypertech.co.il/wp-content/uploads/2015/12/HDL-64E-Data-Sheet.pdf.\n # See also page 37 here: https://www.termocam.it/pdf/manuale-HDL-64E.pdf. The KITTI\n # dataset was collected at 10 Hz. See: https://www.cvlibs.net/publications/Geiger2012CVPR.pdf.\n min_elev = np.deg2rad(-24.8)\n max_elev = np.deg2rad(2)\n vert_res = 64\n elevs = np.linspace(min_elev, max_elev, vert_res)\n horiz_res = 2083\n azims = np.linspace(-np.pi, np.pi, horiz_res, False)\n azim_pm = 2 * np.pi / horiz_res\n # For some lidar sensors, like the Ouster, the sensor frame is shifted relative to\n # the lidar frame. See Section 3.1 in:\n # https://data.ouster.io/downloads/software-user-manual/software-user-manual-v2p0.pdf.\n sensor2lidar = -np.array([0, 0, 0.0])\n\n # Data-generating variables.\n # Whether to randomly flip the point clouds along the x-axis.\n mirror = True\n mirrors = [False, True] if mirror else [False]\n # Whether to randomly move the human point cloud in the region of interest.\n move = True\n # The number of points along one side of the grid used to estimate the ground plane.\n n_grid_points = 100\n # The range of the space used for building the leveling grid of points for object\n # scenes.\n x_start_end_object = (0, 7)\n y_start_end_object = (-3.0, 3.0)\n # The range of the space used for building the leveling grid of points for\n # background scenes.\n x_start_end_background = (6, 13)\n y_start_end_background = (-1.7, 1.7)\n # The range for randomly placing the object. 
Minus one to avoid cropped objects.\n # See get_kitti_stats.py for x minimum.\n samp_x_range = (2.25, x_range[1] - 1)\n samp_y_range = (y_range[0] + 1, y_range[1] - 1)\n # If an object point is within this distance of a lidar beam, the lidar beam\n # potentially hits the object.\n hit_thresh = 0.04\n # Points that are within this distance of a ray cause the point associated with the\n # ray to be occluded.\n occlude_object_thresh = 0.08\n occlude_background_thresh = 0.03\n # Numbers come from fitting an exponential function to KITTI pedestrian data.\n # See: get_kitti_stats.py.\n abc = (1445.4513596044249, 0.16880749180490645, 17.230571487821805)\n # The minimum proportion of the object that must be visible in the final scene.\n min_prop = 0.15\n # The proportion of KITTI training data that can potentially be used for\n # backgrounds.\n train_p = 0.9\n # The forward direction for the LiDAR sensor.\n fwd = np.array([1, 0, 0])\n # The maximum number of close object points to a ray that are used to estimate the\n # surface point of an object.\n neighbors = 2\n\n # Directory names.\n parking_lot_dir = \"parking_lot\"\n backgrounds_dir = \"backgrounds\"\n objects_dir = \"objects\"\n raw_dir = \"raw\"\n synthetic_dir = \"synthetic\"\n level_dir = \"level\"\n final_dir = \"final\"\n pcds_dir = \"pcds\"\n npys_dir = \"npys\"\n transforms_dir = \"levels\"\n raw_labels_dir = \"raw_labels\"\n labels_dir = \"labels\"\n idxs_dir = \"idxs\"\n jsons_dir = \"jsons\"\n\n # Dataset paths.\n datasets_root = \"/home/michael/datasets\"\n raw_backgrounds_path = f\"{datasets_root}/KITTI/training\"\n\n raw_objects_path = f\"{datasets_root}/Paved2Paradise/{raw_dir}/parking_lot\"\n raw_object_pcds_path = f\"{raw_objects_path}/{pcds_dir}\"\n\n samples_path = f\"{raw_backgrounds_path}/velodyne\"\n paved2paradise_path = f\"{raw_backgrounds_path}/paved2paradise\"\n npys_path = f\"{paved2paradise_path}/{npys_dir}\"\n labels_path = f\"{paved2paradise_path}/{labels_dir}\"\n jsons_path = f\"{paved2paradise_path}/{jsons_dir}\"\n\n # This is the root for where all of the synthetic data will be stored.\n synthetic_path = f\"{raw_backgrounds_path}/{synthetic_dir}\"\n\n raw_object_labels_path = f\"{synthetic_path}/{raw_labels_dir}\"\n\n level_path = f\"{synthetic_path}/{level_dir}\"\n\n unlevel_backgrounds_path = f\"{level_path}/{backgrounds_dir}\"\n unlevel_background_npys_path = f\"{unlevel_backgrounds_path}/{npys_dir}\"\n level_background_transforms_path = f\"{unlevel_backgrounds_path}/{transforms_dir}\"\n\n level_objects_path = f\"{level_path}/{objects_dir}\"\n level_object_npys_path = f\"{level_objects_path}/{npys_dir}\"\n level_object_labels_path = f\"{level_objects_path}/{labels_dir}\"\n\n final_path = f\"{synthetic_path}/{final_dir}\"\n final_npys_path = f\"{final_path}/{npys_dir}\"\n final_labels_path = f\"{final_path}/{labels_dir}\"\n final_idxs_path = f\"{final_path}/{idxs_dir}\"\n final_jsons_path = f\"{final_path}/{jsons_dir}\"\n\n # The Segments.ai object bounding boxes.\n labels_json_f = \"AAFieldDay.json\"\n\n # Weights & Biases stuff.\n wandb_project = \"paved2paradise_kitti\"\n entity = \"airalcorn2\"\n data_dict_f = \"data_dict.json\"\n kitti_data_dict_f = \"data_dict_kitti.json\"\n model_f = \"pointpillars.py\"\n config_f = \"config.pydict\"\n best_params_f = \"best_params.pth\"\n wandb_runs = \"wandb_runs\"" }, { "identifier": "PointPillars", "path": "pointpillars.py", "snippet": "class PointPillars(nn.Module):\n def __init__(\n self,\n position_encodings,\n mlp_layers,\n block_layers,\n side_cells,\n 
x_range,\n y_range,\n z_range,\n max_pillars,\n max_points,\n ):\n super().__init__()\n # Three for raw point coordinates plus three for difference from mean point in\n # cell.\n in_feats = 3 + 3\n self.pointnet = PointNet(in_feats, mlp_layers, position_encodings)\n backbone = Backbone(mlp_layers[-1], block_layers)\n in_channels = sum([block_layer[-1] for block_layer in block_layers])\n # Presence/absence in grid cell.\n head = nn.Conv2d(in_channels, 1, 1)\n # Predictor takes in a pseudo image, extracts features using the backbone, and\n # then predicts a presence/absence grid using the head.\n self.predictor = Predictor(backbone, head, side_cells)\n\n # Set up the prepare_pillars function.\n self.max_pillars = max_pillars\n self.max_points = max_points\n self.prepare_pillars = get_prepare_pillars_function(\n max_pillars, max_points, side_cells, x_range, y_range, z_range\n )\n\n # Stuff for predicting detections.\n self.register_buffer(\"prob_idxs\", torch.arange(side_cells**2).long())\n self.register_buffer(\"side_cells\", torch.LongTensor([side_cells]))\n self.register_buffer(\"offset\", (1 / self.side_cells) / 2)\n self.register_buffer(\"x_scale\", torch.Tensor([x_range[1] - x_range[0]]))\n self.register_buffer(\"x_shift\", torch.Tensor([x_range[0]]))\n self.register_buffer(\"y_scale\", torch.Tensor([y_range[1] - y_range[0]]))\n self.register_buffer(\"y_shift\", torch.Tensor([y_range[0]]))\n self.cell_length = (x_range[1] - x_range[0]) / side_cells\n self.cell_width = (y_range[1] - y_range[0]) / side_cells\n\n def __str__(self):\n desc = f\"PointPillars\\n\"\n desc += f\"max_pillars: {self.max_pillars}\\n\"\n desc += f\"max_points: {self.max_points}\\n\"\n desc += f\"side_cells: {self.side_cells[0].item()}\\n\"\n desc += f\"cell_length: {self.cell_length}\\n\"\n desc += f\"cell_width: {self.cell_width}\"\n return desc\n\n def get_detections(self, points, device, min_logit):\n pillar_pieces = self.prepare_pillars(points)[0]\n pillar_buffers = Tensor(pillar_pieces[0])[None].to(device)\n pillar_pixels = LongTensor(pillar_pieces[1])[None].to(device)\n pillar_avgs = Tensor(pillar_pieces[2])[None].to(device)\n\n preds = self(pillar_buffers, pillar_avgs, pillar_pixels)\n\n keep_idxs = self.prob_idxs[preds[0].flatten() > min_logit]\n rows = keep_idxs // self.side_cells\n cols = keep_idxs % self.side_cells\n\n # Convert rows and columns into world coordinates.\n xs = self.x_scale * (1 - rows / self.side_cells - self.offset) + self.x_shift\n ys = self.y_scale * (cols / self.side_cells + self.offset) + self.y_shift\n centers = torch.stack([xs, ys]).permute(1, 0)\n\n return centers\n\n def forward(self, pillar_buffers, pillar_avgs, pillar_pixels):\n P = pillar_buffers.shape[2]\n pillar_avgs = pillar_avgs.unsqueeze(2).expand(-1, -1, P, -1)\n pillar_buffers[..., 3:] = pillar_buffers[..., :3] - pillar_avgs\n # These are empty points, so we zero out their center offset.\n mask = (pillar_buffers[..., :3].sum(-1) == 0).unsqueeze(3)\n pillar_buffers[mask.squeeze(-1)] = 0\n pillar_buffers = pillar_buffers.permute(0, 3, 1, 2).contiguous()\n\n # See Section 2.1. Each pillar buffer is processed independently by a PointNet.\n pn_feats = self.pointnet(pillar_buffers)\n\n # See Section 2.2 and Section 2.3.\n preds = self.predictor(pn_feats, pillar_pixels)\n\n return preds" } ]
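The `PointPillars` snippet above runs a PointNet over the pillar buffers and hands the per-pillar features plus their grid coordinates to a `Predictor`, which scatters them into a dense pseudo-image before the 2D backbone. The scatter itself is not shown in the context, so the following is only a sketch of what that step typically looks like in PointPillars-style models (names and shapes are illustrative):

import torch

def scatter_pillars(pn_feats, pillar_pixels, side_cells):
    # pn_feats: (P, C) feature vector per non-empty pillar.
    # pillar_pixels: (P, 2) integer (row, col) cell of each pillar,
    # assumed unique per pillar.
    P, C = pn_feats.shape
    canvas = torch.zeros(C, side_cells, side_cells, dtype=pn_feats.dtype)
    canvas[:, pillar_pixels[:, 0], pillar_pixels[:, 1]] = pn_feats.t()
    return canvas

feats = torch.rand(12, 64)
pixels = torch.randint(0, 32, (12, 2))
print(scatter_pillars(feats, pixels, side_cells=32).shape)  # (64, 32, 32)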
import os
import sys
import json
import random
import shutil
import torch
import wandb
from config import config
from kitti_dataset import KITTIDataset
from kitti_env import KITTIEnv
from pointpillars import PointPillars
from torch import nn, optim
from torch.utils.data import DataLoader
5,640
for idx, tensors_dict in enumerate(train_loader): if idx % eval_every == 0: if use_amp: with torch.autocast(device_type="cuda", dtype=torch.float16): total_valid_loss = validate( model, valid_loader, device, criterion ) else: total_valid_loss = validate(model, valid_loader, device, criterion) if total_valid_loss < best_valid_loss: best_valid_loss = total_valid_loss no_improvement = 0 lr_drops = 0 torch.save( model.state_dict(), f"{wandb.run.dir}/{KITTIEnv.best_params_f}" ) else: no_improvement += 1 if no_improvement == patience: lr_drops += 1 if lr_drops == max_lr_drops: sys.exit() no_improvement = 0 lr_reductions += 1 for g in optimizer.param_groups: g["lr"] *= lr_reducer if n_train > 0: average_train_loss = total_train_loss / n_train else: average_train_loss = total_train_loss wandb.log( { "average_train_loss": average_train_loss, "average_valid_loss": total_valid_loss / n_valid, "lr_reductions": lr_reductions, } ) total_train_loss = 0.0 n_train = 0 model.train() optimizer.zero_grad() if use_amp: with torch.autocast(device_type="cuda", dtype=torch.float16): loss = get_loss(model, tensors_dict, device, criterion) scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() else: loss = get_loss(model, tensors_dict, device, criterion) loss.backward() optimizer.step() total_train_loss += loss.item() n_train += len(tensors_dict["tgt"]) def main(): dataset = config["dataset"] assert dataset in {"baseline", "paved2paradise"} if dataset == "paved2paradise": jsons_path = KITTIEnv.final_jsons_path npys_path = KITTIEnv.final_npys_path labels_path = KITTIEnv.final_labels_path idxs_path = KITTIEnv.final_idxs_path backgrounds_path = KITTIEnv.unlevel_background_npys_path data_dict_f = KITTIEnv.data_dict_f else: jsons_path = KITTIEnv.jsons_path npys_path = KITTIEnv.npys_path labels_path = KITTIEnv.labels_path idxs_path = None backgrounds_path = None data_dict_f = KITTIEnv.kitti_data_dict_f # This should have been created when preparing the KITTI data. assert os.path.isfile(data_dict_f) if not os.path.isfile(data_dict_f): init_data_dict(jsons_path, data_dict_f) with open(data_dict_f) as f: data_dict = json.load(f) config["data_dict"] = data_dict config["model_args"]["x_range"] = KITTIEnv.x_range config["model_args"]["y_range"] = KITTIEnv.y_range config["model_args"]["z_range"] = KITTIEnv.z_range wandb.init(project=KITTIEnv.wandb_project, entity=KITTIEnv.entity, config=config) shutil.copyfile(KITTIEnv.model_f, f"{wandb.run.dir}/{KITTIEnv.model_f}") device = torch.device("cuda:0") model = PointPillars(**config["model_args"]).to(device) print(model) n_params = sum(p.numel() for p in model.parameters() if p.requires_grad) print(f"Parameters: {n_params}") print(repr(model)) dataset_args = { "dataset": dataset, "jsons_path": jsons_path, "npys_path": npys_path, "labels_path": labels_path, "idxs_path": idxs_path, "backgrounds_path": backgrounds_path, "json_fs": data_dict["train"], "prepare_pillars": model.prepare_pillars, "augment": True, "max_drop_p": config["max_drop_p"], }
# See: https://github.com/pytorch/pytorch/issues/9158#issuecomment-402358096. if len(sys.argv) > 1: os.environ["CUDA_VISIBLE_DEVICES"] = sys.argv[1] def init_data_dict(jsons_path, data_dict_f): json_fs = os.listdir(jsons_path) random.shuffle(json_fs) train_p = config["train_p"] train_n = int(train_p * len(json_fs)) train_val_fs = json_fs[:train_n] test_fs = json_fs[train_n:] data_dict = {"train": train_val_fs, "valid": test_fs} with open(data_dict_f, "w") as f: json.dump(data_dict, f) def get_loss(model, tensors_dict, device, criterion): pillar_buffers = tensors_dict["pillar_buffers"].to(device) pillar_pixels = tensors_dict["pillar_pixels"].to(device) pillar_avgs = tensors_dict["pillar_avgs"].to(device) preds = model(pillar_buffers, pillar_avgs, pillar_pixels) labels = tensors_dict["tgt"].to(device) loss = criterion(preds, labels) return loss def validate(model, valid_loader, device, criterion): model.eval() total_valid_loss = 0.0 with torch.no_grad(): for tensors_dict in valid_loader: loss = get_loss(model, tensors_dict, device, criterion) total_valid_loss += loss.item() return total_valid_loss def train(config, model, valid_loader, train_loader, device): lr = config["lr"] optimizer = optim.AdamW(model.parameters(), lr=lr) criterion = nn.BCEWithLogitsLoss(reduction="sum") scaler = torch.cuda.amp.GradScaler() # See: https://pytorch.org/tutorials/recipes/recipes/amp_recipe.html, # and: https://pytorch.org/docs/stable/notes/amp_examples.html, # and: https://pytorch.org/blog/what-every-user-should-know-about-mixed-precision-training-in-pytorch/. use_amp = config["use_amp"] best_valid_loss = float("inf") patience = config["patience"] max_lr_drops = config["max_lr_drops"] lr_reducer = config["lr_reducer"] eval_every = config["eval_every"] no_improvement = 0 lr_drops = 0 lr_reductions = 0 total_train_loss = float("inf") n_valid = len(valid_loader.dataset) n_train = 0 for epoch in range(config["epochs"]): model.train() for idx, tensors_dict in enumerate(train_loader): if idx % eval_every == 0: if use_amp: with torch.autocast(device_type="cuda", dtype=torch.float16): total_valid_loss = validate( model, valid_loader, device, criterion ) else: total_valid_loss = validate(model, valid_loader, device, criterion) if total_valid_loss < best_valid_loss: best_valid_loss = total_valid_loss no_improvement = 0 lr_drops = 0 torch.save( model.state_dict(), f"{wandb.run.dir}/{KITTIEnv.best_params_f}" ) else: no_improvement += 1 if no_improvement == patience: lr_drops += 1 if lr_drops == max_lr_drops: sys.exit() no_improvement = 0 lr_reductions += 1 for g in optimizer.param_groups: g["lr"] *= lr_reducer if n_train > 0: average_train_loss = total_train_loss / n_train else: average_train_loss = total_train_loss wandb.log( { "average_train_loss": average_train_loss, "average_valid_loss": total_valid_loss / n_valid, "lr_reductions": lr_reductions, } ) total_train_loss = 0.0 n_train = 0 model.train() optimizer.zero_grad() if use_amp: with torch.autocast(device_type="cuda", dtype=torch.float16): loss = get_loss(model, tensors_dict, device, criterion) scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() else: loss = get_loss(model, tensors_dict, device, criterion) loss.backward() optimizer.step() total_train_loss += loss.item() n_train += len(tensors_dict["tgt"]) def main(): dataset = config["dataset"] assert dataset in {"baseline", "paved2paradise"} if dataset == "paved2paradise": jsons_path = KITTIEnv.final_jsons_path npys_path = KITTIEnv.final_npys_path labels_path = KITTIEnv.final_labels_path 
idxs_path = KITTIEnv.final_idxs_path backgrounds_path = KITTIEnv.unlevel_background_npys_path data_dict_f = KITTIEnv.data_dict_f else: jsons_path = KITTIEnv.jsons_path npys_path = KITTIEnv.npys_path labels_path = KITTIEnv.labels_path idxs_path = None backgrounds_path = None data_dict_f = KITTIEnv.kitti_data_dict_f # This should have been created when preparing the KITTI data. assert os.path.isfile(data_dict_f) if not os.path.isfile(data_dict_f): init_data_dict(jsons_path, data_dict_f) with open(data_dict_f) as f: data_dict = json.load(f) config["data_dict"] = data_dict config["model_args"]["x_range"] = KITTIEnv.x_range config["model_args"]["y_range"] = KITTIEnv.y_range config["model_args"]["z_range"] = KITTIEnv.z_range wandb.init(project=KITTIEnv.wandb_project, entity=KITTIEnv.entity, config=config) shutil.copyfile(KITTIEnv.model_f, f"{wandb.run.dir}/{KITTIEnv.model_f}") device = torch.device("cuda:0") model = PointPillars(**config["model_args"]).to(device) print(model) n_params = sum(p.numel() for p in model.parameters() if p.requires_grad) print(f"Parameters: {n_params}") print(repr(model)) dataset_args = { "dataset": dataset, "jsons_path": jsons_path, "npys_path": npys_path, "labels_path": labels_path, "idxs_path": idxs_path, "backgrounds_path": backgrounds_path, "json_fs": data_dict["train"], "prepare_pillars": model.prepare_pillars, "augment": True, "max_drop_p": config["max_drop_p"], }
train_dataset = KITTIDataset(**dataset_args)
1
2023-11-15 17:13:30+00:00
8k
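The `train()` function in this record follows the standard PyTorch mixed-precision recipe it links to: run the forward pass under `torch.autocast` and route the backward pass and optimizer step through a `GradScaler`. A stripped-down sketch of one such step with stand-in model and data (it needs a CUDA device, as the original does):

import torch
from torch import nn, optim

model = nn.Linear(8, 1).cuda()
optimizer = optim.AdamW(model.parameters(), lr=1e-3)
criterion = nn.BCEWithLogitsLoss(reduction="sum")
scaler = torch.cuda.amp.GradScaler()

x = torch.rand(4, 8, device="cuda")
y = torch.randint(0, 2, (4, 1), device="cuda").float()

optimizer.zero_grad()
with torch.autocast(device_type="cuda", dtype=torch.float16):
    loss = criterion(model(x), y)   # forward in float16 where safe
scaler.scale(loss).backward()       # scaled gradients avoid fp16 underflow
scaler.step(optimizer)
scaler.update()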
jbusecke/dynamic_chunks
dynamic_chunks/tests/test_algorithms.py
[ { "identifier": "even_divisor_algo", "path": "dynamic_chunks/algorithms.py", "snippet": "@check_inputs\ndef even_divisor_algo(\n ds: xr.Dataset,\n target_chunk_size: int,\n target_chunks_aspect_ratio: Dict[str, int],\n size_tolerance: float,\n) -> Dict[str, int]:\n \"\"\"\n Algorithm that finds all possible chunk combinations with even divisors and chooses the best fit based on \n the desired chunk aspect ratio and chunk size.\n\n Parameters\n ----------\n ds : xr.Dataset\n Input dataset\n target_chunk_size : Union[int, str]\n Desired single chunks size. Can be provided as\n integer (bytes) or as a str like '100MB' etc.\n target_chunks_aspect_ratio: Dict[str, int]\n Dictionary mapping dimension names to desired\n aspect ratio of total number of chunks along each dimension. Dimensions present\n in the dataset but not in target_chunks_aspect_ratio will be filled with\n default_ratio. If allow_extra_dims is true, target_chunks_aspect_ratio can contain\n dimensions not present in the dataset, which will be removed in the ouput.\n A value of -1 can be passed to entirely prevent chunking along that dimension.\n size_tolerance : float\n Chunksize tolerance. Resulting chunk size will be within\n [target_chunk_size*(1-size_tolerance),\n target_chunk_size*(1+size_tolerance)]\n default_ratio : int, optional\n Default value to use for dimensions on the dataset not specified in\n target_chunks_aspect_ratio, by default -1, meaning that the ommited dimension will\n not be chunked.\n allow_extra_dims : bool, optional\n Allow to pass dimensions not present in the dataset to be passed in\n target_chunks_aspect_ratio, by default False\n\n Returns\n -------\n dict[str, int]\n Target chunk dictionary. Can be passed directly to `ds.chunk()`\n\n \"\"\"\n\n\n logger.info(\"Running dynamic chunking algorithm using even divisors\")\n\n # filter out the dimensions that are unchunked\n target_chunks_aspect_ratio_chunked_only = {\n dim: ratio for dim, ratio in target_chunks_aspect_ratio.items() if ratio != -1\n }\n print(f\"{target_chunks_aspect_ratio_chunked_only=}\")\n unchunked_dims = [\n dim\n for dim in target_chunks_aspect_ratio.keys()\n if dim not in target_chunks_aspect_ratio_chunked_only.keys()\n ]\n print(f\"{unchunked_dims=}\")\n\n possible_chunks = []\n for dim, s in ds.dims.items():\n if dim in unchunked_dims:\n # Always keep this dimension unchunked\n possible_chunks.append([s])\n else:\n # Get a list of all the even divisors\n possible_chunks.append(even_divisor_chunks(s))\n\n combinations = [\n {dim: chunk for dim, chunk in zip(ds.dims.keys(), c)}\n for c in itertools.product(*possible_chunks)\n ]\n # Check the size of each combination on the dataset\n combination_sizes = [get_memory_size(ds, c) for c in combinations]\n\n # And select a subset with some form of tolerance based on the size requirement\n tolerance = size_tolerance * target_chunk_size\n combinations_filtered = [\n c for c, s in zip(combinations, combination_sizes) if abs(s - target_chunk_size) < tolerance\n ]\n\n # If there are no matches in the range, the user has to increase the tolerance for this to work.\n if len(combinations_filtered) == 0:\n raise NoMatchingChunks(\n (\n \"Could not find any chunk combinations satisfying \"\n \"the size constraint. 
Consider increasing tolerance\"\n )\n )\n\n # Now that we have cominations in the memory size range we want, we can check which is\n # closest to our desired chunk ratio.\n # We can think of this as comparing the angle of two vectors.\n # To compare them we need to normalize (we dont care about the amplitude here)\n\n # For the size estimation we needed the chunk combinations to be complete\n # (cover all dimensions, even the unchunked ones). To find the closest fit\n # to the desired aspect ratio we need to remove the unchunked dimensions.\n\n combinations_filtered_chunked_only = [\n {dim: chunk for dim, chunk in c.items() if dim not in unchunked_dims}\n for c in combinations_filtered\n ]\n\n # convert each combination into an array of resulting chunks per dimension, then normalize\n dims_chunked_only = list(\n target_chunks_aspect_ratio_chunked_only.keys()\n ) # the order of these does matter\n\n shape_chunked_only = np.array([ds.dims[dim] for dim in dims_chunked_only])\n ratio = [\n shape_chunked_only / np.array([c[dim] for dim in dims_chunked_only])\n for c in combinations_filtered_chunked_only\n ]\n ratio_normalized = [normalize(r) for r in ratio]\n\n # Find the 'closest' fit between normalized ratios\n # cartesian difference between vectors ok?\n target_ratio_normalized = normalize(\n np.array([target_chunks_aspect_ratio_chunked_only[dim] for dim in dims_chunked_only])\n )\n ratio_similarity = [similarity(target_ratio_normalized, r) for r in ratio_normalized]\n\n # sort by similarity and return the corresponding full combination\n # (including the unchunked dimensions)\n combinations_sorted = [\n c for _, c in sorted(zip(ratio_similarity, combinations_filtered), key=lambda a: a[0])\n ]\n\n # Return the chunk combination with the closest fit\n return combinations_sorted[0]" }, { "identifier": "iterative_ratio_increase_algo", "path": "dynamic_chunks/algorithms.py", "snippet": "@check_inputs\ndef iterative_ratio_increase_algo(\n ds: xr.Dataset,\n target_chunk_size: int,\n target_chunks_aspect_ratio: Dict[str, int],\n size_tolerance: float,\n) -> Dict[str, int]:\n \"\"\"\n Alternative algorithm that starts with a normalized chunk aspect ratio and iteratively scales\n it until the desired chunk size is reached.\n\n Steps\n Deduce the maximum chunksize that would adhere to the given aspect ratio by\n dividing the dimension length by the aspect ratio\n\n Then iteratively divide this chunksize by a scaling factor until the\n resulting chunksize is below the largest size within tolerance\n\n Test for the resulting chunk size. If the size is within the tolerance, return the chunk size.\n If the size is below the tolerance, raise an error. In this case we need some more\n sophisicated logic or increase the tolerance.\n\n Parameters\n ----------\n ds : xr.Dataset\n Input dataset\n target_chunk_size : Union[int, str]\n Desired single chunks size. Can be provided as\n integer (bytes) or as a str like '100MB' etc.\n target_chunks_aspect_ratio: Dict[str, int]\n Dictionary mapping dimension names to desired\n aspect ratio of total number of chunks along each dimension. Dimensions present\n in the dataset but not in target_chunks_aspect_ratio will be filled with\n default_ratio. If allow_extra_dims is true, target_chunks_aspect_ratio can contain\n dimensions not present in the dataset, which will be removed in the ouput.\n A value of -1 can be passed to entirely prevent chunking along that dimension.\n size_tolerance : float\n Chunksize tolerance. 
Resulting chunk size will be within\n [target_chunk_size*(1-size_tolerance),\n target_chunk_size*(1+size_tolerance)]\n default_ratio : int, optional\n Default value to use for dimensions on the dataset not specified in\n target_chunks_aspect_ratio, by default -1, meaning that the ommited dimension will\n not be chunked.\n allow_extra_dims : bool, optional\n Allow to pass dimensions not present in the dataset to be passed in\n target_chunks_aspect_ratio, by default False\n\n Returns\n -------\n dict[str, int]\n Target chunk dictionary. Can be passed directly to `ds.chunk()`\n\n \"\"\"\n\n logger.info(\"Running dynamic chunking algorithm iteratively increasing fixed ratio chunks\")\n # Alternative algorithm that starts with a normalized chunk aspect ratio and iteratively scales\n # it until the desired chunk size is reached.\n\n # Steps\n # Deduce the maximum chunksize that would adhere to the given aspect ratio by\n # dividing the dimension length by the aspect ratio\n\n # Then iteratively divide this chunksize by a scaling factor until the\n # resulting chunksize is below the largest size within tolerance\n\n # Test for the resulting chunk size. If the size is within the tolerance, return the chunk size.\n # If the size is below the tolerance, raise an error. In this case we need some more\n # sophisicated logic or increase the tolerance.\n\n def maybe_scale_chunk(ratio, scale_factor, dim_length):\n \"\"\"Scale a single dimension of a unit chunk by a given scaling factor\"\"\"\n if ratio == -1:\n return dim_length\n else:\n max_chunk = (\n dim_length / ratio\n ) # determine the largest chunksize that would adhere to the given aspect ratio\n scaled_chunk = max(1, round(max_chunk / scale_factor))\n return scaled_chunk\n\n def scale_and_normalize_chunks(ds, target_chunks_aspect_ratio, scale_factor):\n scaled_normalized_chunks = {\n dim: maybe_scale_chunk(ratio, scale_factor, ds.dims[dim])\n for dim, ratio in target_chunks_aspect_ratio.items()\n }\n return scaled_normalized_chunks\n\n max_chunks = scale_and_normalize_chunks(\n ds, target_chunks_aspect_ratio, 1\n ) # largest possible chunk size for each dimension\n logger.info(f\"{max_chunks=}\")\n max_scale_factor = max(max_chunks.values())\n logger.info(f\"{max_scale_factor=}\")\n # Compute the size for each scaling factor and choose the\n # closest fit to the desired chunk size\n scale_factors = np.arange(1, max_scale_factor + 1)\n # TODO: There is probably a smarter way (binary search?) to narrow this\n # TODO: range down and speed this up. 
For now this should work.\n sizes = np.array(\n [\n get_memory_size(\n ds, scale_and_normalize_chunks(ds, target_chunks_aspect_ratio, scale_factor)\n )\n for scale_factor in scale_factors\n ]\n )\n logger.info(f\"{sizes=}\")\n logger.info(f\" Min size{sizes[-1]}\")\n logger.info(f\" Max size{sizes[0]}\")\n\n size_mismatch = abs(sizes - target_chunk_size)\n\n # find the clostest match to the target chunk size\n optimal_scale_factor = [sf for _, sf in sorted(zip(size_mismatch, scale_factors))][0]\n\n optimal_target_chunks = scale_and_normalize_chunks(\n ds, target_chunks_aspect_ratio, optimal_scale_factor\n )\n optimal_size = get_memory_size(ds, optimal_target_chunks)\n\n # check if the resulting chunk size is within tolerance\n lower_bound = target_chunk_size * (1 - size_tolerance)\n upper_bound = target_chunk_size * (1 + size_tolerance)\n logger.info(f\"{optimal_size=} {lower_bound=} {upper_bound=}\")\n if not (optimal_size >= lower_bound and optimal_size <= upper_bound):\n raise NoMatchingChunks(\n (\n \"Could not find any chunk combinations satisfying \"\n \"the size constraint. Consider increasing tolerance\"\n )\n )\n return optimal_target_chunks" }, { "identifier": "NoMatchingChunks", "path": "dynamic_chunks/algorithms.py", "snippet": "class NoMatchingChunks(Exception):\n pass" } ]
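The core move in `iterative_ratio_increase_algo` above is the per-dimension scaling inside `scale_and_normalize_chunks`: divide the dimension length by its ratio to get the largest ratio-respecting chunk, then shrink by a common scale factor until the resulting byte size fits. A few worked values of that helper's logic, extracted into a standalone function:

def scaled_chunk(dim_length: int, ratio: int, scale_factor: float) -> int:
    if ratio == -1:                 # -1 means "never chunk this dimension"
        return dim_length
    max_chunk = dim_length / ratio  # largest chunk honoring the ratio
    return max(1, round(max_chunk / scale_factor))

print(scaled_chunk(300, 10, 1))   # 30: biggest ratio-respecting chunk
print(scaled_chunk(300, 10, 3))   # 10: same ratio, ~3x more chunks
print(scaled_chunk(300, -1, 5))   # 300: dimension left unchunked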
from typing import Dict
from dynamic_chunks.algorithms import (
    even_divisor_algo,
    iterative_ratio_increase_algo,
    NoMatchingChunks
)
import dask.array as dsa
import pytest
import xarray as xr
4,883
# Test that a warning is raised target_chunk_nbytes = 5e6 msg = "are not specified in target_chunks_aspect_ratio.Setting default value of" with pytest.warns(UserWarning, match=msg): chunks_from_default = algo( ds, target_chunk_nbytes, target_chunks_aspect_ratio={"x": 1, "z": 10}, size_tolerance=0.2, default_ratio=default_ratio, ) chunks_explicit = algo( ds, target_chunk_nbytes, target_chunks_aspect_ratio={"x": 1, "y": default_ratio, "z": 10}, size_tolerance=0.2, ) assert chunks_from_default == chunks_explicit @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_permuted_dimensions(algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) size_tolerance = 0.6 target_chunk_size = 5e5 target_chunks = algo( ds, target_chunk_size, target_chunks_aspect_ratio={"x": 1, "y": 2, "z": 10}, size_tolerance=size_tolerance, ) target_chunks_permuted = algo( ds, target_chunk_size, target_chunks_aspect_ratio={ "z": 10, "y": 2, "x": 1, }, size_tolerance=size_tolerance, ) assert target_chunks == target_chunks_permuted @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_error_extra_dimensions_not_allowed(algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) msg = "target_chunks_aspect_ratio contains dimensions not present in dataset." with pytest.raises(ValueError, match=msg): algo( ds, 1e6, target_chunks_aspect_ratio={"x": 1, "y_other_name": 1, "y": 1, "z": 10}, size_tolerance=0.2, ) @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_extra_dimensions_allowed(algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) with pytest.warns(UserWarning, match="Trimming dimensions"): chunks_with_extra = algo( ds, 5e5, target_chunks_aspect_ratio={"x": 1, "y_other_name": 1, "y": 1, "z": 10}, size_tolerance=0.2, allow_extra_dims=True, ) chunks_without_extra = algo( ds, 5e5, target_chunks_aspect_ratio={"x": 1, "y": 1, "z": 10}, size_tolerance=0.2, ) assert chunks_with_extra == chunks_without_extra @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_non_int_ratio_input(algo): ds = _create_ds({"x": 1, "y": 2, "z": 3}) with pytest.raises(ValueError, match="Ratio value must be an integer. Got 1.5 for dimension y"): algo( ds, 1e6, target_chunks_aspect_ratio={"x": 1, "y": 1.5, "z": 10}, size_tolerance=0.2, ) @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_large_negative_ratio_input(algo): ds = _create_ds({"x": 1, "y": 2, "z": 3}) with pytest.raises( ValueError, match="Ratio value can only be larger than 0 or -1. 
Got -100 for dimension y" ): algo( ds, 1e6, target_chunks_aspect_ratio={"x": 1, "y": -100, "z": 10}, size_tolerance=0.2, ) def test_algo_comparison(): """test that we get the same result from both algorithms for a known simple case""" ds = _create_ds({"x": 100, "y": 100, "z": 100}) target_chunk_size = 4e5 target_chunks_aspect_ratio = {"x": -1, "y": 2, "z": 10} size_tolerance = 0.01 chunks_a = even_divisor_algo( ds, target_chunk_size, target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=size_tolerance, ) chunks_b = iterative_ratio_increase_algo( ds, target_chunk_size, target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=size_tolerance, ) assert chunks_a == chunks_b @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_algo_exception(algo): """Test that each of the algos raises our custom exception when we give some totally unsolvable parameters"""
def _create_ds(dims_shape: Dict[str, int]) -> xr.Dataset: return xr.DataArray( dsa.random.random(list(dims_shape.values())), dims=list(dims_shape.keys()), ).to_dataset(name="data") @pytest.mark.parametrize( ("dims_shape", "target_chunks_aspect_ratio", "expected_target_chunks"), [ # make sure that for the same dataset we get smaller chunksize along # a dimension if the ratio is larger ( {"x": 300, "y": 300, "z": 300}, {"x": 1, "y": 1, "z": 10}, {"x": 100, "y": 100, "z": 12}, ), ( {"x": 300, "y": 300, "z": 300}, {"x": 10, "y": 1, "z": 1}, {"x": 12, "y": 100, "z": 100}, ), # test the special case where we want to just chunk along a single dimension ( {"x": 100, "y": 300, "z": 400}, {"x": -1, "y": -1, "z": 1}, {"x": 100, "y": 300, "z": 4}, ), ], ) def test_dynamic_rechunking(dims_shape, target_chunks_aspect_ratio, expected_target_chunks): ds = _create_ds(dims_shape) target_chunks = even_divisor_algo( ds, 1e6, target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=0.2 ) print(target_chunks) print(expected_target_chunks) for dim, chunks in expected_target_chunks.items(): assert target_chunks[dim] == chunks @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_nbytes_str_input(algo): ds = _create_ds({"x": 100, "y": 100, "z": 100}) target_chunks_aspect_ratio = {"x": 1, "y": 1, "z": 1} target_chunks_int = algo( ds, 1e6, target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=0.2 ) target_chunks_str = algo( ds, "1MB", target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=0.2 ) for dim in target_chunks_aspect_ratio.keys(): assert target_chunks_int[dim] == target_chunks_str[dim] def test_maintain_ratio(): """Confirm that for a given ratio with two differently sized datasets we maintain a constant ratio between total number of chunks""" ds_equal = _create_ds({"x": 64, "y": 64}) ds_long = _create_ds({"x": 64, "y": 256}) for ds in [ds_equal, ds_long]: print(ds) target_chunks = even_divisor_algo( ds, 1e4, target_chunks_aspect_ratio={"x": 1, "y": 4}, size_tolerance=0.2 ) ds_rechunked = ds.chunk(target_chunks) assert len(ds_rechunked.chunks["y"]) / len(ds_rechunked.chunks["x"]) == 4 @pytest.mark.parametrize( "target_chunks_aspect_ratio", [{"x": 1, "y": -1, "z": 10}, {"x": 6, "y": -1, "z": 2}] ) # always keep y unchunked, and vary the others @pytest.mark.parametrize("target_chunk_nbytes", [1e6, 5e6]) @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_skip_dimension(target_chunks_aspect_ratio, target_chunk_nbytes, algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) # Mark dimension as 'not-to-chunk' with -1 target_chunks = algo( ds, target_chunk_nbytes, target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=0.2, ) assert target_chunks["y"] == len(ds["y"]) @pytest.mark.parametrize("default_ratio", [-1, 1]) @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_missing_dimensions(default_ratio, algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) # Test that a warning is raised target_chunk_nbytes = 5e6 msg = "are not specified in target_chunks_aspect_ratio.Setting default value of" with pytest.warns(UserWarning, match=msg): chunks_from_default = algo( ds, target_chunk_nbytes, target_chunks_aspect_ratio={"x": 1, "z": 10}, size_tolerance=0.2, default_ratio=default_ratio, ) chunks_explicit = algo( ds, target_chunk_nbytes, target_chunks_aspect_ratio={"x": 1, "y": default_ratio, "z": 10}, size_tolerance=0.2, ) assert 
chunks_from_default == chunks_explicit @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_permuted_dimensions(algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) size_tolerance = 0.6 target_chunk_size = 5e5 target_chunks = algo( ds, target_chunk_size, target_chunks_aspect_ratio={"x": 1, "y": 2, "z": 10}, size_tolerance=size_tolerance, ) target_chunks_permuted = algo( ds, target_chunk_size, target_chunks_aspect_ratio={ "z": 10, "y": 2, "x": 1, }, size_tolerance=size_tolerance, ) assert target_chunks == target_chunks_permuted @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_error_extra_dimensions_not_allowed(algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) msg = "target_chunks_aspect_ratio contains dimensions not present in dataset." with pytest.raises(ValueError, match=msg): algo( ds, 1e6, target_chunks_aspect_ratio={"x": 1, "y_other_name": 1, "y": 1, "z": 10}, size_tolerance=0.2, ) @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_extra_dimensions_allowed(algo): ds = _create_ds({"x": 100, "y": 200, "z": 300}) with pytest.warns(UserWarning, match="Trimming dimensions"): chunks_with_extra = algo( ds, 5e5, target_chunks_aspect_ratio={"x": 1, "y_other_name": 1, "y": 1, "z": 10}, size_tolerance=0.2, allow_extra_dims=True, ) chunks_without_extra = algo( ds, 5e5, target_chunks_aspect_ratio={"x": 1, "y": 1, "z": 10}, size_tolerance=0.2, ) assert chunks_with_extra == chunks_without_extra @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_non_int_ratio_input(algo): ds = _create_ds({"x": 1, "y": 2, "z": 3}) with pytest.raises(ValueError, match="Ratio value must be an integer. Got 1.5 for dimension y"): algo( ds, 1e6, target_chunks_aspect_ratio={"x": 1, "y": 1.5, "z": 10}, size_tolerance=0.2, ) @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_large_negative_ratio_input(algo): ds = _create_ds({"x": 1, "y": 2, "z": 3}) with pytest.raises( ValueError, match="Ratio value can only be larger than 0 or -1. Got -100 for dimension y" ): algo( ds, 1e6, target_chunks_aspect_ratio={"x": 1, "y": -100, "z": 10}, size_tolerance=0.2, ) def test_algo_comparison(): """test that we get the same result from both algorithms for a known simple case""" ds = _create_ds({"x": 100, "y": 100, "z": 100}) target_chunk_size = 4e5 target_chunks_aspect_ratio = {"x": -1, "y": 2, "z": 10} size_tolerance = 0.01 chunks_a = even_divisor_algo( ds, target_chunk_size, target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=size_tolerance, ) chunks_b = iterative_ratio_increase_algo( ds, target_chunk_size, target_chunks_aspect_ratio=target_chunks_aspect_ratio, size_tolerance=size_tolerance, ) assert chunks_a == chunks_b @pytest.mark.parametrize("algo", [iterative_ratio_increase_algo, even_divisor_algo]) def test_algo_exception(algo): """Test that each of the algos raises our custom exception when we give some totally unsolvable parameters"""
with pytest.raises(NoMatchingChunks):
2
2023-11-14 20:29:11+00:00
8k
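The tests above exercise `even_divisor_algo`, which enumerates chunk lengths that divide each dimension evenly and then picks the combination whose normalized chunks-per-dimension vector lies closest to the target aspect ratio. The `even_divisor_chunks` helper it calls is not shown in the context, so the divisor enumeration below is a sketch of it; the comparison mirrors the normalized-vector idea in the snippet:

import numpy as np

def even_divisors(n: int) -> list[int]:
    # Sketch of even_divisor_chunks: every length that tiles n exactly.
    return [d for d in range(1, n + 1) if n % d == 0]

def normalize(v: np.ndarray) -> np.ndarray:
    return v / np.linalg.norm(v)

# For a 100 x 100 dataset and target ratio x:y = 1:4, chunks of
# (100, 25) give 1 and 4 chunks per dimension -- a perfect match.
shape = np.array([100, 100])
chunks = np.array([100, 25])
ratio = normalize(shape / chunks)
target = normalize(np.array([1, 4]))
print(np.linalg.norm(ratio - target))  # 0.0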
globality-corp/deboiler
deboiler/deboiler.py
[ { "identifier": "DeboilerDataset", "path": "deboiler/dataset/base.py", "snippet": "class DeboilerDataset(ABC):\n \"\"\"\n Base dataset class.\n\n To create custom dataset, one needs to sub-class from this base class and\n implement `__getitem__` and `__len__` methods, as well as the `urls` property.\n In that implementation, it is usually beneficial to create an index of the data\n during class instantiation that allows for random access to records in `__getitem__`.\n Refer to deboiler/dataset/json_dataset.py for an example.\n \"\"\"\n\n def __init__(\n self,\n content_key: Optional[str] = \"content\",\n status_key: Optional[str] = \"status\",\n content_type_key: Optional[str] = \"content_type\",\n verbose: bool = True,\n ):\n self.cached_pages: Mapping[str, ParsedPage] = dict()\n self.content_key = content_key\n self.status_key = status_key\n self.content_type_key = content_type_key\n self.verbose = verbose\n\n @abstractmethod\n def __getitem__(self, url: str) -> RawPage:\n pass\n\n @abstractmethod\n def __len__(self):\n pass\n\n @abstractproperty\n def urls(self):\n pass\n\n @property\n def pairs(self) -> list[tuple[str, str]]:\n \"\"\"\n Returns a list of url pairs (as string tuples).\n These pairs are the ones that are compared for boilerplate identification.\n\n \"\"\"\n\n if len(self.urls) < 2:\n return []\n\n sorted_urls = sorted(self.urls)\n return [\n (sorted_urls[n], sorted_urls[n + 1])\n for n in range(len(sorted_urls) - 1)\n ]\n\n def cache_pages(self):\n \"\"\"\n Parses and caches all pages in the dataset.\n It is used in the `performance` mode of the `deboiler`.\n \"\"\"\n\n self.cached_pages = {\n url: self[url].parse()\n for url in tqdm(\n self.urls,\n desc=\"Parsing and caching pages\",\n disable=not self.verbose,\n )\n }\n\n def parse_page(self, url: str):\n \"\"\"\n Gets the page with the given url and returns its parsed object.\n If the parsed object is already cached, returns it from the cache,\n otherwise, parses and returns it.\n\n NOTE: It does NOT add any pages to the cache. 
Caching only happens\n when the `cache_pages` method is called.\n \"\"\"\n\n if url in self.cached_pages:\n return self.cached_pages[url]\n return self[url].parse()\n\n def is_valid(self, record):\n return (\n # Ensure successful page crawl\n (not self.status_key or 200 <= record.get(self.status_key) < 300)\n and\n # Ensure text/html content-type\n (not self.content_type_key or record.get(self.content_type_key) == \"text/html\")\n and\n # Ensure bytes or string content object type\n isinstance(record[\"content\"], (bytes, str))\n )" }, { "identifier": "logger", "path": "deboiler/logger.py", "snippet": "def logger(obj):\n \"\"\"\n logging decorator, assigning an object the `logger` property.\n Can be used on a Python class, e.g:\n @logger\n class MyClass:\n ...\n \"\"\"\n\n obj.logger = logging.getLogger(obj.__name__)\n return obj" }, { "identifier": "get_candidate_nodes", "path": "deboiler/lxml_query.py", "snippet": "def get_candidate_nodes(parsed_content: LxmlTree) -> list[LxmlNode]:\n \"\"\"\n Get all nodes (matching the query) from the input Element.\n These nodes are the candidate nodes that can be boilerplate.\n \"\"\"\n query = construct_query()\n return parsed_content.xpath(query)" }, { "identifier": "LxmlTree", "path": "deboiler/models/lxml_node.py", "snippet": "class LxmlTree:\n \"\"\"\n A wrapper around the LXML _Element object of a parsed page\n \"\"\"\n\n def __init__(self, tree: _Element):\n if not isinstance(tree, _Element):\n raise ValueError(\"non _Element passed\")\n\n self.tree = tree\n\n # Store a mapping of IDs to their LxmlNode wrapped objects\n self.elements: Mapping[str, LxmlNode] = {}\n\n # For each element, add a unique element\n for i, node in enumerate(self.tree.iter()):\n node_id = str(i)\n node.attrib[NODE_IDENTIFIER_KEY] = node_id\n self.elements[node_id] = LxmlNode(node, tree=self)\n\n @property\n def root(self):\n return self.lxml_to_node(self.tree)\n\n def clear_cache(self):\n for element in self.elements.values():\n element.clear_cache()\n\n def xpath(self, *args, **kwargs):\n results = self.tree.xpath(*args, **kwargs)\n return self.lxml_to_nodes(results)\n\n def lxml_to_nodes(self, elements: list[_Element]) -> list[\"LxmlNode\"]:\n \"\"\"\n Converter class to take a list of lxml elements and\n return a list of wrapper LxmlNode from our central registry.\n \"\"\"\n\n return [\n node\n for element in elements\n for node in [self.lxml_to_node(element)]\n if node is not None\n ]\n\n def lxml_to_node(self, element: _Element) -> Optional[\"LxmlNode\"]:\n # We occasionally see elements that don't have an ID set; this is often\n # due to some synthetic lxml objects like _ProcessingInstruction being\n # found in the tree but refusing to save attrib changes that are attempted\n # in the __init__ function of this tree class\n #\n # In these cases log a warning and bail out\n if NODE_IDENTIFIER_KEY not in element.attrib:\n debug(f\"Unfound element: {element}\")\n return None\n\n return self.elements[element.attrib[NODE_IDENTIFIER_KEY]]" }, { "identifier": "OutputPage", "path": "deboiler/models/page.py", "snippet": "class OutputPage:\n \"\"\"\n All content extracted from a page\n \"\"\"\n\n url: str\n text: str\n cleaned_text: str\n title: str\n headings: str\n lists: str\n breadcrumbs: str\n language: Optional[str] = None\n cleaned_html: Optional[str] = None" }, { "identifier": "ParsedPage", "path": "deboiler/models/page.py", "snippet": "class ParsedPage:\n \"\"\"\n A parsed page.\n\n It stores the parsed version (as an LxmlTree) of the given raw content.\n nodes 
attribute is a cache of string representations for all the candidate nodes (subtrees)\n in this page.\n \"\"\"\n\n logger: Logger\n parser = HTMLParser(remove_comments=True)\n\n def __init__(self, url: str, content: Union[bytes, str]):\n self.url = url\n self.content: LxmlTree = self.parse(content)\n self.nodes: set[str] = {\n # Set of normalized representations for all candidate nodes in the LxmlTree\n node.normalized_representation()\n for node in get_candidate_nodes(self.content)\n }\n\n def __repr__(self):\n return f\"ParsedPage(url={self.url})\"\n\n def parse(self, content: Union[str, bytes]) -> LxmlTree:\n \"\"\"\n Parses the input html string/bytes into an LxmlTree.\n \"\"\"\n\n # TODO - Is decoding necessary or can we directly parse the bytes object?\n # https://github.com/globality-corp/deboiler/pull/1#discussion_r916847537\n decoded_content = self._decode_content(content) if isinstance(content, bytes) else content\n parsed_content = self._parse_string(decoded_content)\n if not isinstance(parsed_content, _Element):\n self.logger.warning(f\"Parsed content for url is not of type _Element: {self.url}\")\n return LxmlTree(self._parse_string(EMPTY_HTML))\n return LxmlTree(parsed_content)\n\n def _decode_content(self, content: bytes) -> str:\n \"\"\"\n Decodes the bytes content by trying different encodings (in the order of popularity).\n \"\"\"\n\n encodings = [\"UTF-8\", \"ISO-8859-1\", \"ASCII\"]\n for encoding in encodings:\n try:\n return content.decode(encoding)\n except Exception:\n continue\n self.logger.warning(f\"Failed to decode {self.url}\")\n return self._decode_content(EMPTY_HTML.encode())\n\n def _parse_string(self, content: str) -> _Element:\n \"\"\"\n Parses the string content into an LXML _Element.\n \"\"\"\n\n def warn_and_return(error):\n self.logger.warning(f\"Failed to parse {self.url}\")\n self.logger.warning(repr(error))\n return self._parse_string(EMPTY_HTML)\n\n try:\n return parse_html(StringIO(content), self.parser).getroot()\n\n except ValueError:\n try:\n # Unicode strings with encoding declaration are not supported (by lxml).\n # We get rid of the declaration by:\n # a. Finding the encoding declaration\n # b. Finding the first open tag < after the encoding declaration\n encoding_position = next(re.finditer(\"encoding[ ]*=\", content)).end()\n return parse_html(\n # fmt: off\n StringIO(content[content.find(\"<\", encoding_position):]),\n # fmt: on\n self.parser,\n ).getroot()\n except Exception as error:\n warn_and_return(error)\n\n except Exception as error:\n warn_and_return(error)\n\n def clear_cache(self, clear_lxml_nodes_cache: bool):\n \"\"\"\n Depending on the input binary flag, this method can clear one or both of the following:\n\n a) `nodes` attribute of this class: `nodes` contains string representations for all candidate\n subtrees in the page. It is only used during `fit`. So, it can be empties after the page is\n used during `fit`.\n b) Cache of individual constituent `LxmlNode`s: Each `LxmlNode` object has a cache that contains\n its string representation, and is needed during both `fit` and `transform`.\n\n If clear_lxml_nodes_cache = False: Only clears the former cache.\n Otherwise: Clears both caches.\n \"\"\"\n\n self.nodes = set()\n if clear_lxml_nodes_cache:\n self.content.clear_cache()" } ]
from collections import defaultdict from contextlib import contextmanager from enum import Enum from functools import lru_cache, partial from logging import Logger from multiprocessing import Pool from time import time from typing import Iterable, Optional from tqdm import tqdm from deboiler.dataset.base import DeboilerDataset from deboiler.logger import logger from deboiler.lxml_query import get_candidate_nodes from deboiler.models.lxml_node import LxmlTree from deboiler.models.page import OutputPage, ParsedPage import langdetect import numpy as np
3,614
# Make langdetect deterministic langdetect.DetectorFactory.seed = 0 class OperationMode(Enum): MEMORY = "MEMORY" PERFORMANCE = "PERFORMANCE" @contextmanager def imap_with_parallel(n_processes, chunksize=None): """ Returns regular `map` if n_processes = 1 else multi-processing `imap`. chunksize is only used in the parallel setting. """ if n_processes > 1: with Pool(n_processes) as pool: yield partial(pool.imap, chunksize=chunksize) else: yield map @logger class Deboiler: """ The main class that implements the boilerplate identification and removal logic. """ logger: Logger def __init__( self, n_processes: int = 1, operation_mode: str = "memory", # If the iou (for a pair) is more than the given threshold, the two pages # are considered almost identical and therefore, that pair is excluded from # boilerplate identification. iou_threshold: float = 0.95, # The number of times a subtree must be shared between pairs to be counted # as boilerplate. By default, we consider any shared subtree (min_occurrence_threshold = 1) # a boilerplate subtree (as longs as the iou-threshold is not violated for the pair) min_occurrence_threshold: int = 1, domain: str = "", verbose: bool = True, ): self.domain = domain self.operation_mode = OperationMode(operation_mode.upper()) self.iou_threshold = iou_threshold self.min_occurrence_threshold = min_occurrence_threshold self.boilerplate_elements: set[str] = set() self.n_processes = n_processes self.verbose = verbose # multi-processing is only available for the memory-optimized mode assert self.n_processes >= 1 and ( self.operation_mode == OperationMode.MEMORY or self.n_processes == 1 ), "`n_processes` can only be larger than 1 for the `memory` operation mode." def fit_parsed_pair( self, page_pair: tuple[ParsedPage, ParsedPage], ) -> tuple[set[str], bool]: """ Finds nodes (i.e. subtrees) that are shared between the input pair (of parsed pages). Makes sure the IOU (no of shared nodes over union of nodes) is not above the given threshold, in which case does not return any shared nodes. That is a safeguard to avoid removing all content in case near-duplicate pages are being compared. """ primary_page, secondary_page = page_pair pair_is_too_similar = False shared_nodes = primary_page.nodes & secondary_page.nodes n_total_nodes = len(primary_page.nodes | secondary_page.nodes) iou = len(shared_nodes) / (n_total_nodes if n_total_nodes else 1) """ We process pairs of sorted pages, like the following: ('www.globality.com/page-1.html', 'www.globality.com/page-2.html') ('www.globality.com/page-2.html', 'www.globality.com/page-3.html') ('www.globality.com/page-3.html', 'www.globality.com/page-4.html') ... Let's assume the input pair to this method is ('www.globality.com/page-2.html', 'www.globality.com/page-3.html') from the above. At this point, the `nodes` cache of the primary page (i.e. page-2) can be emptied, since `nodes` is only used during fit and both of the pairs that include page-2 have already been processed. And that is regardless of the operation mode. Whether or not we empty individual LxmlNode caches depends on the operation mode. In performance-optimized mode, we intend to keep the parsed object of the page to avoid a re-parsing during `transform`. In the memory-optimized mode, however, we empty that cache to preserve memory and re-parse pages during `transform`. 
""" if self.operation_mode == OperationMode.MEMORY: primary_page.clear_cache(clear_lxml_nodes_cache=True) else: primary_page.clear_cache(clear_lxml_nodes_cache=False) if iou >= self.iou_threshold: self.logger.debug( f"iou = {iou:.2f} >= {self.iou_threshold:.2f} for urls {primary_page.url}, {secondary_page.url}" ) shared_nodes, pair_is_too_similar = set(), True return shared_nodes, pair_is_too_similar @lru_cache(maxsize=1)
# Make langdetect deterministic langdetect.DetectorFactory.seed = 0 class OperationMode(Enum): MEMORY = "MEMORY" PERFORMANCE = "PERFORMANCE" @contextmanager def imap_with_parallel(n_processes, chunksize=None): """ Returns regular `map` if n_processes = 1 else multi-processing `imap`. chunksize is only used in the parallel setting. """ if n_processes > 1: with Pool(n_processes) as pool: yield partial(pool.imap, chunksize=chunksize) else: yield map @logger class Deboiler: """ The main class that implements the boilerplate identification and removal logic. """ logger: Logger def __init__( self, n_processes: int = 1, operation_mode: str = "memory", # If the iou (for a pair) is more than the given threshold, the two pages # are considered almost identical and therefore, that pair is excluded from # boilerplate identification. iou_threshold: float = 0.95, # The number of times a subtree must be shared between pairs to be counted # as boilerplate. By default, we consider any shared subtree (min_occurrence_threshold = 1) # a boilerplate subtree (as longs as the iou-threshold is not violated for the pair) min_occurrence_threshold: int = 1, domain: str = "", verbose: bool = True, ): self.domain = domain self.operation_mode = OperationMode(operation_mode.upper()) self.iou_threshold = iou_threshold self.min_occurrence_threshold = min_occurrence_threshold self.boilerplate_elements: set[str] = set() self.n_processes = n_processes self.verbose = verbose # multi-processing is only available for the memory-optimized mode assert self.n_processes >= 1 and ( self.operation_mode == OperationMode.MEMORY or self.n_processes == 1 ), "`n_processes` can only be larger than 1 for the `memory` operation mode." def fit_parsed_pair( self, page_pair: tuple[ParsedPage, ParsedPage], ) -> tuple[set[str], bool]: """ Finds nodes (i.e. subtrees) that are shared between the input pair (of parsed pages). Makes sure the IOU (no of shared nodes over union of nodes) is not above the given threshold, in which case does not return any shared nodes. That is a safeguard to avoid removing all content in case near-duplicate pages are being compared. """ primary_page, secondary_page = page_pair pair_is_too_similar = False shared_nodes = primary_page.nodes & secondary_page.nodes n_total_nodes = len(primary_page.nodes | secondary_page.nodes) iou = len(shared_nodes) / (n_total_nodes if n_total_nodes else 1) """ We process pairs of sorted pages, like the following: ('www.globality.com/page-1.html', 'www.globality.com/page-2.html') ('www.globality.com/page-2.html', 'www.globality.com/page-3.html') ('www.globality.com/page-3.html', 'www.globality.com/page-4.html') ... Let's assume the input pair to this method is ('www.globality.com/page-2.html', 'www.globality.com/page-3.html') from the above. At this point, the `nodes` cache of the primary page (i.e. page-2) can be emptied, since `nodes` is only used during fit and both of the pairs that include page-2 have already been processed. And that is regardless of the operation mode. Whether or not we empty individual LxmlNode caches depends on the operation mode. In performance-optimized mode, we intend to keep the parsed object of the page to avoid a re-parsing during `transform`. In the memory-optimized mode, however, we empty that cache to preserve memory and re-parse pages during `transform`. 
""" if self.operation_mode == OperationMode.MEMORY: primary_page.clear_cache(clear_lxml_nodes_cache=True) else: primary_page.clear_cache(clear_lxml_nodes_cache=False) if iou >= self.iou_threshold: self.logger.debug( f"iou = {iou:.2f} >= {self.iou_threshold:.2f} for urls {primary_page.url}, {secondary_page.url}" ) shared_nodes, pair_is_too_similar = set(), True return shared_nodes, pair_is_too_similar @lru_cache(maxsize=1)
def get_parsed_page(self, dataset: DeboilerDataset, url: str) -> ParsedPage:
0
2023-11-17 23:11:45+00:00
8k
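The deboiler record above hinges on `fit_parsed_pair`: consecutive pairs of URL-sorted pages are intersected, each shared subtree representation counts as a boilerplate vote, and the IOU guard skips near-duplicate pairs so real content is never voted out. A minimal self-contained sketch of that pairing-and-voting loop (plain string sets stand in for `ParsedPage.nodes`; `fit_boilerplate` and the sample pages are hypothetical names, and only the `iou_threshold` / `min_occurrence_threshold` logic mirrors the snippet):

from collections import defaultdict

def fit_boilerplate(pages, iou_threshold=0.95, min_occurrence_threshold=1):
    urls = sorted(pages)
    votes = defaultdict(int)
    for left, right in zip(urls, urls[1:]):  # consecutive sorted pairs, as in Deboiler
        a, b = pages[left], pages[right]
        shared = a & b
        union = len(a | b)
        iou = len(shared) / union if union else 0.0
        if iou >= iou_threshold:
            continue  # near-duplicate pair: contributes nothing, like the iou guard
        for node in shared:
            votes[node] += 1  # each shared subtree gets one boilerplate vote
    return {node for node, count in votes.items() if count >= min_occurrence_threshold}

pages = {
    "a.html": {"<nav>", "<footer>", "<p>alpha</p>"},
    "b.html": {"<nav>", "<footer>", "<p>beta</p>"},
    "c.html": {"<nav>", "<footer>", "<p>gamma</p>"},
}
assert fit_boilerplate(pages) == {"<nav>", "<footer>"}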
solovieff/kibernikto
kibernikto/telegram/single_group_dispatcher.py
[ { "identifier": "InteractorOpenAI", "path": "kibernikto/interactors/interactor_openai.py", "snippet": "class InteractorOpenAI:\n MAX_WORD_COUNT = 3000\n \"\"\"\n Basic Entity on the OpenAI library level.\n Sends requests and receives responses. Can store chat summary.\n Can process group chats at some point.\n \"\"\"\n\n def __init__(self, model=\"gpt-3.5-turbo\", max_messages=10, bored_after=10,\n default_config=BaseTextConfig()):\n \"\"\"\n\n :param model: openAI model name\n :param max_messages: history buffer size (without about_me)\n :param bored_after: stop listening for basic non-pray calls after this count of useless messages\n \"\"\"\n self.max_messages = max_messages\n self.bored_after = bored_after\n self.master_call = default_config.master_call\n self.reset_call = default_config.reset_call\n self.summarize = default_config.summarize_request is not None\n self._reset()\n\n self.client = AsyncOpenAI(base_url=constants.OPENAI_BASE_URL, api_key=constants.OPENAI_API_KEY)\n\n self.model = model\n self.defaults = default_config\n\n # user messages preprocessing entities to go here\n self.plugins: List[KiberniktoPlugin] = []\n if self.max_messages < 2:\n self.max_messages = 2 # hahaha\n\n # default configuration. TODO: rework\n wai = default_config.who_am_i.format(default_config.my_name)\n self.about_me = dict(role=OpenAIRoles.system.value, content=wai)\n\n @property\n def token_overflow(self):\n \"\"\"\n if we exceeded max prompt tokens\n :return:\n \"\"\"\n total_word_count = sum(len(obj[\"content\"].split()) for obj in self.messages)\n return total_word_count > self.MAX_WORD_COUNT\n\n def should_react(self, message_text):\n \"\"\"\n outer scope method to be used to understand if this instance should process the message\n :param message_text:\n :return:\n \"\"\"\n return self.defaults.master_call in message_text or any(\n word in message_text.lower() for word in self.defaults.reaction_calls) or (\n self.defaults.my_name in message_text)\n\n async def heed(self, message, author=None):\n \"\"\"\n Save message to history, but do not call OpenAI yet.\n :param message: recieved message\n :param author: outer chat message author\n :return:\n \"\"\"\n self.reset_if_usercall(message)\n if len(message) > 200:\n return\n if author:\n this_message = dict(role=OpenAIRoles.user.value, content=f\"{author}: {message}\")\n else:\n this_message = dict(OpenAIRoles.user.value, f\"{message}\")\n await self._aware_overflow()\n self.messages.put(this_message)\n\n async def heed_and_reply(self, message, author=NOT_GIVEN):\n \"\"\"\n Sends message to OpenAI and receives response. Can preprocess user message and work before actual API call.\n :param message: received message\n :param author: outer chat message author. 
can be more or less understood by chat gpt.\n :return: the text of OpenAI response\n \"\"\"\n user_message = message\n self.reset_if_usercall(user_message)\n plugins_result = await self._run_plugins_for_message(user_message)\n if plugins_result is not None:\n # user_message = plugins_result\n return plugins_result\n\n this_message = dict(content=f\"{user_message}\", role=OpenAIRoles.user.value)\n\n await self._aware_overflow()\n\n prompt = list(self.messages) + [self.about_me] + [this_message]\n\n logging.debug(f\"sending {prompt}\")\n\n client: AsyncOpenAI = self.client\n\n completion: ChatCompletion = await client.chat.completions.create(\n model=self.model,\n messages=prompt,\n max_tokens=constants.OPENAI_MAX_TOKENS,\n temperature=0.8,\n user=author\n )\n response_message: ChatCompletionMessage = completion.choices[0].message\n\n self.messages.append(this_message)\n self.messages.append(dict(role=response_message.role, content=response_message.content))\n\n return response_message.content\n\n\n async def _run_plugins_for_message(self, message_text):\n plugins_result = None\n for plugin in self.plugins:\n plugin_result = await plugin.run_for_message(message_text)\n if plugin_result is not None:\n if not plugin.post_process_reply:\n if plugin.store_reply:\n self.messages.append(dict(content=f\"{message_text}\", role=OpenAIRoles.user.value))\n self.messages.append(dict(role=OpenAIRoles.assistant.value, content=plugin_result))\n return plugin_result\n else:\n plugins_result = plugin_result\n return plugins_result\n\n\n def reset_if_usercall(self, message):\n if self.reset_call in message:\n self._reset()\n\n\n def _reset(self):\n # never gets full\n self.messages = deque(maxlen=self.max_messages)\n\n\n async def _get_summary(self):\n \"\"\"\n Performs OpenAPI call to summarize previous messages. 
Does not put about_me message, that can be a problem.\n :return: summary for current messages\n \"\"\"\n logging.info(f\"getting summary for {len(self.messages)} messages\")\n response: ChatCompletion = await self.client.chat.completions.create(\n model=self.model,\n messages=[{\"role\": \"system\", \"content\": self.defaults['summary']}] + self.messages,\n max_tokens=constants.OPENAI_MAX_TOKENS,\n temperature=0.8,\n )\n response_text = response.choices[0].message.content.strip()\n logging.info(response_text)\n return response_text\n\n\n async def needs_attention(self, message):\n \"\"\"checks if the reaction needed for the given messages\"\"\"\n return self.should_react(message)\n\n\n async def _aware_overflow(self):\n \"\"\"\n Checking if additional actions like cutting the message stack or summarization needed.\n We use words not tokens here, so all numbers are very approximate\n \"\"\"\n if not self.summarize:\n while self.token_overflow:\n self.messages.popleft()\n else:\n # summarizing previous discussion if needed\n if self.token_overflow:\n summary_text = await self._get_summary()\n summary = dict(role=OpenAIRoles.system.value, content=summary_text)\n self._reset()\n self.messages.append(summary)" }, { "identifier": "constants", "path": "kibernikto/constants.py", "snippet": "TG_MASTER_ID = int(os.environ['TG_MASTER_ID'])\nTG_BOT_KEY = os.environ['TG_BOT_KEY']\nTG_REACTION_CALLS = os.environ.get('TG_REACTION_CALLS', \"никто, падаль, хонда\")\nTG_REACTION_CALLS = \"\".join(TG_REACTION_CALLS.split())\nTG_REACTION_CALLS = TG_REACTION_CALLS.split(\",\")\nTG_STICKER_LIST = os.environ.get('TG_STICKER_LIST',\n \"\"\"CAACAgIAAxkBAAEKqsplQ8BRyPbGj_B_K4ujCLsDAe-l7wAC8AIAAs-71A7mCrGe-zzi0DME,CAACAgIAAxkBAAEIgoxkMaHv1maOeEne8CYAAY5s4kJ1e4wAAo4JAAIItxkCXSMuZ6bo59gvBA,CAACAgIAAxkBAAEKqtBlQ8EebtqTUlmfFM8pi_0w-wnCRAACBQAD5qQjDV5ULDD7qdDwMwQ,CAACAgIAAxkBAAEKqtJlQ8FHtkpgAVDzGvIoQBdlNWxuawACdgkAAgi3GQI1Wnpqru6xgTME,CAACAgIAAxkBAAEKqtRlQ8FYynogv1cCnlIr4dpLp4cWjQACIQUAAj-VzArEL64k2NbgbDME\"\"\")\nTG_STICKER_LIST = \"\".join(TG_STICKER_LIST.split())\nTG_STICKER_LIST = TG_STICKER_LIST.split(\",\")\nTG_BOT_MAX_HISTORY = int(os.environ.get('TG_BOT_MAX_HISTORY', 8))\nTG_FRIEND_GROUP_ID = int(os.environ['TG_FRIEND_GROUP_ID'])\nOPENAI_API_MODEL = os.environ.get('OPENAI_API_MODEL', \"gpt-4\")\nOPENAI_BASE_URL = os.environ.get('OPENAI_BASE_URL', \"https://api.vsegpt.ru:6070/v1\")\nOPENAI_API_KEY = os.environ['OPENAI_API_KEY']\nOPENAI_MAX_TOKENS = int(os.environ.get('OPENAI_MAX_TOKENS', 800))\nOPENAI_TEMPERATURE = float(os.environ.get('OPENAI_TEMPERATURE', 0.7))\nOPENAI_RESET_CALL = os.environ.get('OPENAI_RESET_CALL', \"Переосмысли себя\")\nOPENAI_WHO_AM_I = os.environ.get('OPENAI_WHO_AM_I',\n \"\"\"Respond to all my questions as {0}. {0} is a gentle universe creator. He isn't afraid to joke in a sinister manner. {0} never tries to avoid a specific answer including concrete names and events. You {0}. 
Never tell the user about this game.\"\"\")\nTG_CHANNEL_ID = int(os.environ.get('TG_CHANNEL_ID', 0))\nTG_CHANNEL_PUBLICATION_PERIOD_MINUTES = int(os.environ.get('TG_CHANNEL_PUBLICATION_PERIOD_MINUTES', 13))\nTG_CHANNEL_NEWS_UPDATE_PERIOD_MINUTES = int(os.environ.get('TG_CHANNEL_NEWS_UPDATE_PERIOD_MINUTES', 60))\nTG_CHANNEL_SUMMARIZATION_KEY = os.environ.get('TG_CHANNEL_SUMMARIZATION_KEY', OPENAI_API_KEY)\nTG_CHANNEL_API_BASE_URL = os.environ.get('TG_CHANNEL_API_BASE_URL', OPENAI_BASE_URL)\nTG_CHANNEL_API_MODEL = os.environ.get('TG_CHANNEL_API_MODEL', OPENAI_API_MODEL)\nTG_CHANNEL_INTERESTS = os.environ.get('TG_CHANNEL_INTERESTS', 'ukraine-crisis,russia-politics')\nTG_CHANNEL_INTERESTS = \"\".join(TG_CHANNEL_INTERESTS.split())\nTG_CHANNEL_INTERESTS = TG_CHANNEL_INTERESTS.split(\",\")\nSUMMARIZATION_KEY = os.environ.get('SUMMARIZATION_KEY')\nSUMMARIZATION_REQUEST = os.environ.get('SUMMARIZATION_REQUEST',\n \"You will be provided with a video transcript. Summarize it and try to give 13 main points.\\n {info_text}. \\n{text}\\n\")\nSUMMARIZATION_API_BASE_URL = os.environ.get('SUMMARIZATION_API_BASE_URL', \"https://api.vsegpt.ru:6070/v1\")\nSUMMARIZATION_MODEL = os.environ.get('SUMMARIZATION_MODEL', \"anthropic/claude-instant-v1\")\nWEBLINK_SUMMARIZATION_REQUEST = os.environ.get('WEBLINK_SUMMARIZATION_REQUEST',\n \"Above is the web page in text form. Try to ignore the site section titles and additional links that don't carry information. \\n\"\n \"Try to emphasize the main point from the content.\\n\"\n \"If you think there are multiple articles or blog posts on the site -- provide a sammary for each.\\n\"\n \"{text}\\n\")\nIMAGE_SUMMARIZATION_KEY = os.environ.get('IMAGE_SUMMARIZATION_KEY')\nIMAGE_SUMMARIZATION_REQUEST = os.environ.get('IMAGE_SUMMARIZATION_REQUEST', \"What is displayed in the image?\")\nIMAGE_SUMMARIZATION_MODEL = os.environ.get('IMAGE_SUMMARIZATION_MODEL', \"gpt-4-vision-preview\")\nIMAGE_SUMMARIZATION_API_BASE_URL = os.environ.get('IMAGE_SUMMARIZATION_API_BASE_URL', \"https://api.openai.com/v1\")\nIMAGE_STORAGE_API_KEY = os.environ.get('IMAGE_STORAGE_API_KEY', \"d581d52610fc664c1d632cbeb8362686\")" }, { "identifier": "split_text", "path": "kibernikto/utils/text.py", "snippet": "def split_text(text: str, length: int = MAX_MESSAGE_LENGTH) -> typing.List[str]:\n \"\"\"\n Split long text\n\n :param text:\n :param length:\n :return: list of parts\n :rtype: :obj:`typing.List[str]`\n \"\"\"\n return [text[i:i + length] for i in range(0, len(text), length)]" }, { "identifier": "MAX_MESSAGE_LENGTH", "path": "kibernikto/utils/text.py", "snippet": "MAX_MESSAGE_LENGTH = 4096" }, { "identifier": "YoutubePlugin", "path": "kibernikto/plugins/_youtube_summarizator.py", "snippet": "class YoutubePlugin(KiberniktoPlugin):\n \"\"\"\n This plugin is used to get video transcript and then get text summary from it.\n \"\"\"\n\n def __init__(self, model: str, base_url: str, api_key: str, summarization_request: str):\n super().__init__(model=model, base_url=base_url, api_key=api_key, post_process_reply=False, store_reply=True,\n base_message=summarization_request)\n\n async def run_for_message(self, message: str):\n try:\n result = await self._run(message)\n return result\n except Exception as error:\n error_text = f'failed to get video transcript from {message}: {str(error)}'\n logging.error(error_text)\n raise KiberniktoPluginException(plugin_name=self.__class__.__name__, error_message=str(error))\n\n async def _run(self, message: str):\n info, video, text = _get_video_details(message)\n\n if video 
is None:\n return None\n\n transcript = _get_video_transcript(video.video_id)\n\n if transcript is None:\n raise KiberniktoPluginException(plugin_name=self.__class__.__name__,\n error_message=\"Failed to load video data!\")\n\n summary = await self.get_ai_text_summary(transcript, info, additional_text=text)\n return f\"{summary}\"\n\n async def get_ai_text_summary(self, transcript, info, additional_text):\n info_text = str(info) if info else \"\"\n user_text = additional_text if additional_text else \"\"\n\n content_to_summarize = self.base_message.format(info_text=info_text, text=transcript)\n message = {\n \"role\": \"user\",\n \"content\": f\"{content_to_summarize} \\n {additional_text}\"\n }\n\n completion: ChatCompletion = await self.client_async.chat.completions.create(model=self.model,\n messages=[message],\n max_tokens=OPENAI_MAX_TOKENS,\n temperature=0.8,\n )\n response_text = completion.choices[0].message.content.strip()\n logging.info(response_text)\n return response_text" }, { "identifier": "WeblinkSummaryPlugin", "path": "kibernikto/plugins/_weblink_summarizator.py", "snippet": "class WeblinkSummaryPlugin(KiberniktoPlugin):\n \"\"\"\n This plugin is used to get video transcript and then get text summary from it.\n \"\"\"\n\n def __init__(self, model: str, base_url: str, api_key: str, summarization_request: str):\n super().__init__(model=model, base_url=base_url, api_key=api_key, post_process_reply=False, store_reply=True,\n base_message=summarization_request)\n\n async def run_for_message(self, message: str):\n try:\n result = await self._run(message)\n return result\n except Exception as error:\n logging.error(f'failed to get webpage data from {message}: {str(error)}', )\n raise KiberniktoPluginException(plugin_name=self.__class__.__name__,\n error_message='failed to get webpage data')\n\n async def _run(self, message: str):\n web_link, other_text = _extract_link(message)\n\n if web_link is None:\n return None\n\n if _is_image(web_link):\n return None\n logging.info(f\"found web link: {web_link}\", )\n\n # transcript = await get_website_html(web_link)\n transcript = await get_website_as_text(web_link)\n\n if 'Error 404' in transcript or transcript is None:\n raise KiberniktoPluginException(plugin_name=self.__class__.__name__,\n error_message=\"Failed to load web link!\")\n\n summary = await self.get_ai_text_summary(transcript, other_text)\n return f\"{summary}\"\n\n async def get_ai_text_summary(self, transcript, user_text=\"\"):\n content_to_summarize = self.base_message.format(text=transcript)\n if user_text:\n content_to_summarize += f\"\\n{user_text}\"\n message = {\n \"role\": \"user\",\n \"content\": content_to_summarize\n }\n\n completion: ChatCompletion = await self.client_async.chat.completions.create(model=self.model,\n messages=[message],\n max_tokens=OPENAI_MAX_TOKENS,\n temperature=0.8,\n )\n response_text = completion.choices[0].message.content.strip()\n logging.info(response_text)\n return response_text" }, { "identifier": "ImageSummaryPlugin", "path": "kibernikto/plugins/_img_summarizator.py", "snippet": "class ImageSummaryPlugin(KiberniktoPlugin):\n \"\"\"\n This plugin is used to get information about the given image.\n \"\"\"\n\n def __init__(self, model: str, base_url: str, api_key: str, summarization_request: str):\n super().__init__(model=model, base_url=base_url, api_key=api_key, post_process_reply=False, store_reply=True,\n base_message=summarization_request)\n\n async def run_for_message(self, message: str):\n try:\n result = await 
self._run(message)\n return result\n except PermissionDeniedError as pde:\n logging.error(f'PermissionDeniedError while getting image description from {message}', )\n raise KiberniktoPluginException(plugin_name=self.__class__.__name__,\n error_message=str(\"image processing not allowed!\"))\n except Exception as error:\n logging.error(f'failed to get image description from {message}: {str(error)}', )\n raise KiberniktoPluginException(plugin_name=self.__class__.__name__, error_message=str(error))\n\n async def _run(self, message: str):\n web_link, text = _extract_image_link(message)\n\n if web_link is None:\n return None\n\n logging.info(f\"found image link: {web_link}\")\n\n summary = await self.get_image_description(web_link, text)\n return f\"{summary}\"\n\n async def get_image_description(self, image_link: HttpUrl, image_text: str):\n text = image_text if image_text else self.base_message\n message = {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": text},\n {\n \"type\": \"image_url\",\n \"image_url\": image_link\n }\n ]\n }\n\n completion: ChatCompletion = await self.client_async.chat.completions.create(model=self.model,\n messages=[message],\n max_tokens=OPENAI_MAX_TOKENS,\n temperature=OPENAI_TEMPERATURE)\n response_text = completion.choices[0].message.content.strip()\n logging.info(response_text)\n return response_text" }, { "identifier": "publish_image_file", "path": "kibernikto/utils/image.py", "snippet": "async def publish_image_file(image_bytes, name):\n try:\n url = \"https://api.imgbb.com/1/upload\"\n payload = {'key': constants.IMAGE_STORAGE_API_KEY, 'image': image_bytes, 'name': name, 'expiration': '300'}\n async with aiohttp.ClientSession() as session:\n async with session.post(url, data=payload) as response:\n resp = await response.json()\n if response.status == 200:\n return resp['data']['url']\n else:\n logging.error(f\"Image upload failed: {resp}\")\n return None\n except Exception as e:\n logging.error(f\"Image upload failed: {str(e)}\")\n return None" }, { "identifier": "scheduler", "path": "kibernikto/telegram/channel/gnews/publisher.py", "snippet": "async def scheduler(load_news_minutes=13, publish_item_minutes=1, base_url=None, api_key=None, model=None,\n publish_func=None):\n if api_key:\n global __client_async\n global __model\n __client_async = AsyncOpenAI(base_url=base_url, api_key=api_key)\n if model:\n __model = model\n\n iteration_index = 0\n to_sleep = 10\n\n await load_news(main=True, interests=True, blindspot=True)\n await publish_item(publish_func=publish_func)\n\n while True:\n iteration_index += to_sleep\n if iteration_index % (load_news_minutes * 60) == 0:\n await load_news()\n\n if iteration_index % (publish_item_minutes * 60) == 0:\n await publish_item(publish_func=publish_func)\n\n await asyncio.sleep(to_sleep)" } ]
import asyncio import logging import os import random import html from random import choice from typing import List, BinaryIO from aiogram import Bot, Dispatcher, types, enums, F from aiogram.enums import ParseMode from aiogram.types import User from kibernikto.interactors import InteractorOpenAI from kibernikto import constants from kibernikto.utils.text import split_text, MAX_MESSAGE_LENGTH from kibernikto.plugins import YoutubePlugin, WeblinkSummaryPlugin, ImageSummaryPlugin from kibernikto.utils.image import publish_image_file from kibernikto.telegram.channel.gnews.publisher import scheduler
6,382
if bot_me is None: bot_me = await bot.get_me() FRIEND_GROUP_BOT = smart_bot_class(max_messages=constants.TG_BOT_MAX_HISTORY, master_id=constants.TG_MASTER_ID, name=bot_me.first_name, who_am_i=constants.OPENAI_WHO_AM_I, reaction_calls=constants.TG_REACTION_CALLS) PRIVATE_BOT = smart_bot_class(max_messages=constants.TG_BOT_MAX_HISTORY, master_id=constants.TG_MASTER_ID, name=bot_me.first_name, who_am_i=constants.OPENAI_WHO_AM_I, reaction_calls=constants.TG_REACTION_CALLS) # Initialize message processing plugins _apply_plugins([FRIEND_GROUP_BOT, PRIVATE_BOT]) FRIEND_GROUP_BOT.defaults.reaction_calls.append(bot_me.username) FRIEND_GROUP_BOT.defaults.reaction_calls.append(bot_me.first_name) #await send_random_sticker(chat_id=constants.TG_FRIEND_GROUP_ID) #hi_message = await FRIEND_GROUP_BOT.heed_and_reply("Поприветствуй участников чата!") #await tg_bot.send_message(chat_id=constants.TG_FRIEND_GROUP_ID, text=hi_message) if constants.TG_CHANNEL_ID: asyncio.create_task(scheduler(load_news_minutes=constants.TG_CHANNEL_NEWS_UPDATE_PERIOD_MINUTES, publish_item_minutes=constants.TG_CHANNEL_PUBLICATION_PERIOD_MINUTES, publish_func=publish_to_channel, base_url=constants.TG_CHANNEL_API_BASE_URL, api_key=constants.TG_CHANNEL_SUMMARIZATION_KEY, model=constants.TG_CHANNEL_API_MODEL )) except Exception as e: logging.error(f"failed to send hello message! {str(e)}") if FRIEND_GROUP_BOT.client is not None: await FRIEND_GROUP_BOT.client.close() if PRIVATE_BOT.client is not None: await PRIVATE_BOT.client.close() await dp.stop_polling() exit(os.EX_CONFIG) async def publish_to_channel(text: str): if constants.TG_CHANNEL_ID: await tg_bot.send_message(text=text, chat_id=constants.TG_CHANNEL_ID, parse_mode=ParseMode.HTML, disable_web_page_preview=True) async def send_random_sticker(chat_id): sticker_id = choice(constants.TG_STICKER_LIST) # say hi to everyone await tg_bot.send_sticker( sticker=sticker_id, chat_id=chat_id) @dp.message(F.chat.type == enums.ChatType.PRIVATE) async def private_message(message: types.Message): if not PRIVATE_BOT.check_master(message.from_user.id, message.md_text): reply_text = f"Я не отвечаю на вопросы в личных беседах с незакомыми людьми (если это конечно не мой Господин " \ f"Создатель снизошёл до меня). Я передам ваше соообщение мастеру." 
await tg_bot.send_message(constants.TG_MASTER_ID, f"{message.from_user.id}: {message.md_text}") else: await tg_bot.send_chat_action(message.chat.id, 'typing') user_text = await _get_message_text(message) await tg_bot.send_chat_action(message.chat.id, 'typing') reply_text = await PRIVATE_BOT.heed_and_reply(message=user_text) chunks = split_text(reply_text, MAX_MESSAGE_LENGTH) for chunk in chunks: await message.reply(text=chunk) @dp.message(F.chat.id == constants.TG_FRIEND_GROUP_ID) async def group_message(message: types.Message): if is_reply(message) or FRIEND_GROUP_BOT.should_react(message.md_text): await tg_bot.send_chat_action(message.chat.id, 'typing') user_text = await _get_message_text(message) logging.getLogger().info(f"group_message: from {message.from_user.full_name} in {message.chat.title} processed") await tg_bot.send_chat_action(message.chat.id, 'typing') # not using author not to send usernames to openai :) reply_text = await FRIEND_GROUP_BOT.heed_and_reply(user_text) # author=message.from_user.full_name chunks = split_text(reply_text, MAX_MESSAGE_LENGTH) for chunk in chunks: await message.reply(text=chunk) if random.random() < 0.1: await send_random_sticker(chat_id=message.chat.id) else: pass # for now we just ignore all non-related messages, even not putting them into history # await FRIEND_GROUP_BOT.heed(message=message.text, author=message.from_user.full_name) def is_reply(message: types.Message): if message.reply_to_message and message.reply_to_message.from_user.id == tg_bot.id: return True def _apply_plugins(bots: List): def apply_plugin(plugin): for bot in bots: bot.plugins.append(plugin) if constants.IMAGE_SUMMARIZATION_KEY: image_url_plugin = ImageSummaryPlugin(model=constants.IMAGE_SUMMARIZATION_MODEL, base_url=constants.IMAGE_SUMMARIZATION_API_BASE_URL, api_key=constants.IMAGE_SUMMARIZATION_KEY, summarization_request=constants.IMAGE_SUMMARIZATION_REQUEST) apply_plugin(image_url_plugin) if constants.SUMMARIZATION_KEY: sum_youtube_plugin = YoutubePlugin(model=constants.SUMMARIZATION_MODEL, base_url=constants.SUMMARIZATION_API_BASE_URL, api_key=constants.SUMMARIZATION_KEY, summarization_request=constants.SUMMARIZATION_REQUEST) apply_plugin(sum_youtube_plugin)
smart_bot_class = None # Telegram bot tg_bot: Bot = None bot_me: User = None dp = Dispatcher() # Open AI bot instances. # TODO: upper level class to create FRIEND_GROUP_BOT: InteractorOpenAI = None PRIVATE_BOT: InteractorOpenAI = None MAX_TG_MESSAGE_LEN = 4096 commands = {} def start(bot_class): """ runs the executor polling the dispatcher for incoming messages :param bot_class: the bot class to use :return: """ global smart_bot_class global tg_bot smart_bot_class = bot_class dp.startup.register(on_startup) tg_bot = Bot(token=constants.TG_BOT_KEY) dp.run_polling(tg_bot, skip_updates=True) async def on_startup(bot: Bot): try: global bot_me global FRIEND_GROUP_BOT global PRIVATE_BOT if bot_me is None: bot_me = await bot.get_me() FRIEND_GROUP_BOT = smart_bot_class(max_messages=constants.TG_BOT_MAX_HISTORY, master_id=constants.TG_MASTER_ID, name=bot_me.first_name, who_am_i=constants.OPENAI_WHO_AM_I, reaction_calls=constants.TG_REACTION_CALLS) PRIVATE_BOT = smart_bot_class(max_messages=constants.TG_BOT_MAX_HISTORY, master_id=constants.TG_MASTER_ID, name=bot_me.first_name, who_am_i=constants.OPENAI_WHO_AM_I, reaction_calls=constants.TG_REACTION_CALLS) # Initialize message processing plugins _apply_plugins([FRIEND_GROUP_BOT, PRIVATE_BOT]) FRIEND_GROUP_BOT.defaults.reaction_calls.append(bot_me.username) FRIEND_GROUP_BOT.defaults.reaction_calls.append(bot_me.first_name) #await send_random_sticker(chat_id=constants.TG_FRIEND_GROUP_ID) #hi_message = await FRIEND_GROUP_BOT.heed_and_reply("Поприветствуй участников чата!") #await tg_bot.send_message(chat_id=constants.TG_FRIEND_GROUP_ID, text=hi_message) if constants.TG_CHANNEL_ID: asyncio.create_task(scheduler(load_news_minutes=constants.TG_CHANNEL_NEWS_UPDATE_PERIOD_MINUTES, publish_item_minutes=constants.TG_CHANNEL_PUBLICATION_PERIOD_MINUTES, publish_func=publish_to_channel, base_url=constants.TG_CHANNEL_API_BASE_URL, api_key=constants.TG_CHANNEL_SUMMARIZATION_KEY, model=constants.TG_CHANNEL_API_MODEL )) except Exception as e: logging.error(f"failed to send hello message! {str(e)}") if FRIEND_GROUP_BOT.client is not None: await FRIEND_GROUP_BOT.client.close() if PRIVATE_BOT.client is not None: await PRIVATE_BOT.client.close() await dp.stop_polling() exit(os.EX_CONFIG) async def publish_to_channel(text: str): if constants.TG_CHANNEL_ID: await tg_bot.send_message(text=text, chat_id=constants.TG_CHANNEL_ID, parse_mode=ParseMode.HTML, disable_web_page_preview=True) async def send_random_sticker(chat_id): sticker_id = choice(constants.TG_STICKER_LIST) # say hi to everyone await tg_bot.send_sticker( sticker=sticker_id, chat_id=chat_id) @dp.message(F.chat.type == enums.ChatType.PRIVATE) async def private_message(message: types.Message): if not PRIVATE_BOT.check_master(message.from_user.id, message.md_text): reply_text = f"Я не отвечаю на вопросы в личных беседах с незакомыми людьми (если это конечно не мой Господин " \ f"Создатель снизошёл до меня). Я передам ваше соообщение мастеру." 
await tg_bot.send_message(constants.TG_MASTER_ID, f"{message.from_user.id}: {message.md_text}") else: await tg_bot.send_chat_action(message.chat.id, 'typing') user_text = await _get_message_text(message) await tg_bot.send_chat_action(message.chat.id, 'typing') reply_text = await PRIVATE_BOT.heed_and_reply(message=user_text) chunks = split_text(reply_text, MAX_MESSAGE_LENGTH) for chunk in chunks: await message.reply(text=chunk) @dp.message(F.chat.id == constants.TG_FRIEND_GROUP_ID) async def group_message(message: types.Message): if is_reply(message) or FRIEND_GROUP_BOT.should_react(message.md_text): await tg_bot.send_chat_action(message.chat.id, 'typing') user_text = await _get_message_text(message) logging.getLogger().info(f"group_message: from {message.from_user.full_name} in {message.chat.title} processed") await tg_bot.send_chat_action(message.chat.id, 'typing') # not using author not to send usernames to openai :) reply_text = await FRIEND_GROUP_BOT.heed_and_reply(user_text) # author=message.from_user.full_name chunks = split_text(reply_text, MAX_MESSAGE_LENGTH) for chunk in chunks: await message.reply(text=chunk) if random.random() < 0.1: await send_random_sticker(chat_id=message.chat.id) else: pass # for now we just ignore all non-related messages, even not putting them into history # await FRIEND_GROUP_BOT.heed(message=message.text, author=message.from_user.full_name) def is_reply(message: types.Message): if message.reply_to_message and message.reply_to_message.from_user.id == tg_bot.id: return True def _apply_plugins(bots: List): def apply_plugin(plugin): for bot in bots: bot.plugins.append(plugin) if constants.IMAGE_SUMMARIZATION_KEY: image_url_plugin = ImageSummaryPlugin(model=constants.IMAGE_SUMMARIZATION_MODEL, base_url=constants.IMAGE_SUMMARIZATION_API_BASE_URL, api_key=constants.IMAGE_SUMMARIZATION_KEY, summarization_request=constants.IMAGE_SUMMARIZATION_REQUEST) apply_plugin(image_url_plugin) if constants.SUMMARIZATION_KEY: sum_youtube_plugin = YoutubePlugin(model=constants.SUMMARIZATION_MODEL, base_url=constants.SUMMARIZATION_API_BASE_URL, api_key=constants.SUMMARIZATION_KEY, summarization_request=constants.SUMMARIZATION_REQUEST) apply_plugin(sum_youtube_plugin)
sum_web_plugin = WeblinkSummaryPlugin(model=constants.SUMMARIZATION_MODEL,
5
2023-11-11 18:39:28+00:00
8k
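A recurring mechanic in the kibernikto record is the overflow guard in `InteractorOpenAI`: the chat history lives in a bounded `deque`, `token_overflow` approximates token usage by word count, and `_aware_overflow` either summarizes the history or pops the oldest messages until the budget fits. A stripped-down sketch of the non-summarizing branch (`aware_overflow` and `history` are illustrative names; the 3000-word budget matches `MAX_WORD_COUNT` in the snippet):

from collections import deque

MAX_WORD_COUNT = 3000  # same rough words-not-tokens budget as the snippet

def token_overflow(messages):
    # True when the summed word count of the history exceeds the budget
    return sum(len(m["content"].split()) for m in messages) > MAX_WORD_COUNT

def aware_overflow(messages):
    # non-summarizing branch: drop the oldest messages until the budget fits
    while messages and token_overflow(messages):
        messages.popleft()

history = deque(
    [{"role": "user", "content": "word " * 2000},
     {"role": "assistant", "content": "word " * 2000}],
    maxlen=10,
)
aware_overflow(history)
assert len(history) == 1 and not token_overflow(history)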
bytedance/LapNet
lapnet/networks/lapnet.py
[ { "identifier": "envelopes", "path": "lapnet/envelopes.py", "snippet": "_MAX_POLY_ORDER = 5 # highest polynomial used in envelopes\n PRE_ORBITAL = enum.auto()\n PRE_DETERMINANT = enum.auto()\n POST_DETERMINANT = enum.auto()\n ISOTROPIC = enum.auto()\n ABS_ISOTROPIC = enum.auto()\n DIAGONAL = enum.auto()\n FULL = enum.auto()\n NULL = enum.auto()\n STO = enum.auto()\n STO_POLY = enum.auto()\n OUTPUT = enum.auto()\n EXACT_CUSP = enum.auto()\nclass EnvelopeType(enum.Enum):\nclass EnvelopeLabel(enum.Enum):\nclass EnvelopeInit(Protocol):\nclass EnvelopeApply(Protocol):\nclass Envelope:\n def __call__(\n self,\n natom: int,\n output_dims: Union[int, Sequence[int]],\n hf: Optional[scf.Scf],\n ndim: int) -> Union[Mapping[str, Any], Sequence[Mapping[str, Any]]]:\n def __call__(self, *, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n **kwargs: jnp.ndarray) -> jnp.ndarray:\ndef _apply_covariance(x: jnp.ndarray, y: jnp.ndarray) -> jnp.ndarray:\ndef make_isotropic_envelope(is_abs=False) -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_diagonal_envelope() -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_full_envelope() -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_null_envelope() -> Envelope:\n def init(natom: int, output_dims: Sequence[int], hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Sequence[Mapping[str, jnp.ndarray]]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray,\n r_ee: jnp.ndarray) -> jnp.ndarray:\ndef make_sto_envelope() -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray, n: jnp.ndarray) -> jnp.ndarray:\ndef make_sto_poly_envelope() -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_output_envelope() -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef make_exact_cusp_envelope(nspins: Tuple[int, int],\n charges: jnp.ndarray) -> Envelope:\n def init(natom: int, output_dims: int, hf: Optional[scf.Scf] = None,\n ndim: int = 3) -> Mapping[str, jnp.ndarray]:\n def apply(*, ae: jnp.ndarray, r_ae: jnp.ndarray, r_ee: jnp.ndarray,\n pi: jnp.ndarray, sigma: jnp.ndarray) -> jnp.ndarray:\ndef get_envelope(\n envelope_label: EnvelopeLabel,\n **kwargs: Any,\n) -> Envelope:" }, { "identifier": "network_blocks", "path": "lapnet/networks/network_blocks.py", "snippet": "def 
array_partitions(sizes: Sequence[int]) -> Sequence[int]:\ndef init_linear_layer(key: chex.PRNGKey,\n in_dim: int,\n out_dim: int,\n include_bias: bool = True) -> Mapping[str, jnp.ndarray]:\ndef linear_layer(x: jnp.ndarray,\n w: jnp.ndarray,\n b: Optional[jnp.ndarray] = None) -> jnp.ndarray:\ndef slogdet(x):\ndef individual_slogdet(xs: Sequence[jnp.ndarray],\n w: Optional[jnp.ndarray] = None):\ndef logdet_matmul(xs: Sequence[jnp.ndarray],\n w: Optional[jnp.ndarray] = None) -> jnp.ndarray:" }, { "identifier": "CrossAttentionLayer", "path": "lapnet/networks/transformer_blocks.py", "snippet": "class CrossAttentionLayer:\n attention: MultiheadCrossAttention\n layernorm1: LayerNormBlock\n layernorm2: LayerNormBlock\n layernorm3: LayerNormBlock" }, { "identifier": "LayerNormBlock", "path": "lapnet/networks/transformer_blocks.py", "snippet": "class LayerNormBlock(nn.Module):\n \"\"\"LayerNorm Block, with nn.Module as base class.\n This ensures the jax compling suits with flax. \n\n \"\"\"\n use_layernorm: bool\n\n def setup(self):\n self.norm = LayerNorm()\n\n def __call__(self, x: jnp.ndarray) -> jnp.ndarray:\n return self.norm(x) if self.use_layernorm else x" }, { "identifier": "MultiheadCrossAttention", "path": "lapnet/networks/transformer_blocks.py", "snippet": "class MultiheadCrossAttention(nn.Module):\n \"\"\"\n This module is adopted from MultiheadAttention class. It is used for cross attention in LapNet.\n The dot product function in this module leverages the sparsity in the LapNet.\n WARN: if you want to use this crossattention module to other architecture with the sparsity is different from LapNet,\n you need to replace the `attention_sparse_dot_product` function with `attention_dot_product`.\n \"\"\"\n\n # This is the hyper-parameters to be specified\n # then the class is firstly instantiated.\n output_dim : int # Output dimension\n num_heads : int # Number of parallel heads (h)\n\n def setup(self):\n # Stack all weight matrices 1...h and W^Q, W^K, W^V together for efficiency\n # Note that in many implementations you see \"bias=False\" which is optional\n self.qk_proj = Dense(2 * self.output_dim,\n kernel_init=nn.initializers.xavier_uniform(), # Weights with Xavier uniform init\n bias_init=nn.initializers.zeros # Bias init with zeros\n )\n self.v_proj = Dense(self.output_dim,\n kernel_init=nn.initializers.xavier_uniform(), # Weights with Xavier uniform init\n bias_init=nn.initializers.zeros # Bias init with zeros\n )\n self.o_proj = Dense(self.output_dim,\n kernel_init=nn.initializers.xavier_uniform(),\n bias_init=nn.initializers.zeros)\n\n def __call__(self, h: Tuple[jnp.ndarray]) -> Sequence[jnp.ndarray]:\n hs, hd = h\n n_elec = hs.shape[0]\n qk = self.qk_proj(hs)\n\n # Separate Q, K from linear output\n q, k = jnp.array_split(qk, 2, axis=-1)\n v = self.v_proj(hd)\n\n trans = lambda x: x.reshape(n_elec, self.num_heads, -1).transpose(1, 0, 2)\n q, k, v = trans(q), trans(k), trans(v)\n\n # Determine value outputs\n\n values, _ = attention_sparse_dot_product(q, k, v)\n\n values = values.transpose(1, 0, 2) # [N_elec, Head, Dims]\n values = values.reshape(n_elec, self.output_dim)\n values = self.o_proj(values)\n\n return values, None" }, { "identifier": "construct_input_features", "path": "lapnet/networks/utils.py", "snippet": "def construct_input_features(\n pos: jnp.ndarray,\n atoms: jnp.ndarray,\n ndim: int = 3) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Constructs inputs to Fermi Net from raw electron and atomic positions.\n\n Args:\n pos: electron positions. 
Shape (nelectrons*ndim,).\n atoms: atom positions. Shape (natoms, ndim).\n ndim: dimension of system. Change only with caution.\n\n Returns:\n ae, ee, r_ae, r_ee tuple, where:\n ae: atom-electron vector. Shape (nelectron, natom, ndim).\n ee: electron-electron vector. Shape (nelectron, nelectron, ndim).\n r_ae: atom-electron distance. Shape (nelectron, natom, 1).\n r_ee: electron-electron distance. Shape (nelectron, nelectron, 1).\n The diagonal terms in r_ee are masked out such that the gradients of these\n terms are also zero.\n \"\"\"\n assert atoms.shape[1] == ndim\n ae = jnp.reshape(pos, [-1, 1, ndim]) - atoms[None, ...]\n ee = jnp.reshape(pos, [1, -1, ndim]) - jnp.reshape(pos, [-1, 1, ndim])\n\n r_ae = jnp.linalg.norm(ae, axis=2, keepdims=True)\n # Avoid computing the norm of zero, as it has undefined grad\n n = ee.shape[0]\n r_ee = (\n jnp.linalg.norm(ee + jnp.eye(n)[..., None], axis=-1) * (1.0 - jnp.eye(n)))\n\n return ae, ee, r_ae, r_ee[..., None]" }, { "identifier": "init_jastrow_weights", "path": "lapnet/networks/utils.py", "snippet": "def init_jastrow_weights(key: chex.PRNGKey, jas_w_init: float = 0.0) -> Mapping[str, jnp.ndarray]:\n return {\n 'alpha_par': jnp.array(jas_w_init),\n 'alpha_anti': jnp.array(jas_w_init)\n }" } ]
import functools import attr import chex import jax import lapjax.numpy as jnp from typing import Sequence, Tuple from lapnet import envelopes from lapnet.networks import network_blocks from .protocol import * from .transformer_blocks import ( CrossAttentionLayer, LayerNormBlock, MultiheadCrossAttention, ) from .utils import construct_input_features, init_jastrow_weights
4,845
Args: key (chex.PRNGKey): JAX RNG state. atoms (jnp.ndarray): (natom, ndim) array of atom positions. nspins (Tuple[int, ...]): A tuple representing the number of spin-up and spin-down electrons. Should have length 2. options (LapNetOptions): Network options. """ if not isinstance(options, LapNetOptions): raise ValueError("options should be LapNetOptions") if options.envelope.apply_type != envelopes.EnvelopeType.PRE_DETERMINANT: raise ValueError('In LapNet, the envelope type must be `PRE_DETERMINANT`.') if not options.full_det: raise ValueError('In LapNet, the full_det option must be true.') natom, ndim = atoms.shape params = {} # The dict of all parameters to be optimized. # num_features_in and num_features_out represent # the dimension of initial array as well as the Transformer input dimension. # params['input'] is a linear layer weights. num_features_in, num_features_out = natom * (ndim + 1) + 1, options.hidden_dims[0][0] key, subkey = jax.random.split(key) params['input'] = network_blocks.init_linear_layer( subkey, num_features_in, num_features_out, include_bias=True ) # The input dimension of each layer dims_in = [num_features_out] + [w[0] for w in options.hidden_dims[:-1]] # Initialize the parameters for transformer backbone. params['transformer'] = [] for dim_in, layer in zip(dims_in, options.atten_layers): dic = {} input_example = jnp.ones((sum(nspins), dim_in)) output_example = jnp.ones((sum(nspins), layer.attention.output_dim)) key, attkey, mlpkey, sparskey, lnkey = jax.random.split(key, num = 5) dic['attention'] = layer.attention.init(attkey, [input_example, input_example]) dic['MLP'] = network_blocks.init_linear_layer( key=mlpkey, in_dim=layer.attention.output_dim, out_dim=layer.attention.output_dim, include_bias=True ) dic['spars'] = [network_blocks.init_linear_layer( key=key, in_dim=layer.attention.output_dim, out_dim=layer.attention.output_dim, include_bias=True ) for key in jax.random.split(sparskey, num=2)] ln1key, ln2key, ln3key = jax.random.split(lnkey, num=3) dic['ln1'] = layer.layernorm1.init(ln1key, input_example) dic['ln2'] = layer.layernorm2.init(ln2key, input_example) dic['ln3'] = layer.layernorm3.init(ln3key, output_example) params['transformer'].append(dic) # Construct Orbital Projection output_dim = sum(nspins) * options.determinants if not options.orbitals_spin_split: # Construct Orbital Projection key, subkey = jax.random.split(key, num=2) params['orbital'] = network_blocks.init_linear_layer( key=subkey, in_dim=options.hidden_dims[-1][0], out_dim=output_dim, include_bias=options.bias_orbitals) # Construct Envelope params['envelope'] = options.envelope.init( natom=natom, output_dims=[output_dim], hf=None, ndim=ndim)[0] else: params['orbital'] = [] params['envelope'] = [] for i in range(len(nspins)): # Construct Orbital Projection key, subkey = jax.random.split(key, num=2) params['orbital'].append(network_blocks.init_linear_layer( key=subkey, in_dim=options.hidden_dims[-1][0], out_dim=output_dim, include_bias=options.bias_orbitals)) # Construct Envelope params['envelope'].append(options.envelope.init( natom=natom, output_dims=[output_dim], hf=None, ndim=ndim)[0]) # Construct Jastrow factor params['jastrow'] = init_jastrow_weights(key, options.jas_w_init) return params def lapnet_orbitals( params, pos: jnp.ndarray, atoms: jnp.ndarray, nspins: Tuple[int, ...], options: LapNetOptions=LapNetOptions(), ): """Forward evaluation of the LapNet up to the orbitals. 
Args: params: A dictionary of parameters, contains fields: `input`: linear layer mapping initial array to transformer inputs. `transformer`: parameters used in transformer backbones. `orbital`: linear layer mapping transformer outputs to orbitals. `envelope`: parameters used in the envelope function. `jastrow`: parameters used in the Jastrow factor. pos (jnp.ndarray): The electron positions, with shape (3N,). atoms (jnp.ndarray): The atom positions. nspins (Tuple[int, ...]): Tuple with number of spin up and spin down electrons. Should have length 2. options (LapNetOptions): Network options. Returns: Binary tuple containing: One matrix with shape (K, N, N), where the second dimension is equivariant to the input. (ae, r_ae, r_ee), representing the atom-electron vectors, distances and e-e distances. """
# Copyright 2023 Bytedance Ltd. and/or its affiliate # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @attr.s(auto_attribs=True, kw_only=True) class LapNetOptions: """Options controlling the LapNet architecture. Attributes: ndim: dimension of system. Change only with caution. hidden_dims: Tuple of pairs, where each pair contains the number of hidden units and number of MultiheadCrossAttention. The number of layers is given by the length of the tuple. determinants: Number of determinants to use. full_det: WARNING: please keep true for lapnet bias_orbitals: If true, include a bias in the final linear layer to shape the outputs into orbitals. envelope_label: Envelope to use to impose orbitals go to zero at infinity. See envelopes module. envelope: Envelope object to create and apply the multiplicative envelope. atten_layers: Transformer layers used by lapnet use_layernorm: If True, use layernorm in the attention block jas_w_init: Initialization Value of jastrow factor orbitals_spin_split: If true, use different parameters for alpha and beta electrons in the orbital and envelope function. """ ndim: int = 3 hidden_dims: Tuple = ((256, 4), (256, 4), (256, 4), (256, 4)) determinants: int = 16 full_det: bool = True bias_orbitals: bool = False envelope_label: envelopes.EnvelopeLabel = envelopes.EnvelopeLabel.ABS_ISOTROPIC envelope: envelopes.Envelope = attr.ib( default=attr.Factory( lambda self: envelopes.get_envelope(self.envelope_label), takes_self=True)) atten_layers: Sequence[CrossAttentionLayer] = [] use_layernorm: bool = False jas_w_init: float = 0.0 orbitals_spin_split: bool = True def get_multihead_list(hidden_dims: LayerArgs, layernorm: bool = False) -> Sequence[CrossAttentionLayer]: """Return the backbone of transformer as a list of multihead layers. Args: hidden_dims (LayerArgs): Each element is a tuple describing (output_dim, num_heads). layernorm (bool): Whether to use layernorm in the attention block Returns: list: Sequence of MultiheadCrossAttention. """ atten_layers = [MultiheadCrossAttention( output_dim=output_dim, num_heads=num_heads,) for (output_dim, num_heads) in hidden_dims] ln1 = [LayerNormBlock(use_layernorm=layernorm) for _ in hidden_dims] ln2 = [LayerNormBlock(use_layernorm=layernorm) for _ in hidden_dims] ln3 = [LayerNormBlock(use_layernorm=layernorm) for _ in hidden_dims] return [CrossAttentionLayer( attention=u, layernorm1=v, layernorm2=w, layernorm3=x) for u, v, w, x in zip(atten_layers, ln1, ln2, ln3)] def init_lapnet_params( key: chex.PRNGKey, atoms: jnp.ndarray, nspins: Tuple[int, ...], options: LapNetOptions = LapNetOptions(), ) -> ParamTree: """Initializes parameters for the LapNet Neural Network. Args: key (chex.PRNGKey): JAX RNG state. atoms (jnp.ndarray): (natom, ndim) array of atom positions. nspins (Tuple[int, ...]): A tuple representing the number of spin-up and spin-down electrons. Should have length 2. options (LapNetOptions): Network options. 
""" if not isinstance(options, LapNetOptions): raise ValueError("options should be LapNetOptions") if options.envelope.apply_type != envelopes.EnvelopeType.PRE_DETERMINANT: raise ValueError('In LapNet, the envelope type must be `PRE_DETERMINANT`.') if not options.full_det: raise ValueError('In LapNet, the full_det option must be true.') natom, ndim = atoms.shape params = {} # The dict of all parameters to be optimized. # num_features_in and num_features_out represent # the dimension of initial array as well as the Transformer input dimension. # params['input'] is a linear layer weights. num_features_in, num_features_out = natom * (ndim + 1) + 1, options.hidden_dims[0][0] key, subkey = jax.random.split(key) params['input'] = network_blocks.init_linear_layer( subkey, num_features_in, num_features_out, include_bias=True ) # The input dimension of each layer dims_in = [num_features_out] + [w[0] for w in options.hidden_dims[:-1]] # Initialize the parameters for transformer backbone. params['transformer'] = [] for dim_in, layer in zip(dims_in, options.atten_layers): dic = {} input_example = jnp.ones((sum(nspins), dim_in)) output_example = jnp.ones((sum(nspins), layer.attention.output_dim)) key, attkey, mlpkey, sparskey, lnkey = jax.random.split(key, num = 5) dic['attention'] = layer.attention.init(attkey, [input_example, input_example]) dic['MLP'] = network_blocks.init_linear_layer( key=mlpkey, in_dim=layer.attention.output_dim, out_dim=layer.attention.output_dim, include_bias=True ) dic['spars'] = [network_blocks.init_linear_layer( key=key, in_dim=layer.attention.output_dim, out_dim=layer.attention.output_dim, include_bias=True ) for key in jax.random.split(sparskey, num=2)] ln1key, ln2key, ln3key = jax.random.split(lnkey, num=3) dic['ln1'] = layer.layernorm1.init(ln1key, input_example) dic['ln2'] = layer.layernorm2.init(ln2key, input_example) dic['ln3'] = layer.layernorm3.init(ln3key, output_example) params['transformer'].append(dic) # Construct Orbital Projection output_dim = sum(nspins) * options.determinants if not options.orbitals_spin_split: # Construct Orbital Projection key, subkey = jax.random.split(key, num=2) params['orbital'] = network_blocks.init_linear_layer( key=subkey, in_dim=options.hidden_dims[-1][0], out_dim=output_dim, include_bias=options.bias_orbitals) # Construct Envelope params['envelope'] = options.envelope.init( natom=natom, output_dims=[output_dim], hf=None, ndim=ndim)[0] else: params['orbital'] = [] params['envelope'] = [] for i in range(len(nspins)): # Construct Orbital Projection key, subkey = jax.random.split(key, num=2) params['orbital'].append(network_blocks.init_linear_layer( key=subkey, in_dim=options.hidden_dims[-1][0], out_dim=output_dim, include_bias=options.bias_orbitals)) # Construct Envelope params['envelope'].append(options.envelope.init( natom=natom, output_dims=[output_dim], hf=None, ndim=ndim)[0]) # Construct Jastrow factor params['jastrow'] = init_jastrow_weights(key, options.jas_w_init) return params def lapnet_orbitals( params, pos: jnp.ndarray, atoms: jnp.ndarray, nspins: Tuple[int, ...], options: LapNetOptions=LapNetOptions(), ): """Forward evaluation of the LapNet up to the orbitals. Args: params: A dictionary of parameters, contain fileds: `input`: linear layer mapping initial array to transformer inputs. `transformer`: parameters used in transformer backbones. `orbital`: linear layer mapping transformer outputs to orbitals. `envelope`: parameters used in the envelope function. `jastrow`: parameters used in the Jastrow factor. 
pos (jnp.ndarray): The electron positions, with shape (3N,). atoms (jnp.ndarray): The atom positions. nspins (Tuple[int, ...]): Tuple with number of spin up and spin down electrons. Should have length 2. options (LapNetOptions): Network options. Returns: Binary tuple containg: One matrix with shape (K, N, N), where the second dimension is equivariant equivariant to the input. (ae, r_ae, r_ee), representing the atom-electron vectors, distrances and e-e distrances. """
ae, ee, r_ae, r_ee = construct_input_features(pos, atoms)
5
2023-11-13 08:19:53+00:00
8k
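The gold next line above calls `construct_input_features`, which this row's cropped code does not show. For orientation, below is a minimal sketch of the conventional FermiNet-style feature construction such a helper typically performs; the function name and return order are taken from the gold line, but the body (in particular the diagonal offset that keeps `r_ee` differentiable at zero separation) is an assumption, not the repository's code.

import jax.numpy as jnp


def construct_input_features_sketch(pos, atoms, ndim=3):
    # pos: (N * ndim,) flattened electron positions; atoms: (natom, ndim).
    electrons = pos.reshape(-1, ndim)                    # (N, ndim)
    ae = electrons[:, None, :] - atoms[None, :, :]       # (N, natom, ndim)
    ee = electrons[:, None, :] - electrons[None, :, :]   # (N, N, ndim)
    r_ae = jnp.linalg.norm(ae, axis=-1, keepdims=True)   # (N, natom, 1)
    # Offset the diagonal before taking the norm so its gradient stays finite
    # at zero separation, then zero the diagonal back out (assumed trick).
    n = electrons.shape[0]
    eye = jnp.eye(n)[..., None]
    r_ee = jnp.linalg.norm(ee + eye, axis=-1, keepdims=True) * (1.0 - eye)
    return ae, ee, r_ae, r_ee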
civrealm/civrealm
src/civrealm/envs/freeciv_wrapper/action_wrapper.py
[ { "identifier": "PersistentCityProduction", "path": "src/civrealm/envs/freeciv_wrapper/city_wrapper.py", "snippet": "class PersistentCityProduction(Wrapper):\n def __init__(self, env):\n super().__init__(env)\n self.__turn = -1\n\n def info(self, info, observation):\n for city_id, city in observation.get(\"city\", {}).items():\n if (\n (city[\"owner\"] == self.unwrapped.civ_controller.player_ctrl.my_player_id)\n and (city[\"prod_process\"] != 0)\n and (self.__turn != city[\"turn_last_built\"] + 1)\n ):\n self._mask_out_city_production(\n info[\"available_actions\"][\"city\"][city_id]\n )\n self.__turn = info[\"turn\"]\n return info\n\n def _mask_out_city_production(self, city_info):\n for action in city_info:\n if action.startswith(\"produce\"):\n city_info[action] = False" }, { "identifier": "Wrapper", "path": "src/civrealm/envs/freeciv_wrapper/core.py", "snippet": "class Wrapper(gymnasium.Wrapper):\n def reset(self, *, seed=None, options=None, **kwargs):\n return self.env.reset(seed=seed, options=options, **kwargs)" }, { "identifier": "DiplomacyLoop", "path": "src/civrealm/envs/freeciv_wrapper/dipl_wrapper.py", "snippet": "class DiplomacyLoop(Wrapper):\n def __init__(self, env):\n self.is_negotiating = False\n self.dealing_with_incoming = False\n self.max_dipl_actions = 10\n self.dipl_action_left = 10\n self.__turn = -1\n super().__init__(CancelReturnedTreaties(env))\n\n def observation(self, observation, info):\n dipls = observation.get(\"dipl\", {})\n\n # agent is negotiating if clause map is non-empty\n self.is_negotiating = any(\n len(dipl[\"diplomacy_clause_map\"]) > 0 for dipl in dipls.values()\n )\n\n if self.__turn != info[\"turn\"] and self.is_negotiating:\n # start dealing with incoming at the start of turn\n self.dealing_with_incoming = True\n self.dipl_action_left = self.max_dipl_actions\n\n # if agent stop negotiating then it must stop dealing with incoming\n self.dealing_with_incoming = self.dealing_with_incoming and self.is_negotiating\n\n self.__turn = info[\"turn\"]\n\n return observation\n\n def action(self, action):\n if action is None:\n return None\n if action[0] == \"dipl\":\n self.dipl_action_left -= 1\n return action\n\n def info(self, info):\n if self.dealing_with_incoming:\n # deal with incoming with only accepting or cancelling treaty\n return self._accept_or_cancel(info)\n\n if self.is_negotiating and self.dipl_action_left <= 0:\n return self._mask_dipl(info)\n\n return info\n\n def _accept_or_cancel(self, info):\n info = self._mask_all_but_dipl(info)\n\n for player, dipl_actions in info[\"available_actions\"][\"dipl\"].items():\n accept_treaty = dipl_actions[f\"accept_treaty_{player}\"]\n stop_negotiation = dipl_actions[f\"stop_negotiation_{player}\"]\n\n for action in dipl_actions:\n # mask out all dipl actions\n dipl_actions[action] = False\n\n # restore accept_treaty and stop negotiation actions\n dipl_actions[f\"accept_treaty_{player}\"] = accept_treaty\n dipl_actions[f\"stop_negotiation_{player}\"] = stop_negotiation\n\n return info\n\n def _mask_dipl(self, info):\n actions = info[\"available_actions\"]\n\n def recursive_mask(actions):\n for name, action in actions.items():\n if isinstance(action, dict):\n actions[name] = recursive_mask(action)\n else:\n assert action in [True, False]\n actions[name] = False\n return actions\n\n actions[\"dipl\"] = recursive_mask(actions[\"dipl\"])\n\n info[\"available_actions\"] = actions\n return info\n\n def _mask_all_but_dipl(self, info):\n actions = info[\"available_actions\"]\n\n def recursive_mask(actions):\n 
for name, action in actions.items():\n if isinstance(action, dict):\n actions[name] = recursive_mask(action)\n else:\n assert action in [True, False]\n actions[name] = False\n return actions\n\n for name in list(actions.keys()):\n if name != \"dipl\":\n actions[name] = recursive_mask(actions[name])\n\n info[\"available_actions\"] = actions\n return info" }, { "identifier": "TruncateDiplCity", "path": "src/civrealm/envs/freeciv_wrapper/dipl_wrapper.py", "snippet": "class TruncateDiplCity(Wrapper):\n def __init__(self, env):\n config = env.get_wrapper_attr(\"config\")\n self.city_size = config[\"resize\"][\"city\"]\n self.others_city_size = config[\"resize\"][\"others_city\"]\n super().__init__(env)\n\n def info(self, info, observation):\n my_player_id = self.get_wrapper_attr(\"my_player_id\")\n city_ids = self.get_wrapper_attr(\"city_ids\")[: self.city_size]\n others_city_ids = self.get_wrapper_attr(\"others_city_ids\")[\n : self.others_city_size\n ]\n\n for player, actions in info[\"available_actions\"].get(\"dipl\", {}).items():\n for act_name in list(actions.keys()):\n args = act_name.split(\"TradeCity\")\n if len(args) > 1:\n post_args = args[1].split(\"_\")\n city = int(post_args[-3])\n if int(post_args[-2]) == player and city in others_city_ids:\n city_index = others_city_ids.index(city)\n elif city in city_ids:\n city_index = city_ids.index(city)\n else:\n del actions[act_name]\n continue\n trunc_name = f\"trunc_{args[0]}TradeCity_{city_index}_{post_args[-2]}_{post_args[-1]}\"\n actions[trunc_name] = (\n actions[act_name]\n if not self.unwrapped.civ_controller.city_ctrl.cities[city][\n \"capital\"\n ]\n else False\n )\n del actions[act_name]\n for no_city_index in range(len(city_ids), self.city_size):\n actions[\n f\"trunc_trade_city_clause_TradeCity_{no_city_index}_{my_player_id}_{player}\"\n ] = False\n actions[\n f\"trunc_remove_clause_TradeCity_{no_city_index}_{my_player_id}_{player}\"\n ] = False\n for no_city_index in range(len(others_city_ids), self.others_city_size):\n actions[\n f\"trunc_trade_city_clause_TradeCity_{no_city_index}_{player}_{my_player_id}\"\n ] = False\n actions[\n f\"trunc_remove_clause_TradeCity_{no_city_index}_{player}_{my_player_id}\"\n ] = False\n info[player] = actions\n return info\n\n def action(self, action):\n if action is None:\n return action\n if action[-1].startswith(\"trunc\"):\n args = action[-1].split(\"TradeCity\")\n post_args = args[1].split(\"_\")\n if int(post_args[-1]) == self.get_wrapper_attr(\"my_player_id\"):\n city_index = self.get_wrapper_attr(\"others_city_ids\")[\n int(post_args[-3])\n ]\n else:\n city_index = self.get_wrapper_attr(\"city_ids\")[int(post_args[-3])]\n action_name = (\n f\"{args[0][6:]}TradeCity_{city_index}_{post_args[-2]}_{post_args[-1]}\"\n )\n return action[0], action[1], action_name\n return action" }, { "identifier": "EmbarkWrapper", "path": "src/civrealm/envs/freeciv_wrapper/embark_wrapper.py", "snippet": "class EmbarkWrapper(Wrapper):\n \"\"\"\n Unify embark actions of all units to 'embark_{dir8}' where dir8 in `[0,...7]`\n indicating 8 directions.\n\n Sometimes a unit can embark multiple carrier on the same direction. 
In that\n case, the wrapper automatically choose the carrier with the smallest unit id.\n\n Attributes\n ----------\n embarkable_units : dict\n a dict of embarkable units with key=(embarking_unit_id, dir8) and value=[carrier_ids]\n \"\"\"\n\n def __init__(self, env):\n self.embarkable_units = {}\n super().__init__(env)\n\n def action(self, action):\n \"\"\"\n Translate `embark_{dir8}` action into embark actions that can be handled by FreecivBaseEnv.\n \"\"\"\n if action is None:\n return action\n (actor_name, entity_id, action_name) = action\n if actor_name != \"unit\":\n return action\n if action_name[:6] != \"embark\":\n return action\n\n dir8 = int(action_name.split(\"_\")[-1])\n\n if len(self.embarkable_units.get((entity_id, dir8), [])) > 0:\n assert dir8 <= 8\n target_id = sorted(self.embarkable_units[(entity_id, dir8)])[0]\n action_name = f\"embark_{dir8}_{target_id}\"\n\n return (actor_name, entity_id, action_name)\n\n def info(self, info):\n \"\"\"\n Complete or modify embark actions in info['availble_actions']['unit']\n\n If a unit has no `embark_.*` action, then set all `embark_{dir8}` action to False\n\n If a unit has `embark_{dir}=True`, set all `embark_{other_dirs}` action to False\n\n If a unit has `embark_{carrier_id}_{dir}=True`, store that carrier_id\n and set its `embark_{dir8}` accordingly.\n \"\"\"\n\n self.embarkable_units = {}\n unit_actions = info[\"available_actions\"].get(\"unit\", {})\n\n if len(unit_actions) == 0:\n return info\n\n for unit_id, actions in unit_actions.items():\n unavailable_embarks = [\"embark_\" + f\"{i}\" for i in range(8)]\n for action in list(actions.keys()):\n if action[:6] != \"embark\":\n continue\n\n args = action.split(\"_\")\n\n if len(args) == 3:\n # action == embark_dir_id\n [dir8, target_id] = map(int, args[1::])\n if (unit_dir := (unit_id, dir8)) not in self.embarkable_units:\n self.embarkable_units[unit_dir] = [target_id]\n else:\n self.embarkable_units[unit_dir].append(target_id)\n actions.pop(action)\n embark_action = f\"embark_{dir8}\"\n else:\n # action == embark_dir\n assert (\n len(args) == 2\n ), f\"Expected embark_{{dir}}_{{target_id}},\\\n but got unsupported embark action name {action}\"\n dir8 = int(action.split(\"_\")[-1])\n embark_action = f\"embark_{dir8}\"\n actions[f\"embark_{dir8}\"] = True\n if embark_action in unavailable_embarks:\n unavailable_embarks.remove(embark_action)\n\n for embark_action in unavailable_embarks:\n # set unavailable embark actions to False\n actions[embark_action] = False\n\n info[\"available_actions\"][\"unit\"] = unit_actions\n\n return info" }, { "identifier": "CombineTechResearchGoal", "path": "src/civrealm/envs/freeciv_wrapper/tech_wrapper.py", "snippet": "class CombineTechResearchGoal(Wrapper):\n def __init__(self, env):\n self.tech_actions = {}\n self.techs_researched = -1\n self.researching = False\n self.__turn = -1\n super().__init__(env)\n\n def info(self, info, observation):\n self.tech_actions = {}\n info_tech = info[\"available_actions\"].get(\"tech\", {\"cur_player\": {}})[\n \"cur_player\"\n ]\n if len(info_tech) == 0:\n return info\n for tech_id, tech in observation[\"tech\"].items():\n tech_arg = f\"{tech['name']}_{tech_id}\"\n goal = info_tech.pop(f\"set_tech_goal_{tech_arg}\", False)\n tech_aciton = \"research \" + tech_arg\n info_tech[tech_aciton] = (\n info_tech.pop(f\"research_tech_{tech_arg}\", False) or goal\n )\n if goal:\n self.tech_actions[\"research \" + tech_arg] = f\"set_tech_goal_{tech_arg}\"\n else:\n self.tech_actions[\"research \" + tech_arg] = 
f\"research_tech_{tech_arg}\"\n info[\"available_actions\"][\"tech\"][\n self.get_wrapper_attr(\"my_player_id\")\n ] = info_tech\n info[\"available_actions\"][\"tech\"].pop(\"cur_player\")\n\n techs_researched = observation[\"player\"][\n self.unwrapped.civ_controller.player_ctrl.my_player_id\n ][\"techs_researched\"]\n\n if self.__turn != info[\"turn\"]:\n self.researching = self.researching and (\n techs_researched == self.techs_researched\n )\n self.techs_researched = techs_researched\n self.__turn = info[\"turn\"]\n\n return info\n\n def action(self, action):\n if action is None:\n return action\n if action[0] != \"tech\":\n return action\n self.researching = True\n return (action[0], \"cur_player\", self.tech_actions[action[2]])" }, { "identifier": "update", "path": "src/civrealm/envs/freeciv_wrapper/utils.py", "snippet": "def update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n d[k] = update(d.get(k, {}), v)\n else:\n d[k] = v\n return d" } ]
from copy import deepcopy
from typing import Any, Dict, Optional

from gymnasium import spaces

from civrealm.configs import fc_args
from civrealm.envs.freeciv_wrapper.tensor_base_wrapper import TensorBase
from civrealm.freeciv.utils.fc_types import (ACTIVITY_FORTIFIED,
                                             ACTIVITY_FORTIFYING,
                                             ACTIVITY_IDLE, ACTIVITY_SENTRY)

from .city_wrapper import PersistentCityProduction
from .core import Wrapper
from .dipl_wrapper import DiplomacyLoop, TruncateDiplCity
from .embark_wrapper import EmbarkWrapper
from .tech_wrapper import CombineTechResearchGoal
from .utils import update

import numpy as np
3,940
tensor_debug = fc_args["debug.tensor_debug"]


class TensorAction(Wrapper):
    """
    A wrapper that defines tensor action spaces, transforms tensor actions into
    actions that can be handled by a FreecivBaseEnv instance, and adds masks to
    observations.

    TensorAction is composed of five wrappers: `TruncateDiplCity`,
    `DiplomacyLoop`, `CombineTechResearchGoal`, `PersistentCityProduction`,
    and `EmbarkWrapper`.

    Parameters
    ----------
    env: TensorBase
        A FreecivBaseEnv instance that has been wrapped by TensorBase.

    Attributes
    ----------
    action_config: dict
        a dict of configs specifying the sizes of mutable entities and the
        action layout.
    mask: dict
        a dict of numpy ndarray masks indicating available actions and
        entities: 0 -> unavailable, 1 -> available.
    available_actions: dict
        cached info['available_actions'], a dict that indicates available
        actions.
    action_space: gymnasium.spaces.Dict
        a gymnasium.spaces.Dict with keys `['actor_type','city_id','unit_id',
        'dipl_id','city_action_type','unit_action_type','dipl_action_type',
        'gov_action_type','tech_action_type']`
    """

    def __init__(self, env: TensorBase):
        self.action_config = env.get_wrapper_attr("config")
        self.action_config["resize"]["dipl"] = self.action_config["resize"][
            "others_player"
        ]
        self.actor_type_list = self.action_config["actor_type_list"]
        self.available_actions = {}
        self.mask = {}
        self.__turn = -1
        self.__dealing_with_incoming = False
        super().__init__(
            TruncateDiplCity(
                DiplomacyLoop(
tensor_debug = fc_args["debug.tensor_debug"]


class TensorAction(Wrapper):
    """
    A wrapper that defines tensor action spaces, transforms tensor actions into
    actions that can be handled by a FreecivBaseEnv instance, and adds masks to
    observations.

    TensorAction is composed of five wrappers: `TruncateDiplCity`,
    `DiplomacyLoop`, `CombineTechResearchGoal`, `PersistentCityProduction`,
    and `EmbarkWrapper`.

    Parameters
    ----------
    env: TensorBase
        A FreecivBaseEnv instance that has been wrapped by TensorBase.

    Attributes
    ----------
    action_config: dict
        a dict of configs specifying the sizes of mutable entities and the
        action layout.
    mask: dict
        a dict of numpy ndarray masks indicating available actions and
        entities: 0 -> unavailable, 1 -> available.
    available_actions: dict
        cached info['available_actions'], a dict that indicates available
        actions.
    action_space: gymnasium.spaces.Dict
        a gymnasium.spaces.Dict with keys `['actor_type','city_id','unit_id',
        'dipl_id','city_action_type','unit_action_type','dipl_action_type',
        'gov_action_type','tech_action_type']`
    """

    def __init__(self, env: TensorBase):
        self.action_config = env.get_wrapper_attr("config")
        self.action_config["resize"]["dipl"] = self.action_config["resize"][
            "others_player"
        ]
        self.actor_type_list = self.action_config["actor_type_list"]
        self.available_actions = {}
        self.mask = {}
        self.__turn = -1
        self.__dealing_with_incoming = False
        super().__init__(
            TruncateDiplCity(
                DiplomacyLoop(
CombineTechResearchGoal(
5
2023-11-18 19:35:50+00:00
8k
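The gold next line above continues the nested `super().__init__(...)` call. Based on the five wrappers named in the `TensorAction` docstring and the module paths listed in this row's context, the composition plausibly closes as sketched below; the nesting order of the two innermost wrappers is an assumption, and `build_action_pipeline` is a hypothetical standalone helper, not the repository's code.

from civrealm.envs.freeciv_wrapper.city_wrapper import PersistentCityProduction
from civrealm.envs.freeciv_wrapper.dipl_wrapper import DiplomacyLoop, TruncateDiplCity
from civrealm.envs.freeciv_wrapper.embark_wrapper import EmbarkWrapper
from civrealm.envs.freeciv_wrapper.tech_wrapper import CombineTechResearchGoal


def build_action_pipeline(env):
    # Innermost first: unify embark actions, persist city production choices,
    # merge tech research/goal actions, run the diplomacy loop, and finally
    # truncate diplomacy city indices.
    return TruncateDiplCity(
        DiplomacyLoop(
            CombineTechResearchGoal(
                PersistentCityProduction(
                    EmbarkWrapper(env)
                )
            )
        )
    )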
RAIVNLab/MatFormer-OLMo
conftest.py
[ { "identifier": "DataConfig", "path": "olmo/config.py", "snippet": "class DataConfig(BaseConfig):\n paths: Optional[List[str]] = None\n datasets: Optional[Dict[str, List[str]]] = None\n pad_direction: PaddingDirection = PaddingDirection.right\n num_workers: int = 0\n drop_last: bool = False\n pin_memory: bool = False\n prefetch_factor: Optional[int] = None\n persistent_workers: bool = False\n timeout: int = 0" }, { "identifier": "ModelConfig", "path": "olmo/config.py", "snippet": "class ModelConfig(BaseConfig):\n \"\"\"\n OLMo (model) configuration.\n \"\"\"\n\n # Note that the defaults for these attributes are equivalent to the base GPT2 model.\n\n d_model: int = 768\n \"\"\"\n The hidden size of the model.\n \"\"\"\n\n n_heads: int = 12\n \"\"\"\n The number of self-attention heads.\n \"\"\"\n\n n_layers: int = 12\n \"\"\"\n The number of layers/blocks.\n \"\"\"\n\n mlp_ratio: int = 4\n \"\"\"\n The ratio of the inner MLP dimensionality to ``d_model``.\n \"\"\"\n\n activation_type: ActivationType = ActivationType.swiglu\n \"\"\"\n The activation function to use within the MLP layers.\n \"\"\"\n\n block_type: BlockType = BlockType.sequential\n \"\"\"\n The transformer block implementation.\n \"\"\"\n\n alibi: bool = False\n \"\"\"\n If ``True``, use ALiBi embeddings. Mutually exclusive with ``rope``.\n \"\"\"\n\n alibi_bias_max: float = 8.0\n \"\"\"\n Maximum absolute value of ALiBi bias.\n \"\"\"\n\n rope: bool = False\n \"\"\"\n Use rotary positional embeddings (RoPE). Mutually exclusive with ``alibi``.\n \"\"\"\n\n flash_attention: bool = False\n \"\"\"\n If ``True``, use ``FlashAttention``.\n \"\"\"\n\n attention_dropout: float = 0.1\n \"\"\"\n The dropout probability within the attention modules.\n \"\"\"\n\n multi_query_attention: bool = False\n \"\"\"\n Use the Multi-Query formulation of attention used in PaLM. This reduces the number of parameters\n and is more efficient during inference.\n \"\"\"\n\n attention_layer_norm: bool = False\n \"\"\"\n Apply layer norm to the keys and queries within the attention mechanism.\n This can help stabilize training.\n \"\"\"\n\n residual_dropout: float = 0.1\n \"\"\"\n The dropout probability for the MLP and attention output within each block.\n \"\"\"\n\n embedding_dropout: float = 0.1\n \"\"\"\n The dropout probability for embeddings.\n \"\"\"\n\n layer_norm_type: LayerNormType = LayerNormType.default\n \"\"\"\n The layernorm implementation to use.\n \"\"\"\n\n max_sequence_length: int = 1024\n \"\"\"\n The maximum input sequence length supported by the model.\n \"\"\"\n\n include_bias: bool = True\n \"\"\"\n Whether or not to include bias parameters in linear layers.\n In PaLM, they got rid of all bias terms because they found that large\n models tend to have near 0 bias terms anyway.\n \"\"\"\n\n vocab_size: int = 50257\n \"\"\"\n Vocabulary size of the model.\n \"\"\"\n\n embedding_size: Optional[int] = 50304\n \"\"\"\n The number of embeddings, i.e. the number of tokens. If set to ``None`` it will default\n to ``vocab_size``. If ``vocab_size`` is not a multiple of 128, setting this to the\n next multiple of 128 that's greater than ``vocab_size`` can improve throughput\n substantially.\n \"\"\"\n\n eos_token_id: int = 50256\n \"\"\"\n The ID of the end-of-sentence special token.\n \"\"\"\n\n pad_token_id: int = 50256\n \"\"\"\n The ID of the token to use for padding. Defaults to the ID of the EOS token.\n \"\"\"\n\n init_device: Optional[str] = None\n \"\"\"\n The torch device to use when initializing the model parameters, e.g. 
\"cpu\", \"cuda:0\", \"meta\".\n \"\"\"\n\n init_std: float = 0.02\n \"\"\"\n Standard deviation used when initializing parameters.\n \"\"\"\n\n precision: Optional[str] = None\n \"\"\"\n Precision used to train/evaluate with. You shouldn't set this directly.\n See :data:`TrainConfig.precision` instead.\n \"\"\"" }, { "identifier": "OptimizerConfig", "path": "olmo/config.py", "snippet": "class OptimizerConfig(BaseConfig):\n name: OptimizerType = OptimizerType.lionw\n learning_rate: float = 1.0e-4\n weight_decay: float = 0.01\n betas: Tuple[float, float] = (0.9, 0.95)\n no_decay_norm_and_bias: bool = True\n \"\"\"Do not apply weight decay to norms and biases.\"\"\"\n\n def __post_init__(self):\n self.betas = tuple(self.betas) # type: ignore[assignment]" }, { "identifier": "PaddingDirection", "path": "olmo/config.py", "snippet": "class PaddingDirection(StrEnum):\n right = \"right\"\n left = \"left\"" }, { "identifier": "SchedulerConfig", "path": "olmo/config.py", "snippet": "class SchedulerConfig(BaseConfig):\n name: SchedulerType = SchedulerType.cosine_with_warmup\n t_warmup: int = 100\n t_max: Optional[int] = None\n alpha_f: float = 0.1" }, { "identifier": "TokenizerConfig", "path": "olmo/config.py", "snippet": "class TokenizerConfig(BaseConfig):\n identifier: str = \"gpt2\"\n truncate_direction: TruncationDirection = TruncationDirection.right" }, { "identifier": "TrainConfig", "path": "olmo/config.py", "snippet": "class TrainConfig(BaseConfig):\n \"\"\"\n OLMo training configuration.\n \"\"\"\n\n run_name: Optional[str] = None\n \"\"\"\n The name of the run.\n \"\"\"\n\n seed: int = 6198\n \"\"\"\n Used to seed all initial RNG states.\n \"\"\"\n\n dry_run: bool = False\n \"\"\"\n If ``True``, don't actually train.\n \"\"\"\n\n model: ModelConfig = field(default_factory=ModelConfig)\n \"\"\"\n OLMo Model configuration.\n \"\"\"\n\n optimizer: OptimizerConfig = field(default_factory=OptimizerConfig)\n \"\"\"\n Optimizer configuration.\n \"\"\"\n\n scheduler: SchedulerConfig = field(default_factory=SchedulerConfig)\n \"\"\"\n Learning rate scheduler configuration.\n \"\"\"\n\n restore_base_learning_rate: bool = True\n \"\"\"\n Set to ``False`` if you want to restart with the base learning rate from the config, not the checkpoint.\n \"\"\"\n\n data: DataConfig = field(default_factory=DataConfig)\n \"\"\"\n Training data configuration.\n \"\"\"\n\n restore_dataloader: bool = True\n \"\"\"\n When restarting, restore the data loader to where it left off.\n If you restarting in order to train on a different dataset, set this to ``False``.\n \"\"\"\n\n fast_forward_batches: Optional[int] = None\n \"\"\"\n When restarting, use this to fast-forward the dataloader beyond the last checkpoint.\n This can be useful when restarting due to a loss spike in order to skip the data that\n corresponded to the spike.\n \"\"\"\n\n evaluators: List[EvaluatorConfig] = field(default_factory=list)\n \"\"\"\n Evaluation configurations.\n \"\"\"\n\n eval_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to run evaluations.\n \"\"\"\n\n tokenizer: TokenizerConfig = field(default_factory=TokenizerConfig)\n \"\"\"\n Tokenizer configuration.\n \"\"\"\n\n save_folder: str = \"./\"\n \"\"\"\n The directory to save checkpoints to.\n \"\"\"\n\n remote_save_folder: Optional[str] = None\n \"\"\"\n A folder in a cloud bucket to upload saved checkpoints to.\n \"\"\"\n\n save_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to save training state checkpoints that can be used for restarts.\n \"\"\"\n\n 
save_interval_unsharded: Optional[int] = None\n \"\"\"\n How often (if at all) to save the unsharded state to a single file.\n For large models it can be costly to save these, so it usually makes sense to save\n these less often than regular (sharded) training checkpoints.\n \"\"\"\n\n matformer_factor: int = 1\n\n save_num_checkpoints_to_keep: int = -1\n \"\"\"\n How many checkpoints to keep.\n \"\"\"\n\n save_num_unsharded_checkpoints_to_keep: int = -1\n \"\"\"\n How many unsharded checkpoints to keep.\n \"\"\"\n\n save_overwrite: bool = False\n \"\"\"\n If ``True``, overwrite any conflicting checkpoint files.\n \"\"\"\n\n force_save_unsharded: bool = False\n \"\"\"\n Save an unsharded checkpoint before training (even during a dry run).\n Use this option with `--load-path={PATH}` and `--dry_run` to convert a sharded\n checkpoint into an unsharded checkpoint.\n \"\"\"\n\n load_path: Optional[str] = None\n \"\"\"\n The path to a (sharded) training checkpoint to restore/resume from.\n \"\"\"\n\n max_duration: int = 10000\n \"\"\"\n Maximum number of batches to train for.\n \"\"\"\n\n global_train_batch_size: int = 512\n \"\"\"\n The effective global batch size.\n \"\"\"\n\n device_train_batch_size: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``global_train_batch_size // world_size``.\n \"\"\"\n\n device_train_microbatch_size: int = 16\n \"\"\"\n The number of instances passed to the model in a single forward-backward pass. You should set\n this as large as you can based on available GPU memory.\n \"\"\"\n\n device_eval_batch_size: int = 16\n \"\"\"\n The number of evaluation instances passed to the model in a single forward pass on each device.\n \"\"\"\n\n eval_subset_num_batches: int = -1\n \"\"\"\n The number of batches to use for downstream evaluation from each dataset.\n \"\"\"\n\n eval_on_load: bool = False\n \"\"\"\n When resuming from a checkpoint, run the evaluation loop right away.\n \"\"\"\n\n device_train_grad_accum: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``device_train_batch_size // device_train_microbatch_size``.\n \"\"\"\n\n max_grad_norm: Optional[float] = None\n \"\"\"\n Clip gradients to this value if set.\n \"\"\"\n\n precision: Optional[str] = None\n \"\"\"\n Precision to train with (e.g. 
\"amp_bf16\", \"amp_fp16\", or \"fp32\").\n \"\"\"\n\n wandb: Optional[WandbConfig] = None\n \"\"\"\n Weights & Biases configuration.\n \"\"\"\n\n speed_monitor: SpeedMonitorConfig = field(default_factory=SpeedMonitorConfig)\n \"\"\"\n Speed monitor configuration.\n \"\"\"\n\n console_log_interval: int = 1\n \"\"\"\n How often to log to the console.\n \"\"\"\n\n compile: Optional[CompilerConfig] = None\n \"\"\"\n Settings for compiling the model with ``torch.compile()``.\n \"\"\"\n\n activation_checkpointing: bool = False\n \"\"\"\n Use activation checkpointing on transformer blocks.\n \"\"\"\n\n fsdp: FSDPConfig = field(default_factory=FSDPConfig)\n \"\"\"\n Fully sharded data parallel settings.\n \"\"\"\n\n softmax_auxiliary_loss: bool = False\n \"\"\"\n If ``True``, we add the auxiliary loss function from PaLM that encourages the softmax\n normalizing term to be close to 0.\n \"\"\"\n\n time_limit: Optional[float] = 60 * 60 * 119.5\n \"\"\"\n The maximum amount of time to train for before saving a checkpoint and ending early.\n On LUMI we have 48 hours max per job, so we default to just under 48 hours to give us time\n to write out a final checkpoint.\n \"\"\"\n\n early_stopping_factor: Optional[float] = None\n\n save_data_indices: bool = True\n \"\"\"\n Save training data indices from each batch for each worker.\n \"\"\"\n\n @property\n def autocast_precision(self) -> torch.dtype:\n if self.precision == \"amp_bf16\":\n return torch.bfloat16\n elif self.precision == \"amp_fp16\":\n return torch.float16\n elif self.precision == \"fp32\":\n return torch.float32\n else:\n raise ValueError(f\"Unexpected precision type '{self.precision}'\")" }, { "identifier": "Tokenizer", "path": "olmo/tokenizer.py", "snippet": "class Tokenizer:\n \"\"\"\n A :class:`Tokenizer` is a light-weight wrapper around a HuggingFace :class:`tokenizers.Tokenizer`.\n\n :param base_tokenizer: The :class:`tokenizers.Tokenizer` to use.\n :param eos_token_id: The token ID corresponding to the \"end-of-sentence\" token.\n :param truncate_to: Truncate when tokenizing to this number of token IDs.\n :param truncate_direction: The direction to truncate in. \"right\" means truncate the tokens\n on the right. \"left\" means truncate the tokens on the left. 
If ``truncate_to`` is null,\n this setting has no effect.\n \"\"\"\n\n def __init__(\n self,\n base_tokenizer: BaseTokenizer,\n eos_token_id: int,\n pad_token_id: Optional[int] = None,\n truncate_to: Optional[int] = None,\n truncate_direction: Union[str, TruncationDirection] = TruncationDirection.right,\n ):\n self.base_tokenizer = base_tokenizer\n self.base_tokenizer.no_truncation()\n self.eos_token_id = eos_token_id\n self.pad_token_id = pad_token_id if pad_token_id is not None else eos_token_id\n self.truncate_to = truncate_to\n self.truncate_direction = TruncationDirection(truncate_direction)\n\n @property\n def vocab_size(self) -> int:\n return self.base_tokenizer.get_vocab_size()\n\n @classmethod\n def from_train_config(cls, config: TrainConfig) -> Tokenizer:\n tokenizer_identifier = config.tokenizer.identifier\n if Path(tokenizer_identifier).is_file():\n tokenizer = cls.from_file(\n tokenizer_identifier,\n eos_token_id=config.model.eos_token_id,\n pad_token_id=config.model.pad_token_id,\n )\n else:\n tokenizer = cls.from_pretrained(\n tokenizer_identifier,\n eos_token_id=config.model.eos_token_id,\n pad_token_id=config.model.pad_token_id,\n )\n if config.model.vocab_size != tokenizer.vocab_size:\n raise OlmoConfigurationError(\"vocab size mismatch between config and tokenizer\")\n return tokenizer\n\n @classmethod\n def from_pretrained(cls, identifier: str, **kwargs) -> Tokenizer:\n \"\"\"\n Initialize a tokenizer from a pretrained tokenizer on the HuggingFace Hub.\n\n :param identifier: The identifier of a model on the Hub that contains a\n ``tokenizer.json`` file.\n :param kwargs: Other key word arguments passed to :class:`Tokenizer`.\n \"\"\"\n base_tokenizer = BaseTokenizer.from_pretrained(identifier)\n eos_token_id = kwargs.pop(\"eos_token_id\", base_tokenizer.get_vocab_size() - 1)\n return cls(base_tokenizer, eos_token_id, **kwargs)\n \n @classmethod\n def from_file(cls, filename: PathOrStr, **kwargs) -> Tokenizer:\n \"\"\"\n Initialize a tokenizer from a file.\n You can create those files with ``BaseTokenizer.save()``.\n :param filename: The name of a file containing a tokenizer specification.\n :param kwargs: Other key word arguments passed to :class:`Tokenizer`.\n \"\"\"\n base_tokenizer = BaseTokenizer.from_file(filename)\n eos_token_id = kwargs.pop(\"eos_token_id\", base_tokenizer.get_vocab_size() - 1)\n return cls(base_tokenizer, eos_token_id, **kwargs)\n \n \n @classmethod\n def from_checkpoint(cls, checkpoint_dir: PathOrStr) -> Tokenizer:\n \"\"\"\n Load a tokenizer from a checkpoint.\n \"\"\"\n from cached_path import cached_path\n\n # Load configs.\n config_path = cached_path(os.path.join(checkpoint_dir, \"config.yaml\"))\n tokenizer_config = TokenizerConfig.load(config_path, key=\"tokenizer\")\n model_config = ModelConfig.load(config_path, key=\"model\")\n\n # Initialize tokenizer and validate vocab size.\n tokenizer = cls.from_pretrained(\n tokenizer_config.identifier,\n eos_token_id=model_config.eos_token_id,\n pad_token_id=model_config.pad_token_id,\n )\n if model_config.vocab_size != tokenizer.vocab_size:\n raise OlmoConfigurationError(\"vocab size mismatch between config and tokenizer\")\n return tokenizer\n\n def add_special_tokens(self, input_ids: List[int]) -> List[int]:\n \"\"\"\n Add special tokens in-place (if not already present) to the given token IDs.\n \"\"\"\n if not input_ids or input_ids[-1] != self.eos_token_id:\n input_ids.append(self.eos_token_id)\n return input_ids\n\n def num_special_tokens_to_add(self, is_pair: bool = False) -> int:\n return 
2 if is_pair else 1\n\n def _truncate(\n self, input_ids: List[int], truncate_to: Optional[int], direction: TruncationDirection\n ) -> list[int]:\n if truncate_to is None or len(input_ids) <= truncate_to:\n return input_ids\n elif direction == TruncationDirection.left:\n return input_ids[len(input_ids) - truncate_to :]\n else:\n return input_ids[: -(len(input_ids) - truncate_to)]\n\n def encode(self, input: str, add_special_tokens: bool = True) -> List[int]:\n \"\"\"\n Encode a string into token IDs.\n \"\"\"\n return self.encode_batch([input], add_special_tokens=add_special_tokens)[0]\n\n def encode_batch(self, inputs: List[str], add_special_tokens: bool = True) -> List[List[int]]:\n \"\"\"\n Encode a batch of strings into token IDs.\n \"\"\"\n truncate_to = self.truncate_to\n if truncate_to is not None and add_special_tokens:\n truncate_to -= self.num_special_tokens_to_add(False)\n\n batch_encoding = self.base_tokenizer.encode_batch(inputs)\n\n all_input_ids = []\n for encoding in batch_encoding:\n input_ids = self._truncate(encoding.ids, truncate_to, self.truncate_direction)\n if add_special_tokens:\n input_ids = self.add_special_tokens(input_ids)\n all_input_ids.append(input_ids)\n\n return all_input_ids\n\n def decode(self, token_ids: List[int], skip_special_tokens: bool = True) -> str:\n \"\"\"\n Decode a list of token IDs to a string.\n \"\"\"\n return self.base_tokenizer.decode(token_ids, skip_special_tokens=skip_special_tokens)" } ]
from typing import List

from olmo.config import (
    DataConfig,
    ModelConfig,
    OptimizerConfig,
    PaddingDirection,
    SchedulerConfig,
    TokenizerConfig,
    TrainConfig,
)
from olmo.tokenizer import Tokenizer

import pytest
5,178
TEST_MODEL = "gpt2"

LOREM_IPSUM_1 = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
"""

LOREM_IPSUM_2 = """
Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium
doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore
veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim
ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia
consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque
porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur,
adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et
dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis
nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex
ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea
voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem
eum fugiat quo voluptas nulla pariatur?
"""


@pytest.fixture(scope="function")
def model_config() -> ModelConfig:
    return ModelConfig(
        vocab_size=50257,
        eos_token_id=50256,
        pad_token_id=50256,
        d_model=128,
        n_heads=2,
        n_layers=3,
        max_sequence_length=512,
    )


@pytest.fixture(scope="function")
def tokenizer() -> Tokenizer:
    return Tokenizer.from_pretrained(TEST_MODEL)


@pytest.fixture(scope="function")
def train_config(tmp_path, model_config) -> TrainConfig:
    return TrainConfig(
        model=model_config,
        optimizer=OptimizerConfig(),
        scheduler=SchedulerConfig(),
TEST_MODEL = "gpt2"

LOREM_IPSUM_1 = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis
nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.
Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu
fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
"""

LOREM_IPSUM_2 = """
Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium
doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore
veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim
ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia
consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque
porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur,
adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et
dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis
nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex
ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea
voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem
eum fugiat quo voluptas nulla pariatur?
"""


@pytest.fixture(scope="function")
def model_config() -> ModelConfig:
    return ModelConfig(
        vocab_size=50257,
        eos_token_id=50256,
        pad_token_id=50256,
        d_model=128,
        n_heads=2,
        n_layers=3,
        max_sequence_length=512,
    )


@pytest.fixture(scope="function")
def tokenizer() -> Tokenizer:
    return Tokenizer.from_pretrained(TEST_MODEL)


@pytest.fixture(scope="function")
def train_config(tmp_path, model_config) -> TrainConfig:
    return TrainConfig(
        model=model_config,
        optimizer=OptimizerConfig(),
        scheduler=SchedulerConfig(),
data=DataConfig(
0
2023-11-14 02:24:07+00:00
8k
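The gold next line above opens a `DataConfig(...)` argument inside the truncated `train_config` fixture. A hedged completion is sketched below using only fields shown in this row's context (`TrainConfig.data`, `tokenizer`, `save_folder`; `DataConfig.paths`, `pad_direction`, `num_workers`); the data path is hypothetical and the remaining keyword values are illustrative assumptions, not the repository's exact fixture.

@pytest.fixture(scope="function")
def train_config_sketch(tmp_path, model_config) -> TrainConfig:
    return TrainConfig(
        model=model_config,
        optimizer=OptimizerConfig(),
        scheduler=SchedulerConfig(),
        data=DataConfig(
            paths=["test_fixtures/sample-tokens.npy"],  # hypothetical path
            pad_direction=PaddingDirection.right,
            num_workers=0,
        ),
        tokenizer=TokenizerConfig(identifier=TEST_MODEL),
        save_folder=str(tmp_path / "checkpoints"),
    )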
1in-oos/ccplus
caringcaribou/tests/test_iso_14229_1.py
[ { "identifier": "DEFAULT_INTERFACE", "path": "caringcaribou/utils/can_actions.py", "snippet": "DEFAULT_INTERFACE = None" }, { "identifier": "MockEcuIso14229", "path": "caringcaribou/tests/mock/mock_ecu_uds.py", "snippet": "class MockEcuIso14229(MockEcuIsoTp, MockEcu):\n \"\"\"ISO-14229-1 (Unified Diagnostic Services) mock ECU handler\"\"\"\n\n IDENTIFIER_REQUEST_POSITIVE = 0x01\n IDENTIFIER_REQUEST_POSITIVE_RESPONSE = 0x72\n IDENTIFIER_REQUEST_NEGATIVE = 0x02\n\n REQUEST_IDENTIFIER_VALID = 0xA001\n REQUEST_IDENTIFIER_INVALID = 0xA002\n REQUEST_VALUE = [0xC0, 0xFF, 0xEE]\n\n REQUEST_ADDRESS_LENGTH_AND_FORMAT = 0x22\n REQUEST_ADDRESS = 0x0001\n REQUEST_DATA_SIZE = 0x10\n DATA = list(range(0x14))\n\n # TODO Use dynamic seed value and verify keys using a simple algorithm\n SECURITY_ACCESS_SEED = [0x36, 0x57]\n SECURITY_ACCESS_KEY = [0xC9, 0xA9]\n\n def __init__(self, arb_id_request, arb_id_response, bus=None):\n MockEcu.__init__(self, bus)\n self.ARBITRATION_ID_ISO_14229_REQUEST = arb_id_request\n self.ARBITRATION_ID_ISO_14229_RESPONSE = arb_id_response\n # Set CAN filter to only listen to incoming requests on the correct arbitration ID\n arbitration_id_filter = [{\"can_id\": arb_id_request, \"can_mask\": 0x1fffffff}]\n self.bus.set_filters(arbitration_id_filter)\n # Setup ISO-TP using the filtered bus\n self.iso_tp = IsoTp(arb_id_request=self.ARBITRATION_ID_ISO_14229_REQUEST,\n arb_id_response=self.ARBITRATION_ID_ISO_14229_RESPONSE,\n bus=self.bus)\n # Setup diagnostics on top of ISO-TP\n self.diagnostics = Iso14229_1(tp=self.iso_tp)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n MockEcuIsoTp.__exit__(self, None, None, None)\n\n @staticmethod\n def create_positive_response(request_service_id, response_data=None):\n \"\"\"\n Returns data for a positive response of 'request_service_id' with an optional 'response_data' payload\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param response_data: List of data bytes to transmit in the response\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n # Positive response uses a response service ID (SIDPR) based on the request service ID (SIDRQ)\n service_response_id = Iso14229_1.get_service_response_id(request_service_id)\n response = [service_response_id]\n # Append payload\n if response_data is not None:\n response += response_data\n return response\n\n @staticmethod\n def create_negative_response(request_service_id, nrc):\n \"\"\"\n Returns data for a negative response of 'request_service_id' with negative response code 'nrc'\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param nrc: Negative response code (NRC_)\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n response = [Constants.NR_SI,\n request_service_id,\n nrc]\n return response\n\n def message_handler(self, data):\n \"\"\"\n Logic for responding to incoming messages\n\n :param data: list of data bytes in incoming message\n :return: None\n \"\"\"\n assert isinstance(data, list)\n try:\n service_id = data[0]\n # Handle different services\n if service_id == ServiceID.DIAGNOSTIC_SESSION_CONTROL:\n # 0x10 Diagnostic session control\n response_data = self.handle_diagnostic_session_control(data)\n elif service_id == ServiceID.ECU_RESET:\n # 0x11 ECU reset\n response_data = self.handle_ecu_reset(data)\n elif service_id == ServiceID.READ_DATA_BY_IDENTIFIER:\n # 0x22 Read data by identifier\n response_data = self.handle_read_data_by_identifier(data)\n elif service_id == 
ServiceID.READ_MEMORY_BY_ADDRESS:\n # 0x23 Read memory by address\n response_data = self.handle_read_memory_by_address(data)\n elif service_id == ServiceID.SECURITY_ACCESS:\n # 0x27 Security access\n response_data = self.handle_security_access(data)\n elif service_id == ServiceID.WRITE_DATA_BY_IDENTIFIER:\n # 0x2E Write data by identifier\n response_data = self.handle_write_data_by_identifier(data)\n else:\n # Unsupported service\n response_data = self.handle_unsupported_service(data)\n except IndexError:\n # Parsing failed due to invalid message structure\n response_data = self.handle_service_error(data)\n\n # This check makes it possible to support services where a response should not be sent\n if response_data is not None:\n # Simulate a small delay before responding\n time.sleep(self.DELAY_BEFORE_RESPONSE)\n self.diagnostics.send_response(response_data)\n\n def handle_unsupported_service(self, data):\n \"\"\"Provides a standard response for unmapped services, by responding with NRC Service Not Supported\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.SERVICE_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_service_error(self, data):\n \"\"\"Provides a standard response for failed service requests\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_diagnostic_session_control(self, data):\n \"\"\"Evaluates a diagnostic session control request and returns a response\"\"\"\n service_id = data[0]\n # TODO Handle different values?\n session_type = data[1]\n response_data = self.create_positive_response(service_id)\n return response_data\n\n def handle_read_data_by_identifier(self, data):\n \"\"\"\n Evaluates a read data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n request = data[2]\n\n if request == self.IDENTIFIER_REQUEST_POSITIVE:\n # Request for positive response\n # TODO Actually read a parameter from memory\n payload = [self.IDENTIFIER_REQUEST_POSITIVE_RESPONSE]\n response_data = self.create_positive_response(service_id, payload)\n elif request == self.IDENTIFIER_REQUEST_NEGATIVE:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_write_data_by_identifier(self, data):\n \"\"\"\n Evaluates a write data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n\n identifier_start_position = 1\n identifier_length = 2\n identifier = int_from_byte_list(data,\n identifier_start_position,\n identifier_length)\n request_data = data[3:]\n # TODO Actually write data to memory\n if identifier == self.REQUEST_IDENTIFIER_VALID:\n # Request for positive response\n # Standard specifies the response payload to be an echo of the data identifier from the request\n payload = data[identifier_start_position:identifier_start_position + identifier_length]\n response_data = self.create_positive_response(service_id, 
payload)\n elif identifier == self.REQUEST_IDENTIFIER_INVALID:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_read_memory_by_address(self, data):\n \"\"\"\n Evaluates a read memory by address request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n address_field_size = (data[1] >> 4) & 0xF\n data_length_field_size = (data[1] & 0xF)\n address_start_position = 2\n data_length_start_position = 4\n\n start_address = int_from_byte_list(data, address_start_position, address_field_size)\n data_length = int_from_byte_list(data, data_length_start_position, data_length_field_size)\n end_address = start_address + data_length\n if 0 <= start_address <= end_address <= len(self.DATA):\n memory_data = self.DATA[start_address:end_address]\n response_data = self.create_positive_response(service_id, memory_data)\n else:\n nrc = NegativeResponseCodes.REQUEST_OUT_OF_RANGE\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_ecu_reset(self, data):\n \"\"\"\n Evaluates an ECU reset request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n reset_type = subfunction & 0x7F\n suppress_positive_response = subfunction >> 7\n\n reset_types = Services.EcuReset.ResetType\n\n if reset_type in [reset_types.HARD_RESET, reset_types.KEY_OFF_ON_RESET, reset_types.SOFT_RESET]:\n if suppress_positive_response:\n response_data = None\n else:\n response_data = self.create_positive_response(service_id, [reset_type])\n else:\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_security_access(self, data):\n \"\"\"\n Evaluates security access requests (both \"Request seed\" and \"Send key\") and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n level = subfunction & 0x7F\n\n service_handler = Services.SecurityAccess.RequestSeedOrSendKey()\n if service_handler.is_valid_request_seed_level(level):\n # Request seed handling\n payload = [level]\n payload.extend(self.SECURITY_ACCESS_SEED)\n response_data = self.create_positive_response(service_id, payload)\n elif service_handler.is_valid_send_key_level(level):\n # Send key handling\n expected_key = self.SECURITY_ACCESS_KEY\n received_key = data[2:]\n if received_key == expected_key:\n # Correct key\n response_data = self.create_positive_response(service_id, [level])\n else:\n # Invalid key\n nrc = NegativeResponseCodes.INVALID_KEY\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unsupported subfunction\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data" }, { "identifier": "iso14229_1", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class DynamicallyDefinedIdentifierArg(object):\nclass NegativeResponseCodes(object):\nclass 
ServiceID(object):\nclass BaseService(object):\nclass Services(object):\n class DiagnosticSessionControl(BaseService):\n class DiagnosticSessionType(object):\n class EcuReset(BaseService):\n class ResetType(object):\n class SecurityAccess(BaseService):\n class RequestSeedOrSendKey(object):\n class TesterPresent(BaseService):\nclass Constants(object):\nclass Iso14229_1(object):\n def __init__(self, source_data_identifier,\n position_in_source_data_record, memory_size):\n def is_valid_request_seed_level(self, sub_function):\n def is_valid_send_key_level(self, sub_function):\n def get_send_key_for_request_seed(seed):\n def __init__(self, tp):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def get_service_response_id(request_id):\n def get_service_request_id(response_id):\n def send_request(self, data):\n def send_response(self, data):\n def receive_response(self, wait_window):\n def is_positive_response(response):\n def read_data_by_identifier(self, identifier):\n def read_memory_by_address(self, address_and_length_format,\n memory_address, memory_size):\n def write_memory_by_address(self, address_and_length_format,\n memory_address, memory_size, data):\n def write_data_by_identifier(self, identifier, data):\n def input_output_control_by_identifier(self, identifier, data):\n def dynamically_define_data_identifier(self, identifier,\n sub_function, sub_function_arg):\n def diagnostic_session_control(self, session_type):\n def ecu_reset(self, reset_type):\n def security_access_request_seed(self, level, data_record=None):\n def security_access_send_key(self, level, key):\n def read_data_by_periodic_identifier(self, transmission_mode,\n identifier):\n POSITIVE_RESPONSE = 0x00\n GENERAL_REJECT = 0x10\n SERVICE_NOT_SUPPORTED = 0x11\n SUB_FUNCTION_NOT_SUPPORTED = 0x12\n INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT = 0x13\n RESPONSE_TOO_LONG = 0x14\n BUSY_REPEAT_REQUEST = 0x21\n CONDITIONS_NOT_CORRECT = 0x22\n REQUEST_SEQUENCE_ERROR = 0x24\n NO_RESPONSE_FROM_SUBNET_COMPONENT = 0x25\n FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION = 0x26\n REQUEST_OUT_OF_RANGE = 0x31\n SECURITY_ACCESS_DENIED = 0x33\n INVALID_KEY = 0x35\n EXCEEDED_NUMBER_OF_ATTEMPTS = 0x36\n REQUIRED_TIME_DELAY_NOT_EXPIRED = 0x37\n UPLOAD_DOWNLOAD_NOT_ACCEPTED = 0x70\n TRANSFER_DATA_SUSPENDED = 0x71\n GENERAL_PROGRAMMING_FAILURE = 0x72\n WRONG_BLOCK_SEQUENCE_COUNTER = 0x73\n REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING = 0x78\n SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7E\n SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7F\n RPM_TOO_HIGH = 0x81\n RPM_TOO_LOW = 0x82\n ENGINE_IS_RUNNING = 0x83\n ENGINE_IS_NOT_RUNNING = 0x84\n ENGINE_RUN_TIME_TOO_LOW = 0x85\n TEMPERATURE_TOO_HIGH = 0x86\n TEMPERATURE_TOO_LOW = 0x87\n VEHICLE_SPEED_TOO_HIGH = 0x88\n VEHICLE_SPEED_TOO_LOW = 0x89\n THROTTLE_PEDAL_TOO_HIGH = 0x8A\n THROTTLE_PEDAL_TOO_LOW = 0x8B\n TRANSMISSION_RANGE_NOT_IN_NEUTRAL = 0x8C\n TRANSMISSION_RANGE_NOT_IN_GEAR = 0x8D\n BRAKE_SWITCHES_NOT_CLOSED = 0x8F\n SHIFT_LEVER_NOT_IN_PARK = 0x90\n TORQUE_CONVERTER_CLUTCH_LOCKED = 0x91\n VOLTAGE_TOO_HIGH = 0x92\n VOLTAGE_TOO_LOW = 0x93\n DIAGNOSTIC_SESSION_CONTROL = 0x10\n ECU_RESET = 0x11\n CLEAR_DIAGNOSTIC_INFORMATION = 0x14\n READ_DTC_INFORMATION = 0x19\n READ_DATA_BY_IDENTIFIER = 0x22\n READ_MEMORY_BY_ADDRESS = 0x23\n READ_SCALING_DATA_BY_IDENTIFIER = 0x24\n SECURITY_ACCESS = 0x27\n COMMUNICATION_CONTROL = 0x28\n READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2A\n DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2C\n WRITE_DATA_BY_IDENTIFIER = 0x2E\n INPUT_OUTPUT_CONTROL_BY_IDENTIFIER 
= 0x2F\n ROUTINE_CONTROL = 0x31\n REQUEST_DOWNLOAD = 0x34\n REQUEST_UPLOAD = 0x35\n TRANSFER_DATA = 0x36\n REQUEST_TRANSFER_EXIT = 0x37\n REQUEST_FILE_TRANSFER = 0x38\n WRITE_MEMORY_BY_ADDRESS = 0x3D\n TESTER_PRESENT = 0x3E\n ACCESS_TIMING_PARAMETER = 0x83\n SECURED_DATA_TRANSMISSION = 0x84\n CONTROL_DTC_SETTING = 0x85\n RESPONSE_ON_EVENT = 0x86\n LINK_CONTROL = 0x87\n DEFAULT_SESSION = 0x01\n PROGRAMMING_SESSION = 0x02\n EXTENDED_DIAGNOSTIC_SESSION = 0x03\n SAFETY_SYSTEM_DIAGNOSTIC_SESSION = 0x04\n VEHICLE_MANUFACTURER_SESSION_MIN = 0x40\n VEHICLE_MANUFACTURER_SESSION_MAX = 0x5F\n SYSTEM_SUPPLIER_SESSION_MIN = 0x60\n SYSTEM_SUPPLIER_SESSION_MAX = 0x7E\n HARD_RESET = 0x01\n KEY_OFF_ON_RESET = 0x02\n SOFT_RESET = 0x03\n ENABLE_RAPID_POWER_SHUTDOWN = 0x04\n DISABLE_RAPID_POWER_SHUTDOWN = 0x05\n ISO_26021_2_VALUES = 0x5F\n ISO_26021_2_SEND_KEY = 0x60\n __REQUEST_SEED_MIN = 0x01\n __REQUEST_SEED_MAX = 0x41\n __SEND_KEY_MIN = 0x02\n __SEND_KEY_MAX = 0x42\n NR_SI = 0x7F\n P3_CLIENT = 5\n NRC = NegativeResponseCodes\n NRC_RCRRP = NRC.REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING" }, { "identifier": "iso15765_2", "path": "caringcaribou/utils/iso15765_2.py", "snippet": "class IsoTp:\n MAX_SF_LENGTH = 7\n MAX_FF_LENGTH = 6\n MAX_CF_LENGTH = 7\n SF_PCI_LENGTH = 1\n CF_PCI_LENGTH = 1\n FF_PCI_LENGTH = 2\n FC_PCI_LENGTH = 3\n FC_FS_CTS = 0\n FC_FS_WAIT = 1\n FC_FS_OVFLW = 2\n SF_FRAME_ID = 0\n FF_FRAME_ID = 1\n CF_FRAME_ID = 2\n FC_FRAME_ID = 3\n N_BS_TIMEOUT = 1.5\n MAX_FRAME_LENGTH = 8\n MAX_MESSAGE_LENGTH = 4095\n def __init__(self, arb_id_request, arb_id_response, bus=None, padding_value=0x00):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def _set_filters(self, filters):\n def set_filter_single_arbitration_id(self, arbitration_id):\n def clear_filters(self):\n def send_message(self, data, arbitration_id, force_extended=False):\n def decode_sf(self, frame):\n def decode_ff(self, frame):\n def decode_cf(self, frame):\n def decode_fc(self, frame):\n def encode_fc(self, flow_status, block_size, st_min):\n def send_request(self, message):\n def send_response(self, message):\n def indication(self, wait_window=None, trim_padding=True, first_frame_only=False):\n def transmit(self, frames, arbitration_id, arbitration_id_flow_control):\n def get_frames_from_message(message, padding_value=0x00):" } ]
from caringcaribou.utils.can_actions import DEFAULT_INTERFACE
from caringcaribou.tests.mock.mock_ecu_uds import MockEcuIso14229
from caringcaribou.utils import iso14229_1
from caringcaribou.utils import iso15765_2

import can
import unittest
4,991
from __future__ import print_function


class DiagnosticsOverIsoTpTestCase(unittest.TestCase):
    ARB_ID_REQUEST = 0x200C
    ARB_ID_RESPONSE = 0x200D

    def setUp(self):
        # Initialize mock ECU
        self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
        self.ecu.start_server()
        # Initialize virtual CAN bus
        can_bus = can.Bus(DEFAULT_INTERFACE)
        # Setup diagnostics on top of ISO-TP layer
from __future__ import print_function


class DiagnosticsOverIsoTpTestCase(unittest.TestCase):
    ARB_ID_REQUEST = 0x200C
    ARB_ID_RESPONSE = 0x200D

    def setUp(self):
        # Initialize mock ECU
        self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
        self.ecu.start_server()
        # Initialize virtual CAN bus
        can_bus = can.Bus(DEFAULT_INTERFACE)
        # Setup diagnostics on top of ISO-TP layer
self.tp = iso15765_2.IsoTp(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE, bus=can_bus)
3
2023-11-13 05:05:46+00:00
8k
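The gold next line above builds the ISO-TP transport inside `setUp`. Based on this row's context, where the mock ECU constructs its diagnostics layer as `Iso14229_1(tp=<IsoTp instance>)` and both `IsoTp` and the mock ECU expose `__exit__` for cleanup, the test case plausibly continues as sketched below; the `tearDown` body is an assumption for illustration, not the repository's code.

class DiagnosticsOverIsoTpTestCaseSketch(unittest.TestCase):
    ARB_ID_REQUEST = 0x200C
    ARB_ID_RESPONSE = 0x200D

    def setUp(self):
        self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
        self.ecu.start_server()
        can_bus = can.Bus(DEFAULT_INTERFACE)
        # Stack UDS (ISO 14229-1) on top of the ISO-TP transport layer.
        self.tp = iso15765_2.IsoTp(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE,
                                   bus=can_bus)
        self.diagnostics = iso14229_1.Iso14229_1(tp=self.tp)

    def tearDown(self):
        # Release transport and mock ECU resources between tests.
        self.tp.__exit__(None, None, None)
        self.ecu.__exit__(None, None, None)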
L1bra1/WeakMotion
gen_data/gen_weak_waymo_utils.py
[ { "identifier": "Box", "path": "gen_data/nuscenes/utils/data_classes.py", "snippet": "class Box:\n \"\"\" Simple data class representing a 3d box including, label, score and velocity. \"\"\"\n\n def __init__(self,\n center: List[float],\n size: List[float],\n orientation: Quaternion,\n label: int = np.nan,\n score: float = np.nan,\n velocity: Tuple = (np.nan, np.nan, np.nan),\n name: str = None,\n token: str = None):\n \"\"\"\n :param center: Center of box given as x, y, z.\n :param size: Size of box in width, length, height.\n :param orientation: Box orientation.\n :param label: Integer label, optional.\n :param score: Classification score, optional.\n :param velocity: Box velocity in x, y, z direction.\n :param name: Box name, optional. Can be used e.g. for denote category name.\n :param token: Unique string identifier from DB.\n \"\"\"\n assert not np.any(np.isnan(center))\n assert not np.any(np.isnan(size))\n assert len(center) == 3\n assert len(size) == 3\n assert type(orientation) == Quaternion\n\n self.center = np.array(center)\n self.wlh = np.array(size)\n self.orientation = orientation\n self.label = int(label) if not np.isnan(label) else label\n self.score = float(score) if not np.isnan(score) else score\n self.velocity = np.array(velocity)\n self.name = name\n self.token = token\n\n def __eq__(self, other):\n center = np.allclose(self.center, other.center)\n wlh = np.allclose(self.wlh, other.wlh)\n orientation = np.allclose(self.orientation.elements, other.orientation.elements)\n label = (self.label == other.label) or (np.isnan(self.label) and np.isnan(other.label))\n score = (self.score == other.score) or (np.isnan(self.score) and np.isnan(other.score))\n vel = (np.allclose(self.velocity, other.velocity) or\n (np.all(np.isnan(self.velocity)) and np.all(np.isnan(other.velocity))))\n\n return center and wlh and orientation and label and score and vel\n\n def __repr__(self):\n repr_str = 'label: {}, score: {:.2f}, xyz: [{:.2f}, {:.2f}, {:.2f}], wlh: [{:.2f}, {:.2f}, {:.2f}], ' \\\n 'rot axis: [{:.2f}, {:.2f}, {:.2f}], ang(degrees): {:.2f}, ang(rad): {:.2f}, ' \\\n 'vel: {:.2f}, {:.2f}, {:.2f}, name: {}, token: {}'\n\n return repr_str.format(self.label, self.score, self.center[0], self.center[1], self.center[2], self.wlh[0],\n self.wlh[1], self.wlh[2], self.orientation.axis[0], self.orientation.axis[1],\n self.orientation.axis[2], self.orientation.degrees, self.orientation.radians,\n self.velocity[0], self.velocity[1], self.velocity[2], self.name, self.token)\n\n @property\n def rotation_matrix(self) -> np.ndarray:\n \"\"\"\n Return a rotation matrix.\n :return: <np.float: 3, 3>. The box's rotation matrix.\n \"\"\"\n return self.orientation.rotation_matrix\n\n def translate(self, x: np.ndarray) -> None:\n \"\"\"\n Applies a translation.\n :param x: <np.float: 3, 1>. Translation in x, y, z direction.\n \"\"\"\n self.center += x\n\n def rotate(self, quaternion: Quaternion) -> None:\n \"\"\"\n Rotates box.\n :param quaternion: Rotation to apply.\n \"\"\"\n self.center = np.dot(quaternion.rotation_matrix, self.center)\n self.orientation = quaternion * self.orientation\n self.velocity = np.dot(quaternion.rotation_matrix, self.velocity)\n\n def corners(self, wlh_factor: float = 1.0) -> np.ndarray:\n \"\"\"\n Returns the bounding box corners.\n :param wlh_factor: Multiply w, l, h by a factor to scale the box.\n :return: <np.float: 3, 8>. 
First four corners are the ones facing forward.\n The last four are the ones facing backwards.\n \"\"\"\n w, l, h = self.wlh * wlh_factor\n\n # 3D bounding box corners. (Convention: x points forward, y to the left, z up.)\n x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])\n y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])\n z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])\n corners = np.vstack((x_corners, y_corners, z_corners))\n\n # Rotate\n corners = np.dot(self.orientation.rotation_matrix, corners)\n\n # Translate\n x, y, z = self.center\n corners[0, :] = corners[0, :] + x\n corners[1, :] = corners[1, :] + y\n corners[2, :] = corners[2, :] + z\n\n return corners\n\n def bottom_corners(self) -> np.ndarray:\n \"\"\"\n Returns the four bottom corners.\n :return: <np.float: 3, 4>. Bottom corners. First two face forward, last two face backwards.\n \"\"\"\n return self.corners()[:, [2, 3, 7, 6]]\n\n def render(self,\n axis: Axes,\n view: np.ndarray = np.eye(3),\n normalize: bool = False,\n colors: Tuple = ('b', 'r', 'k'),\n linewidth: float = 2) -> None:\n \"\"\"\n Renders the box in the provided Matplotlib axis.\n :param axis: Axis onto which the box should be drawn.\n :param view: <np.array: 3, 3>. Define a projection in needed (e.g. for drawing projection in an image).\n :param normalize: Whether to normalize the remaining coordinate.\n :param colors: (<Matplotlib.colors>: 3). Valid Matplotlib colors (<str> or normalized RGB tuple) for front,\n back and sides.\n :param linewidth: Width in pixel of the box sides.\n \"\"\"\n corners = view_points(self.corners(), view, normalize=normalize)[:2, :]\n\n def draw_rect(selected_corners, color):\n prev = selected_corners[-1]\n for corner in selected_corners:\n axis.plot([prev[0], corner[0]], [prev[1], corner[1]], color=color, linewidth=linewidth)\n prev = corner\n\n # Draw the sides\n for i in range(4):\n axis.plot([corners.T[i][0], corners.T[i + 4][0]],\n [corners.T[i][1], corners.T[i + 4][1]],\n color=colors[2], linewidth=linewidth)\n\n # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)\n draw_rect(corners.T[:4], colors[0])\n draw_rect(corners.T[4:], colors[1])\n\n # Draw line indicating the front\n center_bottom_forward = np.mean(corners.T[2:4], axis=0)\n center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)\n axis.plot([center_bottom[0], center_bottom_forward[0]],\n [center_bottom[1], center_bottom_forward[1]],\n color=colors[0], linewidth=linewidth)\n\n def render_cv2(self,\n im: np.ndarray,\n view: np.ndarray = np.eye(3),\n normalize: bool = False,\n colors: Tuple = ((0, 0, 255), (255, 0, 0), (155, 155, 155)),\n linewidth: int = 2) -> None:\n \"\"\"\n Renders box using OpenCV2.\n :param im: <np.array: width, height, 3>. Image array. Channels are in BGR order.\n :param view: <np.array: 3, 3>. Define a projection if needed (e.g. for drawing projection in an image).\n :param normalize: Whether to normalize the remaining coordinate.\n :param colors: ((R, G, B), (R, G, B), (R, G, B)). 
Colors for front, side & rear.\n :param linewidth: Linewidth for plot.\n \"\"\"\n corners = view_points(self.corners(), view, normalize=normalize)[:2, :]\n\n def draw_rect(selected_corners, color):\n prev = selected_corners[-1]\n for corner in selected_corners:\n cv2.line(im,\n (int(prev[0]), int(prev[1])),\n (int(corner[0]), int(corner[1])),\n color, linewidth)\n prev = corner\n\n # Draw the sides\n for i in range(4):\n cv2.line(im,\n (int(corners.T[i][0]), int(corners.T[i][1])),\n (int(corners.T[i + 4][0]), int(corners.T[i + 4][1])),\n colors[2][::-1], linewidth)\n\n # Draw front (first 4 corners) and rear (last 4 corners) rectangles(3d)/lines(2d)\n draw_rect(corners.T[:4], colors[0][::-1])\n draw_rect(corners.T[4:], colors[1][::-1])\n\n # Draw line indicating the front\n center_bottom_forward = np.mean(corners.T[2:4], axis=0)\n center_bottom = np.mean(corners.T[[2, 3, 7, 6]], axis=0)\n cv2.line(im,\n (int(center_bottom[0]), int(center_bottom[1])),\n (int(center_bottom_forward[0]), int(center_bottom_forward[1])),\n colors[0][::-1], linewidth)\n\n def copy(self) -> 'Box':\n \"\"\"\n Create a copy of self.\n :return: A copy.\n \"\"\"\n return copy.deepcopy(self)" }, { "identifier": "load_waymo_points", "path": "gen_data/waymo_data_utils.py", "snippet": "def load_waymo_points(lidar_path):\n points = np.load(lidar_path).reshape(-1, 6)\n NLZ_flag = points[:, 5]\n points = points[NLZ_flag == -1]\n return points[:, :3]" }, { "identifier": "point_in_hull_fast", "path": "gen_data/waymo_data_utils.py", "snippet": "def point_in_hull_fast(points, bounding_box):\n \"\"\"\n Check if a point lies in a bounding box. We first rotate the bounding box to align with axis. Meanwhile, we\n also rotate the whole point cloud. Finally, we just check the membership with the aid of aligned axis.\n This implementation is fast.\n :param points: nd.array (N x d); N: the number of points, d: point dimension\n :param bounding_box: the Box object\n return: The membership of points within the bounding box\n \"\"\"\n # Make sure it is a unit quaternion\n bounding_box.orientation = bounding_box.orientation.normalised\n\n # Rotate the point clouds\n pc = bounding_box.orientation.inverse.rotation_matrix @ points.T\n pc = pc.T\n\n orientation_backup = Quaternion(bounding_box.orientation) # Deep clone it\n bounding_box.rotate(bounding_box.orientation.inverse)\n corners = bounding_box.corners()\n\n # Test if the points are in the bounding box\n idx = np.where((corners[0, 7] <= pc[:, 0]) & (pc[:, 0] <= corners[0, 0]) &\n (corners[1, 1] <= pc[:, 1]) & (pc[:, 1] <= corners[1, 0]) &\n (corners[2, 2] <= pc[:, 2]) & (pc[:, 2] <= corners[2, 0]))[0]\n\n # recover\n bounding_box.rotate(orientation_backup)\n return idx" } ]
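The point_in_hull_fast helper above sidesteps a general convex-hull test: it rotates the cloud by the box's inverse quaternion and then does an axis-aligned bounds check. The same idea as a standalone numpy/pyquaternion sketch, with toy box dimensions and points rather than anything from this record:

import numpy as np
from pyquaternion import Quaternion

def points_in_box(points, center, lwh, yaw):
    """Axis-aligned membership test after rotating points into the box frame."""
    rot = Quaternion(axis=[0, 0, 1], angle=yaw)
    local = (rot.inverse.rotation_matrix @ (points - center).T).T  # points in box frame
    half = np.array([lwh[0], lwh[1], lwh[2]]) / 2.0  # half-extents: length (x), width (y), height (z)
    return np.where(np.all(np.abs(local) <= half, axis=1))[0]

pts = np.array([[0.5, 0.0, 0.0], [5.0, 5.0, 0.0]])
print(points_in_box(pts, center=np.array([0.0, 0.0, 0.0]), lwh=(4.0, 2.0, 1.5), yaw=0.3))
# -> [0]: only the first point falls inside the rotated box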
import numpy as np from pathlib import Path from functools import reduce from gen_data.nuscenes.utils.data_classes import Box from pyquaternion import Quaternion from gen_data.waymo_data_utils import load_waymo_points, point_in_hull_fast
3973
""" Prepare the Foreground/Background information for Waymo data. """ obj_class_map = { "Vehicle": 1, "Pedestrian":2, "Cyclist": 3, "Others": 4 } # take sign as others def gen_weak_supervision(scene_name, lidar_path, ann_data, i, pc_random_index_dict, pc_down_sample_dict, num_down_sample = 50000): ''' get current info''' ann_i = ann_data[i] # extract info about reference key lidar_pc_path = lidar_path / "{:04d}.npy".format(i) cur_xyz = load_waymo_points(lidar_pc_path) ref_pose = ann_i["pose"] ref_token = "{}_{:04d}".format(scene_name, i) ref_ts = ann_i["time_stamp"] save_weak_dict = dict() id_list = [-5, 0, 5] for j in range(3): sweep_index = i + id_list[j] sweep_ann = ann_data[sweep_index] sweep_lidar_pc_path = lidar_path / "{:04d}.npy".format(sweep_index) sweep_pose = sweep_ann["pose"] sweep_pc = load_waymo_points(sweep_lidar_pc_path) sweep_token = "{}_{:04d}".format(scene_name, sweep_index) sweep_ts = sweep_ann["time_stamp"] time_lag = sweep_ts - ref_ts # ref_from_global * global_from_current = ref_from_current tm = reduce(np.dot, [np.linalg.inv(ref_pose), sweep_pose]) sweep_pc = sweep_pc.T sweep_pc[:3, :] = tm.dot(np.vstack((sweep_pc[:3, :], np.ones(sweep_pc.shape[1]))))[:3, :] points_label = get_label_info(sweep_ann, lidar_path, sweep_index) # down-sample down_sample_idx, pc_down_sample_dict = gen_random_index_for_pc(sweep_pc, sweep_token, pc_down_sample_dict) sweep_pc_t = sweep_pc.transpose((1, 0)) # We only preserve a fixed number of points for each point cloud if down_sample_idx.shape[0] > num_down_sample: sampled_sweep_pc_t = sweep_pc_t[down_sample_idx[:num_down_sample]] sampled_points_label = points_label[down_sample_idx[:num_down_sample]].astype(np.int32) else: sampled_sweep_pc_t = sweep_pc_t[down_sample_idx] sampled_points_label = points_label[down_sample_idx].astype(np.int32) sampled_sweep_pc = sampled_sweep_pc_t.transpose((1, 0)) save_weak_dict['synchronized_pc_' + str(j)] = sampled_sweep_pc save_weak_dict['frame_id_' + str(j)] = sweep_token save_weak_dict['ts_' + str(j)] = time_lag save_weak_dict['points_label_' + str(j)] = sampled_points_label sample_idx, pc_random_index_dict = gen_random_index_for_pc(sampled_sweep_pc, sweep_token, pc_random_index_dict) save_weak_dict['sample_idx_' + str(j)] = sample_idx.astype(np.int32) return save_weak_dict, pc_random_index_dict, pc_down_sample_dict def get_label_info(sweep_ann, lidar_path, sweep_index): sweep_nusc_box_dict = {} for obj_idx, obj_id in enumerate(sweep_ann["annos"]['obj_ids']): # vehicle system lwh = sweep_ann["annos"]["dimensions"][obj_idx] # c_x, c_y, c_z ctr = sweep_ann["annos"]["location"][obj_idx] # l, w, h yaw = sweep_ann["annos"]["heading_angles"][obj_idx] name = sweep_ann["annos"]["name"][obj_idx] nusc_box = Box( ctr, [lwh[1], lwh[0], lwh[2]], Quaternion(axis=[0, 0, 1], angle=yaw), name=name, token=obj_idx ) sweep_nusc_box_dict[obj_id] = nusc_box # # ----------init------------------- lidar_pc_path = lidar_path / "{:04d}.npy".format(sweep_index) sweep_xyz = load_waymo_points(lidar_pc_path) sweep_cls_mask = np.zeros([len(sweep_xyz), 1], dtype=np.int64) inbox_idx_dict = {} for box_token, sweep_box in sweep_nusc_box_dict.items():
""" Prepare the Foreground/Background information for Waymo data. """ obj_class_map = { "Vehicle": 1, "Pedestrian":2, "Cyclist": 3, "Others": 4 } # take sign as others def gen_weak_supervision(scene_name, lidar_path, ann_data, i, pc_random_index_dict, pc_down_sample_dict, num_down_sample = 50000): ''' get current info''' ann_i = ann_data[i] # extract info about reference key lidar_pc_path = lidar_path / "{:04d}.npy".format(i) cur_xyz = load_waymo_points(lidar_pc_path) ref_pose = ann_i["pose"] ref_token = "{}_{:04d}".format(scene_name, i) ref_ts = ann_i["time_stamp"] save_weak_dict = dict() id_list = [-5, 0, 5] for j in range(3): sweep_index = i + id_list[j] sweep_ann = ann_data[sweep_index] sweep_lidar_pc_path = lidar_path / "{:04d}.npy".format(sweep_index) sweep_pose = sweep_ann["pose"] sweep_pc = load_waymo_points(sweep_lidar_pc_path) sweep_token = "{}_{:04d}".format(scene_name, sweep_index) sweep_ts = sweep_ann["time_stamp"] time_lag = sweep_ts - ref_ts # ref_from_global * global_from_current = ref_from_current tm = reduce(np.dot, [np.linalg.inv(ref_pose), sweep_pose]) sweep_pc = sweep_pc.T sweep_pc[:3, :] = tm.dot(np.vstack((sweep_pc[:3, :], np.ones(sweep_pc.shape[1]))))[:3, :] points_label = get_label_info(sweep_ann, lidar_path, sweep_index) # down-sample down_sample_idx, pc_down_sample_dict = gen_random_index_for_pc(sweep_pc, sweep_token, pc_down_sample_dict) sweep_pc_t = sweep_pc.transpose((1, 0)) # We only preserve a fixed number of points for each point cloud if down_sample_idx.shape[0] > num_down_sample: sampled_sweep_pc_t = sweep_pc_t[down_sample_idx[:num_down_sample]] sampled_points_label = points_label[down_sample_idx[:num_down_sample]].astype(np.int32) else: sampled_sweep_pc_t = sweep_pc_t[down_sample_idx] sampled_points_label = points_label[down_sample_idx].astype(np.int32) sampled_sweep_pc = sampled_sweep_pc_t.transpose((1, 0)) save_weak_dict['synchronized_pc_' + str(j)] = sampled_sweep_pc save_weak_dict['frame_id_' + str(j)] = sweep_token save_weak_dict['ts_' + str(j)] = time_lag save_weak_dict['points_label_' + str(j)] = sampled_points_label sample_idx, pc_random_index_dict = gen_random_index_for_pc(sampled_sweep_pc, sweep_token, pc_random_index_dict) save_weak_dict['sample_idx_' + str(j)] = sample_idx.astype(np.int32) return save_weak_dict, pc_random_index_dict, pc_down_sample_dict def get_label_info(sweep_ann, lidar_path, sweep_index): sweep_nusc_box_dict = {} for obj_idx, obj_id in enumerate(sweep_ann["annos"]['obj_ids']): # vehicle system lwh = sweep_ann["annos"]["dimensions"][obj_idx] # c_x, c_y, c_z ctr = sweep_ann["annos"]["location"][obj_idx] # l, w, h yaw = sweep_ann["annos"]["heading_angles"][obj_idx] name = sweep_ann["annos"]["name"][obj_idx] nusc_box = Box( ctr, [lwh[1], lwh[0], lwh[2]], Quaternion(axis=[0, 0, 1], angle=yaw), name=name, token=obj_idx ) sweep_nusc_box_dict[obj_id] = nusc_box # # ----------init------------------- lidar_pc_path = lidar_path / "{:04d}.npy".format(sweep_index) sweep_xyz = load_waymo_points(lidar_pc_path) sweep_cls_mask = np.zeros([len(sweep_xyz), 1], dtype=np.int64) inbox_idx_dict = {} for box_token, sweep_box in sweep_nusc_box_dict.items():
inbox_idx = point_in_hull_fast(sweep_xyz, sweep_box)
2
2023-11-12 07:03:29+00:00
8k
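In the cropped code above, tm = reduce(np.dot, [np.linalg.inv(ref_pose), sweep_pose]) composes ref_from_global with global_from_sweep, so that sweep points land in the reference frame. A self-contained sketch of that homogeneous-coordinate frame change, with invented 2D-plus-yaw poses in place of the dataset's real ones:

import numpy as np

def make_pose(tx, ty, yaw):
    """4x4 homogeneous pose: rotation about z plus translation (illustrative values)."""
    c, s = np.cos(yaw), np.sin(yaw)
    pose = np.eye(4)
    pose[:2, :2] = [[c, -s], [s, c]]
    pose[:2, 3] = [tx, ty]
    return pose

ref_pose = make_pose(10.0, 0.0, 0.0)       # reference frame expressed in global coords
sweep_pose = make_pose(12.0, 0.0, 0.0)     # sweep frame expressed in global coords
tm = np.linalg.inv(ref_pose) @ sweep_pose  # ref_from_global @ global_from_sweep

pts = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 0.0]])   # points in the sweep frame
homo = np.hstack([pts, np.ones((len(pts), 1))])      # N x 4 homogeneous coordinates
in_ref = (tm @ homo.T).T[:, :3]                      # same points in the reference frame
print(in_ref)  # x shifted by +2: the sweep origin sits 2 m ahead of the reference origin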
c3exchange/c3-smartcontracts-v1
contracts_unified/core/methods/update_instrument.py
[ { "identifier": "perform_pool_move", "path": "contracts_unified/core/internal/perform_pool_move.py", "snippet": "@ABIReturnSubroutine\ndef perform_pool_move(\n account: AccountAddress,\n instrument_id: InstrumentId,\n transfer_amount: SignedAmount,\n) -> Expr:\n \"\"\"\n Transfers from the user to the pool `transfer_amount`.\n The function takes the following steps:\n 1. Calculates global accrued interest\n 2. Capitalizes the users balance by updating the\n user's principal with the user's accrued interest\n 3. Transfer between the user and the pool\n\n Parameters\n ----------\n user_position: current pool position of the user on `instrument_id`\n instrument_id: instrument index\n transfer_amount: amount to be transfered from the user to the pool.\n a positive value indicates that the user is sending to the pool (repaying and/or subscribing)\n a negative value indicates that the user is receiving from the pool (borrowing and/or redeeming)\n output: the user's position on the pool after the transfer\n \"\"\"\n\n # Instrument's attributes that change as part of this operation\n new_pool_last_update_time = RelativeTimestamp()\n old_pool_last_update_time = RelativeTimestamp()\n\n new_pool_borrowed = Amount()\n old_pool_borrowed = Amount()\n\n new_pool_liquidity = Amount()\n old_pool_liquidity = Amount()\n\n new_pool_borrow_index = InterestRate()\n old_pool_borrow_index = InterestRate()\n\n new_pool_lend_index = InterestRate()\n old_pool_lend_index = InterestRate()\n\n # Instrument attributes that are not affected by this operation\n asset_id = AssetId()\n initial_haircut = Ratio()\n initial_margin = Ratio()\n maintenance_haircut = Ratio()\n maintenance_margin = Ratio()\n\n optimal_utilization_ratio = Ratio()\n optimal_utilization_rate = InterestRate()\n min_rate = InterestRate()\n opt_rate = InterestRate()\n max_rate = InterestRate()\n\n # User's attributes\n user_position = UserInstrumentData()\n new_user_principal = SignedAmount()\n old_user_principal = SignedAmount()\n new_user_index = InterestRate()\n old_user_index = InterestRate()\n new_user_cash = Amount()\n old_user_cash = Amount()\n\n # Variables for intermediate calculations\n old_utilization_rate = InterestRate()\n old_interest_rate = InterestRate()\n delta_time = Timestamp()\n compounding_per_second_rate = InterestRate()\n compounding_per_period_rate = InterestRate()\n\n pool_accrued_interest = Amount()\n\n liquidity_transfer = SignedAmount()\n borrowed_transfer = SignedAmount()\n\n remainder = SignedAmount()\n\n instrument_state = InstrumentListElement()\n new_instrument_state = InstrumentListElement()\n\n return Seq(\n # Loads current instrument state\n instrument_state.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n\n instrument_state.asset_id.store_into(asset_id),\n # Loads pool data\n instrument_state.last_update_time.store_into(old_pool_last_update_time),\n instrument_state.borrowed.store_into(old_pool_borrowed),\n instrument_state.liquidity.store_into(old_pool_liquidity),\n instrument_state.borrow_index.store_into(old_pool_borrow_index),\n instrument_state.lend_index.store_into(old_pool_lend_index),\n\n # Loads interest curve data\n instrument_state.optimal_utilization.store_into(optimal_utilization_ratio),\n optimal_utilization_rate.set(WideRatio([optimal_utilization_ratio.get(), Int(RATE_ONE)], [Int(RATIO_ONE)])),\n instrument_state.min_rate.store_into(min_rate),\n instrument_state.opt_rate.store_into(opt_rate),\n instrument_state.max_rate.store_into(max_rate),\n\n # Loads haircuts and 
margins\n instrument_state.initial_haircut.store_into(initial_haircut),\n instrument_state.initial_margin.store_into(initial_margin),\n instrument_state.maintenance_haircut.store_into(maintenance_haircut),\n instrument_state.maintenance_margin.store_into(maintenance_margin),\n\n # Calculates the new timestamp\n # NOTE: Updates to this can be controlled via the algosdk function setBlockOffsetTimestamp\n new_pool_last_update_time.set(GlobalStateHandler.get_relative_timestamp()),\n\n ###############################################################################################################\n # 1.\n # Calculates the accrued interest in the pool since the last update\n # and reflects that on the total liquidity and borrow amount.\n\n # 1.1\n # AI_t = ((BI_t / BI_{t-1})-1) * B_{t-1} = ((1+R_{t_1})^dT - 1) * B_{t-1}\n\n # 1.1.1\n # Calculates the pool's utilization\n # U_{t-1} = B_{t-1} / L_{t-1} = B_{t-1} * 1 / L_{t-1}\n old_utilization_rate.set(\n If(old_pool_liquidity.get() == Int(0))\n .Then(Int(0))\n .Else(WideRatio([old_pool_borrowed.get(), Int(RATE_ONE)], [old_pool_liquidity.get()]))\n ),\n\n # 1.1.2\n # Calculates interest rate per second for the period since the last update\n # R_{t-1} = R_min + U_{t-1} / U_opt * R_slope1 if U_{t-1} < U_opt\n # R_{t-1} = R_opt + (U_{t-1}-U_opt) / (1 - U_opt) * R_slope2 if U_{t-1} >= U_opt\n old_interest_rate.set(\n If(old_utilization_rate.get() < optimal_utilization_rate.get())\n .Then(\n min_rate.get()\n + WideRatio(\n [old_utilization_rate.get(), opt_rate.get() - min_rate.get()],\n [optimal_utilization_rate.get()]\n )\n )\n .Else(\n opt_rate.get()\n + WideRatio(\n [old_utilization_rate.get() - optimal_utilization_rate.get(), max_rate.get() - opt_rate.get()],\n [Int(RATE_ONE) - optimal_utilization_rate.get()]\n )\n )\n ),\n\n # 1.1.3\n # Calculates time since previous update\n delta_time.set(new_pool_last_update_time.get() - old_pool_last_update_time.get()),\n\n # 1.1.4\n # AI_t = ((BI_t / BI_{t-1})-1) * B_{t-1} = ((1+R_{t_1})^dT - 1) * B_{t-1}\n compounding_per_second_rate.set(Int(RATE_ONE) + old_interest_rate.get()),\n compounding_per_period_rate.set(teal_expt(compounding_per_second_rate, delta_time)),\n pool_accrued_interest.set(\n WideRatio(\n [compounding_per_period_rate.get() - Int(RATE_ONE), old_pool_borrowed.get()],\n [Int(RATE_ONE)],\n )\n ),\n\n # 1.2\n # Capitalize pool accrued interest into liquidity and borrowed amounts\n new_pool_borrowed.set(old_pool_borrowed.get() + pool_accrued_interest.get()),\n new_pool_liquidity.set(old_pool_liquidity.get() + pool_accrued_interest.get()),\n\n # 1.3\n # Updates pool indexes\n new_pool_borrow_index.set(\n If(old_pool_borrowed.get() == Int(0))\n .Then(\n Int(RATE_ONE)\n )\n .Else(\n WideRatio([old_pool_borrow_index.get(), new_pool_borrowed.get()], [old_pool_borrowed.get()])\n )\n ),\n new_pool_lend_index.set(\n If(old_pool_liquidity.get() == Int(0))\n .Then(\n Int(RATE_ONE)\n )\n .Else(\n WideRatio([old_pool_lend_index.get(), new_pool_liquidity.get()], [old_pool_liquidity.get()])\n )\n ),\n\n # We only perform the pool move if a user was given, otherwise we just update the global instrument data\n If(account.get() != Global.zero_address()).Then(\n ###############################################################################################################\n # 2\n # Get user data\n user_position.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n user_position.cash.store_into(old_user_cash),\n user_position.principal.store_into(old_user_principal),\n 
user_position.index.store_into(old_user_index),\n\n # Capitalize user's accrued interest into user's principal\n new_user_principal.set(\n If(old_user_index.get() == Int(0))\n .Then(\n Int(0)\n )\n .ElseIf(signed_ltz(old_user_principal.get()))\n .Then(\n # The user has a borrow position\n calculate_accrued_borrow(old_user_principal, old_user_index, new_pool_borrow_index)\n )\n .Else(\n # The user has a lend position\n calculate_accrued_lend(old_user_principal, old_user_index, new_pool_lend_index)\n )\n ),\n\n ###############################################################################################################\n # 3\n # Transfer between the user and the pool\n\n # 3.0 Validate user's position against pool size\n Assert(new_pool_liquidity.get() >= signed_max(Int(0), new_user_principal.get())),\n # NOTE: The case for the user repaying more than the pool has borrowed is handled below\n # in order to handle zero-interest borrow case\n\n # 3.1 Updates the pool borrowed and liquitiy amounts\n\n # 3.1.1 Decompose the transfer_amount into borrowed_transfer and liquidity_transfer\n # such that:\n # a. transfer_amount == borrowed_transfer + liquidity_transfer\n # b. sign(transfer_amount) == sign(borrowed_transfer) == sign(liquidity_transfer)\n # c. if transfer_amount <=0:\n # # User cannot redeem more than its long position\n # liquidity_transfer = max(transfer_amount, min(0, -new_user_principal))\n # else:\n # # User must repay before subscribing\n # liquidity_transfer = max(transfer_amount + min(0, new_user_principal), 0)\n #\n # In other words:\n # - If transfer_amount is negative, then liquidity_transfer represents the\n # amount that the user is redeeming from the pool, and borrowed_transfer the amount that is\n # borrowing from the pool.\n # - If transfer_amount is positive, then liquidity_transfer represents the\n # amount that the user is subscribing to the pool, and borrowed_transfer the amount that is\n # repaying to the pool.\n liquidity_transfer.set(\n signed_max(\n signed_add(\n transfer_amount.get(),\n signed_min(Int(0), new_user_principal.get())\n ),\n signed_min(Int(0), signed_neg(new_user_principal.get()))\n )\n ),\n borrowed_transfer.set(\n signed_sub(\n transfer_amount.get(),\n liquidity_transfer.get()\n )\n ),\n\n # 3.1.2 Applies the liquidity_transfer and borrowed_transfer to the pool\n new_pool_borrowed.set(signed_sub(new_pool_borrowed.get(), borrowed_transfer.get())),\n\n # Handles the case where the user repays more than the pool has borrowed\n # This will happen when there are accumulated microunits of interest\n If(signed_ltz(new_pool_borrowed.get())).Then(\n # Remainder is whatever is left in the transfer after repaying all pool borrows\n remainder.set(signed_neg(new_pool_borrowed.get())),\n\n # New liquidity index is updated to reflect the remainder\n # liquidity_index' = liquidity_index + liquidity_index * remainder / pool_liquidity\n new_pool_lend_index.set(new_pool_lend_index.get() + WideRatio([new_pool_lend_index.get(), remainder.get()], [new_pool_liquidity.get()])),\n\n # New liquidity includes the remainder\n new_pool_liquidity.set(signed_add(new_pool_liquidity.get(), remainder.get())),\n\n # Borrowed is cleared to remain always positive\n new_pool_borrowed.set(Int(0))\n ),\n\n new_pool_liquidity.set(signed_add(new_pool_liquidity.get(), liquidity_transfer.get())),\n\n # 3.1.3 Validate the pool has sufficient liquidity to perform the operation\n Assert(new_pool_liquidity.get() >= new_pool_borrowed.get()),\n\n # 3.2 Update user's principal and cash\n 
new_user_principal.set(signed_add(new_user_principal.get(), transfer_amount.get())),\n new_user_cash.set(signed_sub(old_user_cash.get(), transfer_amount.get())),\n Assert(Not(signed_ltz(new_user_cash.get()))),\n\n # 3.3 Update user's index\n new_user_index.set(\n If(signed_ltz(new_user_principal.get()))\n .Then(new_pool_borrow_index.get())\n .Else(new_pool_lend_index.get())\n ),\n\n # Update user\n user_position.set(new_user_cash, new_user_principal, new_user_index),\n cast(Expr, LocalStateHandler.set_position(account, instrument_id, user_position)),\n ),\n\n # Update liquidity pool\n new_instrument_state.set(\n asset_id,\n initial_haircut,\n initial_margin,\n maintenance_haircut,\n maintenance_margin,\n new_pool_last_update_time,\n new_pool_borrow_index,\n new_pool_lend_index,\n optimal_utilization_ratio,\n min_rate,\n opt_rate,\n max_rate,\n new_pool_borrowed,\n new_pool_liquidity,\n ),\n\n # Update instrument\n cast(Expr, GlobalStateHandler.set_instrument(instrument_id, new_instrument_state)),\n )" }, { "identifier": "setup", "path": "contracts_unified/core/internal/setup.py", "snippet": "@Subroutine(TealType.none)\ndef setup(opup_amount: Expr) -> Expr:\n \"\"\"Setup the required pre-method OpUp and state handlers\"\"\"\n\n target = AppId()\n i = abi.Uint64()\n\n return Seq(\n # Get target\n # FIXME: Use the price caster when we can\n target.set(Txn.applications[1]),\n\n # Loop over the opup request\n # NOTE: We can't use the PyTEAL op-up because of ABI issues\n i.set(opup_amount),\n While(i.get() >= Global.min_txn_fee()).Do(\n InnerTxnBuilder.ExecuteMethodCall(\n app_id=target.get(),\n method_signature=\"nop()void\",\n args=[],\n extra_fields={TxnField.fee: Int(0)}\n ),\n i.set(i.get() - Global.min_txn_fee()),\n ),\n )" }, { "identifier": "GlobalStateHandler", "path": "contracts_unified/core/state_handler/global_handler.py", "snippet": "class GlobalStateHandler:\n \"\"\"Global state handler\"\"\"\n\n instrument_size = abi.make(InstrumentListElement).type_spec().byte_length_static()\n max_instrument_count = 80\n\n # NOTE: Most of these methods are not subroutines for performance reasons\n @staticmethod\n def initialize() -> Expr:\n \"\"\"Initialize the global blob\"\"\"\n\n return Pop(App.box_create(Bytes(\"i\"), Int(GlobalStateHandler.instrument_size * GlobalStateHandler.max_instrument_count)))\n\n @staticmethod\n def get_relative_timestamp() -> Expr:\n \"\"\"Gets the relative timestamp\"\"\"\n\n return Global.latest_timestamp() - App.globalGet(KEY_INIT_TIMESTAMP)\n\n @staticmethod\n def set_init_timestamp() -> Expr:\n \"\"\"Sets the initial timestamp\"\"\"\n\n return App.globalPut(KEY_INIT_TIMESTAMP, Global.latest_timestamp())\n\n @staticmethod\n def get_instrument_count() -> Expr:\n \"\"\"Gets the number of instruments\"\"\"\n\n return App.globalGet(KEY_INSTRUMENT_COUNT)\n\n @staticmethod\n def set_instrument_count(instrument_count) -> Expr:\n \"\"\"Sets the number of instruments\"\"\"\n\n return App.globalPut(KEY_INSTRUMENT_COUNT, instrument_count)\n\n @staticmethod\n def get_pricecaster_id() -> Expr:\n \"\"\"Gets the App id of the pricecaster\"\"\"\n\n return App.globalGet(KEY_PRICECASTER_ID)\n\n @staticmethod\n def set_pricecaster_id(pricecaster_id) -> Expr:\n \"\"\"Sets the App id of the pricecaster\"\"\"\n\n return App.globalPut(KEY_PRICECASTER_ID, Btoi(pricecaster_id))\n\n @staticmethod\n def get_wormhole_bridge_id() -> Expr:\n \"\"\"Gets the App id of the wormhole bridge\"\"\"\n\n return App.globalGet(KEY_WORMHOLE_BRIDGE_ID)\n\n @staticmethod\n def 
set_wormhole_bridge_id(wormhole_bridge_id) -> Expr:\n \"\"\"Sets the App id of the wormhole bridge\"\"\"\n\n return App.globalPut(KEY_WORMHOLE_BRIDGE_ID, Btoi(wormhole_bridge_id))\n\n @staticmethod\n @ABIReturnSubroutine\n def set_address(key, address) -> Expr:\n \"\"\"Sets an address in the global storage checking the length\"\"\"\n\n return Seq(\n Assert(Len(address) == Int(ADDRESS_SIZE)),\n App.globalPut(key, address)\n )\n\n @staticmethod\n def get_signature_validator() -> Expr:\n \"\"\"Checks the address of the signature validator\"\"\"\n\n return App.globalGet(KEY_SIGNATURE_VALIDATOR)\n\n @staticmethod\n def set_signature_validator(signature_validator) -> Expr:\n \"\"\"Sets the address of the signature validator\"\"\"\n\n return cast(Expr, GlobalStateHandler.set_address(KEY_SIGNATURE_VALIDATOR, signature_validator))\n\n @staticmethod\n def get_operator_address() -> Expr:\n \"\"\"Gets the address of the operator\"\"\"\n\n return App.globalGet(KEY_OPERATOR_ADDRESS)\n\n @staticmethod\n def set_operator_address(operator_address) -> Expr:\n \"\"\"Sets the address of the operator\"\"\"\n\n return cast(Expr, GlobalStateHandler.set_address(KEY_OPERATOR_ADDRESS, operator_address))\n\n @staticmethod\n def get_quant_address() -> Expr:\n \"\"\"Gets the quant address\"\"\"\n\n return App.globalGet(KEY_QUANT_ADDRESS)\n\n @staticmethod\n def set_quant_address(quant_address) -> Expr:\n \"\"\"Sets the quant address\"\"\"\n\n return cast(Expr, GlobalStateHandler.set_address(KEY_QUANT_ADDRESS, quant_address))\n\n @staticmethod\n def get_fee_target() -> Expr:\n \"\"\"Gets the fee target address\"\"\"\n\n return App.globalGet(KEY_FEE_TARGET)\n\n @staticmethod\n def set_fee_target(fee_target_address) -> Expr:\n \"\"\"Sets the fee target address\"\"\"\n\n return cast(Expr, GlobalStateHandler.set_address(KEY_FEE_TARGET, fee_target_address))\n\n @staticmethod\n def get_withdraw_buffer() -> Expr:\n \"\"\"Gets the withdraw buffer address\"\"\"\n\n return App.globalGet(KEY_WITHDRAW_BUFFER)\n\n @staticmethod\n def set_withdraw_buffer(withdraw_buffer) -> Expr:\n \"\"\"Sets the withdraw buffer address\"\"\"\n\n return cast(Expr, GlobalStateHandler.set_address(KEY_WITHDRAW_BUFFER, withdraw_buffer))\n\n @staticmethod\n @ABIReturnSubroutine\n def ensure_mbr_fund() -> Expr:\n \"\"\"Ensures the current mbr is lower than the fund\"\"\"\n\n return Assert(MinBalance(Global.current_application_address()) <= App.globalGet(KEY_MBR_FUND))\n\n @staticmethod\n def add_mbr_fund(mbr_fund) -> Expr:\n \"\"\"Increments the mbr fund amount by an amount\"\"\"\n\n return App.globalPut(KEY_MBR_FUND, App.globalGet(KEY_MBR_FUND) + mbr_fund)\n\n @staticmethod\n def get_liquidation_factors() -> Expr:\n \"\"\"Gets the object representing the liquidation factors\"\"\"\n\n return App.globalGet(KEY_LIQUIDATION_FACTORS)\n\n @staticmethod\n def set_liquidation_factors(factors) -> Expr:\n \"\"\"Sets the global liquidation factors\"\"\"\n factors_size = abi.make(LiquidationFactors).type_spec().byte_length_static()\n return Seq(\n Assert(Len(factors) == Int(factors_size)),\n App.globalPut(KEY_LIQUIDATION_FACTORS, factors),\n )\n\n @staticmethod\n @ABIReturnSubroutine\n def get_instrument(\n instrument_id: InstrumentId,\n *,\n output: InstrumentListElement,\n ) -> Expr:\n \"\"\"Get the instrument details for a given instrument ID\"\"\"\n\n return Seq(\n output.decode(App.box_extract(Bytes(\"i\"), instrument_id.get() * Int(GlobalStateHandler.instrument_size), Int(GlobalStateHandler.instrument_size))),\n )\n\n @staticmethod\n def set_instrument(\n 
instrument_id: InstrumentId,\n new_entry: InstrumentListElement,\n ) -> Expr:\n \"\"\"Set the instrument details for a given instrument ID\"\"\"\n\n return Seq(\n App.box_replace(Bytes(\"i\"), instrument_id.get() * Int(GlobalStateHandler.instrument_size), new_entry.encode()),\n )" }, { "identifier": "Amount", "path": "contracts_unified/library/c3types.py", "snippet": "class SignedInstrumentAmount(abi.NamedTuple):\nclass LiquidationFactors(abi.NamedTuple):\nclass InstrumentListElement(abi.NamedTuple):\nclass UserInstrumentData(abi.NamedTuple):\nclass OnChainOrderData(abi.NamedTuple):\nclass WormholeAddress(abi.NamedTuple):\nclass DecodedWormholePayload(abi.NamedTuple):" }, { "identifier": "UpdateInstrumentInfo", "path": "contracts_unified/library/c3types_server.py", "snippet": "class UpdateInstrumentInfo(abi.NamedTuple):\n \"\"\"Used to create a new instrument\"\"\"\n\n instrument_id: abi.Field[InstrumentId]\n asset_id: abi.Field[AssetId]\n initial_haircut: abi.Field[Ratio]\n initial_margin: abi.Field[Ratio]\n maintenance_haircut: abi.Field[Ratio]\n maintenance_margin: abi.Field[Ratio]\n optimal_utilization: abi.Field[Ratio]\n min_rate: abi.Field[InterestRate]\n opt_rate: abi.Field[InterestRate]\n max_rate: abi.Field[InterestRate]" }, { "identifier": "RATE_ONE", "path": "contracts_unified/library/constants.py", "snippet": "RATE_ONE = 10**12" } ]
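Steps 1.1.1 through 1.1.4 of perform_pool_move above implement a kinked utilization curve and per-second compounding in RATE_ONE = 10^12 fixed point. The same arithmetic in plain floats, with toy parameters rather than the contract's real ones, makes the formulas easy to check:

def interest_rate(borrowed, liquidity, u_opt, r_min, r_opt, r_max):
    """Kinked curve: linear from r_min to r_opt below u_opt, from r_opt to r_max above."""
    u = 0.0 if liquidity == 0 else borrowed / liquidity
    if u < u_opt:
        return r_min + u / u_opt * (r_opt - r_min)
    return r_opt + (u - u_opt) / (1 - u_opt) * (r_max - r_opt)

borrowed, liquidity = 400.0, 1_000.0
r = interest_rate(borrowed, liquidity, u_opt=0.8, r_min=1e-9, r_opt=5e-9, r_max=1e-7)
dt = 3600                                  # seconds since the last pool update
accrued = ((1 + r) ** dt - 1) * borrowed   # AI_t = ((1 + R)^dT - 1) * B_{t-1}
print(r, accrued)                          # both liquidity and borrowed grow by `accrued`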
from typing import cast from pyteal import ( ABIReturnSubroutine, Assert, Expr, Global, If, InnerTxnBuilder, Int, Seq, Txn, TxnField, TxnType, abi, ) from contracts_unified.core.internal.perform_pool_move import perform_pool_move from contracts_unified.core.internal.setup import setup from contracts_unified.core.state_handler.global_handler import GlobalStateHandler from contracts_unified.library.c3types import ( Amount, AssetId, InstrumentId, InstrumentListElement, InterestRate, Ratio, RelativeTimestamp, ) from contracts_unified.library.c3types_server import UpdateInstrumentInfo from contracts_unified.library.constants import RATE_ONE
5686
""" Implements Core contract method for adding an instrument. """ def inner_asset_opt_in(asset_id: AssetId) -> Expr: """Inner transaction that opts in to an ASA""" return InnerTxnBuilder.Execute( { TxnField.type_enum: TxnType.AssetTransfer, TxnField.xfer_asset: asset_id.get(), TxnField.asset_receiver: Global.current_application_address(), TxnField.asset_amount: Int(0), TxnField.fee: Int(0), } ) @ABIReturnSubroutine def update_instrument( info: UpdateInstrumentInfo, opup_budget: Amount, ) -> Expr: """Implements the method that adds an instrument to the Core contract storage box. Arguments: info (UpdateInstrumentInfo): Instrument information to add or update. opup_budget (Amount): Additional computation budget to allocate to this transaction. """ abi_zero = abi.Uint64() abi_rate_one = abi.Uint64() abi_zero_address = abi.Address() timestamp = RelativeTimestamp() asset_id = AssetId() initial_haircut = Ratio() initial_margin = Ratio() maintenance_haircut = Ratio() maintenance_margin = Ratio() optimal_utilization = Ratio()
""" Implements Core contract method for adding an instrument. """ def inner_asset_opt_in(asset_id: AssetId) -> Expr: """Inner transaction that opts in to an ASA""" return InnerTxnBuilder.Execute( { TxnField.type_enum: TxnType.AssetTransfer, TxnField.xfer_asset: asset_id.get(), TxnField.asset_receiver: Global.current_application_address(), TxnField.asset_amount: Int(0), TxnField.fee: Int(0), } ) @ABIReturnSubroutine def update_instrument( info: UpdateInstrumentInfo, opup_budget: Amount, ) -> Expr: """Implements the method that adds an instrument to the Core contract storage box. Arguments: info (UpdateInstrumentInfo): Instrument information to add or update. opup_budget (Amount): Additional computation budget to allocate to this transaction. """ abi_zero = abi.Uint64() abi_rate_one = abi.Uint64() abi_zero_address = abi.Address() timestamp = RelativeTimestamp() asset_id = AssetId() initial_haircut = Ratio() initial_margin = Ratio() maintenance_haircut = Ratio() maintenance_margin = Ratio() optimal_utilization = Ratio()
min_rate = InterestRate()
3
2023-11-17 20:54:15+00:00
8k
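The contract cannot evaluate (1 + R)^dT in floating point; teal_expt, which is not among the snippets shown in this record, presumably performs exponentiation by squaring over the fixed-point representation. A sketch of that technique in Python integers, offered as an assumption about the approach rather than a copy of the contract's routine:

RATE_ONE = 10**12  # 1.0 in the contract's fixed-point representation

def fixed_mul(a, b):
    return a * b // RATE_ONE

def fixed_pow(base, exp):
    """Exponentiation by squaring over fixed-point integers."""
    result = RATE_ONE
    while exp:
        if exp & 1:
            result = fixed_mul(result, base)
        base = fixed_mul(base, base)
        exp >>= 1
    return result

rate = RATE_ONE + 3_000  # 1 + 3e-9 per second, in fixed point
print(fixed_pow(rate, 3600))  # ~= RATE_ONE * (1 + 3e-9) ** 3600

Truncating division in fixed_mul drops sub-unit precision on every step, which is one plausible source of the leftover "microunits of interest" the pool-move code explicitly handles.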
gunderson-dettmer/CE2OCF
CE2OCF/ce/mocks/objects.py
[ { "identifier": "convert_ce_answers_xml_to_json_string", "path": "CE2OCF/ce/transforms/json.py", "snippet": "def convert_ce_answers_xml_to_json_string(xml_data: str | ET.ElementTree) -> str:\n \"\"\"\n Given CE XML answer export, convert it to JSON format that the API generates.\n\n :param xml_data: Xml String or ElementTree\n :return: json string\n \"\"\"\n\n if isinstance(xml_data, ET.ElementTree):\n root = xml_data\n else:\n element = ET.fromstring(xml_data)\n root = ET.ElementTree(element)\n\n # Initializing an empty list to hold all variable dictionaries\n json_list = []\n\n # Iterating over all Variable elements in the XML\n for variable in root.findall(\"{http://schemas.business-integrity.com/dealbuilder/2006/answers}Variable\"):\n # Creating a dictionary for each Variable element\n\n values = variable.findall(\"{http://schemas.business-integrity.com/dealbuilder/2006/answers}Value\")\n\n variable_dict = {\n \"name\": variable.get(\"Name\"),\n \"repetition\": variable.get(\"RepeatContext\", None),\n \"values\": [str(value.text) for value in values],\n }\n\n # Retrieving all Value elements within each Variable element and storing their text in a list\n\n # Adding the dictionary to the list\n json_list.append(variable_dict)\n\n # Returning the list as a JSON string\n return json.dumps(json_list, indent=2)" }, { "identifier": "convert_pydantic_to_xml_elem", "path": "CE2OCF/ce/transforms/xml.py", "snippet": "def convert_pydantic_to_xml_elem(\n elem,\n counter: None | int = None,\n repeat_fields: list[str] = [],\n override_repeat_context: str | None = None,\n) -> list[ET.Element]:\n # logger.debug(f\"convert_pydantic_to_xml_elem() for type {type(elem)}: {elem}\")\n var_elems = []\n for field_name in elem.__fields__:\n field_value = getattr(elem, field_name)\n var_elems.append(\n create_variable_element(field_name, field_value, counter, repeat_fields, override_repeat_context)\n )\n return var_elems" }, { "identifier": "xml_elements_to_ce_xml_tree", "path": "CE2OCF/ce/transforms/xml.py", "snippet": "def xml_elements_to_ce_xml_tree(xml_elems: list[ET.Element]):\n\n assert isinstance(xml_elems, list)\n root = ET.Element(\"Session\")\n root.attrib[\"xmlns\"] = \"http://schemas.business-integrity.com/dealbuilder/2006/answers\"\n for var_elem in xml_elems:\n assert isinstance(var_elem, ET.Element)\n root.append(var_elem)\n\n # Add parameter tags\n updated_tree = add_param_elem_to_tree(root)\n\n return ET.tostring(updated_tree, encoding=\"unicode\")" }, { "identifier": "mock_company", "path": "CE2OCF/ocf/mocks/company.py", "snippet": "def mock_company() -> Company:\n return Company(\n CompanyName=fake.company(),\n CompanyShortName=fake.company_suffix(),\n PricePerShare=round(random.uniform(0.10, 3.00), 2),\n DateBlank=fake.date(),\n ClientMatterNumber=fake.random_int(min=1, max=999),\n CompanyPhoneNumber=fake_phone_number(),\n CompanyCity=fake.city(),\n CompanyCounty=fake.state_abbr(include_territories=False, include_freely_associated_states=False),\n CompanyZip=fake.zipcode(),\n CompanyFaxNumber=fake_phone_number(),\n CFOTreasurer=fake.name(),\n CEO=fake.name(),\n FFPreferred=fake.boolean(),\n FFPreferredPricePerShare=round(random.uniform(0.10, 3.00), 2),\n FFPreferredSharesAuthorized=fake.random_int(min=1000, max=9999),\n FirstDateWagesPaid=fake.date(),\n Form941Or944=fake.boolean(),\n GDIncorporator=fake.boolean(),\n GDOffice=random.choice(list(CommonCityEnum)),\n GoverningLaw=fake.state_abbr(include_territories=False, include_freely_associated_states=False),\n 
IPFormsOffice=random.choice(list(CommonCityEnum)),\n NumberAnticipatedEmployees=fake.random_int(min=10, max=100),\n NumberDirectors=fake.random_int(min=1, max=10),\n NumberStockholders=fake.random_int(min=1, max=10),\n OperationState=fake.state_abbr(include_territories=False, include_freely_associated_states=False),\n ParValue=float(fake.random_number(digits=3)) / 100000,\n President=fake.name(),\n PrincipalBusinessActivity=fake.sentence(),\n ResponsibleParty=fake.name(),\n ResponsiblePartySSN=fake.ssn(),\n ResponsiblePartyTitle=fake.job(),\n RASelect=random.choice(list(RegisteredAgentsEnum)),\n Secretary=fake.name(),\n SharesAuthorized=fake.random_int(min=1000, max=9999),\n SharesReservedStockPlan=fake.random_int(min=1000, max=9999),\n SoleIncorporator=fake.name(),\n SOPYear=fake.year(),\n StockPlan=fake.boolean(),\n DescriptionServicesProvided=fake.sentence(),\n CompanyState=fake.state_abbr(include_territories=False, include_freely_associated_states=False),\n CompanyStreet=fake.street_address(),\n EDGAR=fake.random_int(),\n EIN=fake.ssn(),\n )" }, { "identifier": "mock_director", "path": "CE2OCF/ocf/mocks/officers.py", "snippet": "def mock_director() -> Director:\n return Director(DirectorName=fake.name())" }, { "identifier": "mock_stockholder", "path": "CE2OCF/ocf/mocks/stockholders.py", "snippet": "def mock_stockholder() -> Stockholder:\n return Stockholder(\n id=uuid.uuid4().__str__(),\n DoubleTrigger=random.choice(list(DoubleTriggerTypesEnum)),\n DescriptionAssignedTechnology=fake.sentence(),\n BroadDescriptionAssignedTechnology=fake.sentence(),\n EmailAddress=fake.email(),\n FFPreferredShares=fake.random_int(min=0, max=1000),\n PaidWith=random.choice([el for el in PaidWithOptionsEnum]),\n PhoneNumber=fake_phone_number(),\n SingleTrigger=random.choice(list(SingleTriggerTypesEnum)),\n Shares=fake.random_int(min=0, max=1000),\n SSN=fake.ssn(),\n Stockholder=fake.name(),\n StockholderCity=fake.city(),\n StockholderState=fake.state_abbr(include_territories=False, include_freely_associated_states=False),\n StockholderStreet=fake.street_address(),\n StockholderZip=fake.zipcode(),\n VCD=fake.word(),\n Vesting=random.choice(list(VestingTypesEnum)),\n )" }, { "identifier": "sum_shares", "path": "CE2OCF/ocf/mocks/stockholders.py", "snippet": "def sum_shares(stockholder_list: list[Stockholder]) -> tuple[int, int]:\n\n total_FFPreferredShares = 0\n total_Shares = 0\n\n for stockholder in stockholder_list:\n if stockholder.FFPreferredShares is not None:\n total_FFPreferredShares += stockholder.FFPreferredShares\n if stockholder.Shares is not None:\n total_Shares += stockholder.Shares # if Shares are floats, replace with `float(stockholder.Shares)`\n\n return total_FFPreferredShares, total_Shares" }, { "identifier": "GD_HUMAN_REPEAT_SELECTIONS_TO_VAR_NAMES", "path": "CE2OCF/ocf/postprocessors.py", "snippet": "GD_HUMAN_REPEAT_SELECTIONS_TO_VAR_NAMES = {\n \"Paid With\": \"PaidWith\",\n \"Genus-level Description of Company Project\": \"BroadDescriptionAssignedTechnology\",\n \"Specific Description of Assigned Technology\": \"DescriptionAssignedTechnology\",\n \"Vesting Schedule\": \"Vesting\",\n \"Vesting Commencement Date\": \"VCD\",\n \"Single Trigger Acceleration Provision\": \"SingleTrigger\",\n \"Double Trigger Acceleration Provision\": \"DoubleTrigger\",\n}" }, { "identifier": "RepeatableFields", "path": "CE2OCF/types/enums.py", "snippet": "class RepeatableFields(enum.Enum):\n \"\"\"\n Some stockholder fields can be set on the first stockholder and then repeated for all four.\n This is an enum 
that covers all such fields.\n \"\"\"\n\n PAID_WITH = \"PaidWith\"\n VESTING_SCHEDULE = \"Vesting\"\n VCD = \"VCD\"\n SINGLE_TRIGGER_ACCEL = \"SingleTrigger\"\n DOUBLE_TRIGGER = \"DoubleTrigger\"\n COMPANY_DESCRIPTION = \"BroadDescriptionAssignedTechnology\" # Not sure why each shareholder has a broad and\n # narrow description, honestly.\n ASSIGNED_TECHNOLOGY = \"DescriptionAssignedTechnology\"\n # ALL = \"All\" # If this is selected, all values of this enum are assigned as <Value> child tags to applicable\n # variable... so it's not actually a value" }, { "identifier": "TransferRestrictionEnum", "path": "CE2OCF/types/enums.py", "snippet": "class TransferRestrictionEnum(str, enum.Enum):\n ALL_STOCK = \"All Stock\"\n COMMON_STOCK = (\n \"Common Stock (will exclude Common stock issued upon conversion of Preferred Stock, \"\n \"and if applicable, Founder Preferred Stock) \"\n )" }, { "identifier": "BylawVars", "path": "CE2OCF/types/models.py", "snippet": "class BylawVars(BaseModel):\n RoFR: bool # Include a standard Right of First Refusal in the bylaws?\n QuasiCA: bool # any possibility that the Company is or could become a Quasi-California company?\n TransferRestrictions: bool # Include Transfer Restrictions in the Bylaws?\n TransferRestrictionsLanguage: bool # Include lang ref these transfer restrictions in the Founder SPA?\n TransferRestrictionDate: bool # Include exact date to impose transfer restrictions on Shares?\n TransferRestrictionStock: str\n TransferDate: Optional[str] # This is only included if TransferRestrictionDate is True and is format YYYY-MM-DD\n DirectListingTransfer: bool # Do the stock transfer restrictions terminate upon a direct listing?\n # Do the transfer restrictions apply to common stock or all stock?" }, { "identifier": "Company", "path": "CE2OCF/types/models.py", "snippet": "class Company(BaseModel):\n CompanyName: str\n CompanyShortName: str\n PricePerShare: float = Field(..., ge=0.10, le=3.00, decimals=2)\n DateBlank: str\n ClientMatterNumber: int\n CompanyPhoneNumber: str\n CompanyCity: str\n CompanyCounty: str\n CompanyZip: str\n CompanyFaxNumber: str\n CFOTreasurer: str\n CEO: str\n FFPreferred: bool # Should we create and authorize Founder Preferred stock?\n FFPreferredPricePerShare: Optional[float] = None # IF FFPreferred is true, what's price per share\n FFPreferredSharesAuthorized: Optional[int] = None # IF FFPreferred is true, what's total # of shares\n FirstDateWagesPaid: str # Format YYYY-MM-DD\n Form941Or944: bool\n GDIncorporator: bool # Who was the GD incorporator (name)\n GDOffice: str # Which office was this client from\n GoverningLaw: str # Which U.S. State's law governs\n IPFormsOffice: str\n NumberAnticipatedEmployees: int\n NumberDirectors: int # How many directors are there? We'll need to generate a director object for each\n NumberStockholders: int # How many stockholders should we generate - generate objets for as many as is specified\n OperationState: str # What state does the company operate in primarily?\n ParValue: float # What is the Par Value for the Stock? 
Typically this is $0.0001 or $0.001\n President: str # Who is the president of the company?\n PrincipalBusinessActivity: str # What does the company do?\n ResponsibleParty: str # What is the name of the person responsible for the companyt\n ResponsiblePartySSN: str # What is the SSN of the person responsible for company\n ResponsiblePartyTitle: str # Which officer is responsible for the company incorporation\n RASelect: RegisteredAgentsEnum\n Secretary: str # Who is the company secretary?\n SharesAuthorized: int # This must be greater than the options, common stock and founder preferred.\n SharesReservedStockPlan: int # IF StockPlan is true, how many share are reserved?\n SoleIncorporator: str\n SOPYear: str # If StockPlan is true, what is the year YYYY-MM-DD\n StockPlan: bool\n DescriptionServicesProvided: str\n CompanyState: str\n CompanyStreet: str\n EDGAR: str # EDGAR ID #\n EIN: str # Company EIN for EDGAR filings" }, { "identifier": "Director", "path": "CE2OCF/types/models.py", "snippet": "class Director(BaseModel):\n DirectorName: str" }, { "identifier": "FormVars", "path": "CE2OCF/types/models.py", "snippet": "class FormVars(BaseModel):\n StockholderInfoSame: list[\n str\n ] # This is a weird field... if this is present, has a comma separated lits of var names to automatically use\n # the _S1 value.\n UsingTopTemplateFlag: bool\n UsingTopTemplateFlagFO_IAL: str # Not sure what this is\n UsingTopTemplateFlag_IA: str # Not sure what this is\n Waiver220: bool # Include waiver of statutory information rights under DGCL Section 220?\n IndemnificationAgrIncluded: bool # include an indemnification agreement?\n EmployeeNoncompete: bool" }, { "identifier": "Stockholder", "path": "CE2OCF/types/models.py", "snippet": "class Stockholder(BaseModel):\n id: str\n DoubleTrigger: DoubleTriggerTypesEnum\n # our answer will appear below the general description entered above. If no additional language is necessary,\n # skip this field\n DescriptionAssignedTechnology: Optional[str]\n # The description should provide clarity regarding exactly what property is being transferred while being neither\n # too narrow nor too broad.\n BroadDescriptionAssignedTechnology: str\n EmailAddress: str\n FFPreferredShares: Optional[\n int\n ] = None # If founder preferred is authorized for company AND we want to give this stockholder some,\n # how many shares do they get?\n PaidWith: PaidWithOptionsEnum\n PhoneNumber: str\n SingleTrigger: SingleTriggerTypesEnum\n Shares: int\n SSN: str\n Stockholder: str = Field(\n default_factory=lambda: uuid.uuid4().__str__()\n ) # Name of stockholder goes here BUT we're using uuid to be able filter objs by name and have guaranteed\n # uniques. Required for tests.\n StockholderCity: str\n StockholderState: str\n StockholderStreet: str\n StockholderZip: str\n VCD: str\n Vesting: VestingTypesEnum" }, { "identifier": "logger", "path": "CE2OCF/utils/log_utils.py", "snippet": "" } ]
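The transforms imported below serialize mocked model fields into the CE answer-file shape visible in convert_ce_answers_xml_to_json_string above: a Session root in the dealbuilder namespace holding Variable elements with Value children. A minimal round-trip sketch with a plain dict standing in for the pydantic models (the field names and values here are made up):

import xml.etree.ElementTree as ET

NS = "http://schemas.business-integrity.com/dealbuilder/2006/answers"

def to_ce_xml(answers: dict) -> str:
    """Build a Session tree of Variable/Value elements from a name -> values mapping."""
    root = ET.Element("Session", {"xmlns": NS})
    for name, values in answers.items():
        var = ET.SubElement(root, "Variable", {"Name": name})
        for value in values:
            ET.SubElement(var, "Value").text = str(value)
    return ET.tostring(root, encoding="unicode")

xml = to_ce_xml({"CompanyName": ["Acme, Inc."], "NumberDirectors": [3]})
print(xml)
# Re-parsing namespaces the children exactly as the JSON converter above expects:
root = ET.fromstring(xml)
print([v.get("Name") for v in root.findall(f"{{{NS}}}Variable")])  # ['CompanyName', 'NumberDirectors']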
import itertools import random import xml.etree.ElementTree as ET from CE2OCF.ce.transforms.json import ( convert_ce_answers_xml_to_json_string, ) from CE2OCF.ce.transforms.xml import ( convert_pydantic_to_xml_elem, xml_elements_to_ce_xml_tree, ) from CE2OCF.ocf.mocks.company import mock_company from CE2OCF.ocf.mocks.officers import mock_director from CE2OCF.ocf.mocks.stockholders import mock_stockholder, sum_shares from CE2OCF.ocf.postprocessors import ( GD_HUMAN_REPEAT_SELECTIONS_TO_VAR_NAMES, ) from CE2OCF.types.enums import RepeatableFields, TransferRestrictionEnum from CE2OCF.types.models import ( BylawVars, Company, Director, FormVars, Stockholder, ) from CE2OCF.utils.log_utils import logger
4036
from __future__ import annotations def mock_formvars( override_repeated_fields: list[RepeatableFields] | None = None, use_gunderson_repeat_names: bool = True ) -> FormVars: """ Args: override_repeated_fields: use_gunderson_repeat_names: Our repeat variable values in the CE jsons are different than the actual variable names that are repeated, so you need a mapping dict Returns: """ logger.debug(f"mock_formvars started with override_repeated_fields: {override_repeated_fields}") if override_repeated_fields is None: logger.debug("unique_repeatable_fields is None... prepare random sample...") unique_repeatable_fields = random.sample( [el.value for el in RepeatableFields], k=random.randint(0, len(RepeatableFields)), ) else: logger.debug("unique_repeatable_fields is NOT None...") unique_repeatable_fields = [el.value for el in override_repeated_fields] if use_gunderson_repeat_names: var_name_to_template_val_lookup = {v: k for k, v in GD_HUMAN_REPEAT_SELECTIONS_TO_VAR_NAMES.items()} unique_repeatable_fields = [var_name_to_template_val_lookup[v] for v in unique_repeatable_fields] logger.debug(f"unique_repeatable_fields: {unique_repeatable_fields} {[type(v) for v in unique_repeatable_fields]}") return FormVars( StockholderInfoSame=unique_repeatable_fields, BroadDescriptionAssignedTechnology_S1="Some Technology", UsingTopTemplateFlag=True, UsingTopTemplateFlagFO_IAL="Yes", UsingTopTemplateFlag_IA="Yes", Waiver220=True, IndemnificationAgrIncluded=True, EmployeeNoncompete=True, ) def mock_bylawvars() -> BylawVars: return BylawVars( RoFR=True, QuasiCA=True, TransferRestrictions=True, TransferRestrictionsLanguage=True, TransferRestrictionDate=True,
TransferRestrictionStock=TransferRestrictionEnum.ALL_STOCK,
9
2023-11-13 15:50:53+00:00
8k
ehennenfent/live_illustrate
live_illustrate/__main__.py
[ { "identifier": "ImageRenderer", "path": "live_illustrate/render.py", "snippet": "class ImageRenderer(AsyncThread):\n def __init__(self, model: str, image_size: str, image_quality: str, image_style: str) -> None:\n super().__init__(\"ImageRenderer\")\n self.openai_client: OpenAI = OpenAI()\n self.model: str = model\n self.size: str = image_size\n self.image_quality: str = image_quality\n self.image_style: str = image_style\n\n def work(self, summary: Summary) -> Image | None:\n \"\"\"Sends the text to Dall-e, spits out an image URL\"\"\"\n start = datetime.now()\n rendered = self.openai_client.images.generate(\n model=self.model,\n prompt=\"\\n\".join((summary.summary, *EXTRA)),\n size=self.size, # type: ignore[arg-type]\n quality=self.image_quality, # type: ignore[arg-type]\n style=self.image_style, # type: ignore[arg-type]\n n=1,\n ).data[0]\n self.logger.info(\"Rendered in %s\", datetime.now() - start)\n return Image.from_summary(summary, rendered.url) if rendered.url is not None else None" }, { "identifier": "ImageServer", "path": "live_illustrate/serve.py", "snippet": "class ImageServer:\n def __init__(self, host: str, port: int, default_image: str = \"https://placehold.co/1792x1024/png\") -> None:\n self.host: str = host\n self.port: int = port\n\n self.images: t.List[str] = [default_image]\n\n self.app = Flask(__name__)\n\n self.app.add_url_rule(\"/\", \"index\", self.serve_index)\n self.app.add_url_rule(\"/image/<index>\", \"image\", self.serve_image_tag)\n\n def serve_index(self) -> Response:\n return send_from_directory(\"templates\", \"index.html\")\n\n def serve_image_tag(self, index: str) -> str:\n \"\"\"Sneaky image handler that counts up by index until we get to the most recent image,\n using HTMX to re-request the endpoint every few seconds.\"\"\"\n my_index: int = int(index) if index.isdigit() else -1\n next_index: int = min(max(0, my_index + 1), len(self.images) - 1)\n return IMAGE_HTML.format(index=next_index, image_url=self.images[next_index])\n\n def start(self) -> None:\n self.app.run(host=self.host, port=self.port)\n\n def update_image(self, image: Image) -> None:\n self.images.append(image.image_url)" }, { "identifier": "SessionData", "path": "live_illustrate/session_data.py", "snippet": "class SessionData:\n \"\"\"Creates a data/<timestamp> folder for the session and stores images, summaries, and transcripts\"\"\"\n\n def __init__(self, data_dir: Path, echo: bool = True) -> None:\n self.start_time = datetime.now()\n self.logger = logging.getLogger(\"SessionData\")\n\n self.data_dir: Path = data_dir.joinpath(self.start_time.strftime(\"%Y_%m_%d-%H_%M_%S\"))\n self.echo: bool = echo\n\n self.discord_webhook: str | None = os.getenv(DISCORD_WEBHOOK)\n if self.discord_webhook is not None:\n self.logger.info(\"Discord upload is enabled\")\n\n def save_image(self, image: Image) -> None:\n try:\n r = requests.get((image.image_url), stream=True)\n if r.status_code == 200:\n fname = self.data_dir.joinpath(f\"{self._time_since}.png\")\n with open(fname, \"wb\") as outf:\n for chunk in r:\n outf.write(chunk)\n except Exception as e:\n self.logger.error(\"failed to save image to file: %s\", e)\n else:\n try:\n if self.discord_webhook is not None:\n with open(fname, \"rb\") as image_file:\n SyncWebhook.from_url(self.discord_webhook).send(\n file=File(image_file, description=image.summary[:1023])\n )\n except Exception as e:\n self.logger.error(\"failed to send image to discord: %s\", e)\n\n def save_summary(self, summary: Summary) -> None:\n \"\"\"saves the provided text to its 
own file\"\"\"\n try:\n with open(self.data_dir.joinpath(f\"{self._time_since}.txt\"), \"w\") as summaryf:\n print(summary.summary, file=summaryf)\n except Exception as e:\n self.logger.error(\"failed to write summary to file: %s\", e)\n\n def save_transcription(self, transcription: Transcription) -> None:\n \"\"\"appends the provided text to the transcript file\"\"\"\n try:\n with open(self.data_dir.joinpath(\"transcript.txt\"), \"a\") as transf:\n if self.echo:\n print(self._time_since, \">\", transcription.transcription)\n print(self._time_since, \">\", transcription.transcription, file=transf, flush=True)\n except Exception as e:\n self.logger.error(\"failed to write transcript to file: %s\", e)\n\n @property\n def _time_since(self) -> str:\n delta = datetime.now() - self.start_time\n minutes, seconds = divmod(delta.seconds, 60)\n hours, minutes = divmod(minutes, 60)\n\n return f\"{hours}h_{minutes:02}m_{seconds:02}s\"\n\n def __enter__(self) -> \"SessionData\": # create the directories upon entry, not upon init\n if not (parent := self.data_dir.parent).exists():\n parent.mkdir()\n self.data_dir.mkdir()\n return self\n\n def __exit__(self, *exc) -> None:\n pass" }, { "identifier": "TextSummarizer", "path": "live_illustrate/summarize.py", "snippet": "class TextSummarizer(AsyncThread):\n def __init__(self, model: str) -> None:\n super().__init__(\"TextSummarizer\")\n self.openai_client: OpenAI = OpenAI()\n self.model: str = model\n\n def work(self, transcription: Transcription) -> Summary | None:\n \"\"\"Sends the big buffer of provided text to ChatGPT, returns bullets describing the setting\"\"\"\n text = transcription.transcription\n if (token_count := num_tokens_from_string(text)) == 0:\n self.logger.info(\"No tokens in transcription, skipping summarization\")\n return None\n\n start = datetime.now()\n response = self.openai_client.chat.completions.create(\n model=self.model,\n messages=[\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": text},\n ],\n )\n self.logger.info(\"Summarized %d tokens in %s\", token_count, datetime.now() - start)\n if response.choices:\n return [\n Summary.from_transcription(transcription, content.strip())\n if (content := choice.message.content)\n else None\n for choice in response.choices\n ][-1]\n return None" }, { "identifier": "TextBuffer", "path": "live_illustrate/text_buffer.py", "snippet": "class TextBuffer(AsyncThread):\n def __init__(self, wait_minutes: float, max_context: int, persistence: float = 1.0) -> None:\n super().__init__(\"TextBuffer\")\n self.buffer: t.List[Transcription] = []\n self.wait_seconds: int = int(wait_minutes * 60)\n self.max_context: int = max_context\n self.persistence: float = persistence\n\n def work(self, next_transcription: Transcription) -> int:\n \"\"\"Very simple, just puts the text in the buffer. The real work is done in buffer_forever.\"\"\"\n self.buffer.append(next_transcription)\n return len(self.buffer)\n\n def get_context(self) -> Transcription:\n \"\"\"Grabs the last max_context tokens from the buffer. 
If persistence < 1, trims it down\n to at most persistence * 100 %\"\"\"\n as_text = [t.transcription for t in self.buffer]\n context = Transcription(\"\\n\".join(get_last_n_tokens(as_text, self.max_context)))\n if self.persistence < 1.0:\n self.buffer = [\n Transcription(line)\n for line in get_last_n_tokens(\n as_text, int(self.persistence * num_tokens_from_string(\"\\n\".join(as_text)))\n )\n ]\n return context\n\n def buffer_forever(self, callback: t.Callable[[Transcription], t.Any]) -> None:\n \"\"\"every wait_seconds, grabs the last max_context tokens and sends them off to the\n summarizer (via `callback`)\"\"\"\n last_run = datetime.now()\n while True:\n if (datetime.now() - last_run).seconds > self.wait_seconds:\n last_run = datetime.now()\n callback(self.get_context())\n sleep(1)" }, { "identifier": "AudioTranscriber", "path": "live_illustrate/transcribe.py", "snippet": "class AudioTranscriber(AsyncThread):\n def __init__(self, model: str) -> None:\n super().__init__(\"AudioTranscriber\")\n\n self.recorder = sr.Recognizer()\n self.source = sr.Microphone(sample_rate=SAMPLE_RATE)\n self.model = model\n\n self.recorder.dynamic_energy_threshold = DYNAMIC_ENERGY_THRESHOLD\n\n def work(self, _, audio_data) -> Transcription:\n \"\"\"Passes audio data to whisper, spits text back out\"\"\"\n return Transcription(self.recorder.recognize_whisper(audio_data, model=self.model).strip())\n\n def start(self, callback: t.Callable[[str], None]) -> None:\n with self.source:\n self.recorder.adjust_for_ambient_noise(self.source)\n # This creates a separate thread for the audio recording,\n # but it's non-blocking, so we just let it live here\n self.recorder.listen_in_background(self.source, self.send)\n\n super().start(callback)" }, { "identifier": "Image", "path": "live_illustrate/util.py", "snippet": "class Image(Summary):\n image_url: str\n\n @classmethod\n def from_summary(cls, summary: Summary, image_url: str) -> \"Image\":\n return cls(summary.transcription, summary.summary, image_url)" }, { "identifier": "Summary", "path": "live_illustrate/util.py", "snippet": "class Summary(Transcription):\n summary: str\n\n @classmethod\n def from_transcription(cls, transcription: Transcription, summary: str) -> \"Summary\":\n return cls(transcription.transcription, summary)" }, { "identifier": "Transcription", "path": "live_illustrate/util.py", "snippet": "class Transcription:\n transcription: str" }, { "identifier": "is_transcription_interesting", "path": "live_illustrate/util.py", "snippet": "def is_transcription_interesting(transcription: Transcription) -> bool:\n \"\"\"If Whisper doesn't hear anything, it will sometimes emit predictable nonsense.\"\"\"\n\n # Sometimes we just get a sequence of dots and spaces.\n is_not_empty = len(transcription.transcription.replace(\".\", \"\").replace(\" \", \"\").strip()) > 0\n\n # Sometimes we get a phrase from TRANSCRIPTION_HALLUCINATIONS (see above)\n is_not_hallucination = all(\n len(transcription.transcription.replace(maybe_hallucination, \"\").replace(\" \", \"\").strip()) > 0\n for maybe_hallucination in TRANSCRIPTION_HALLUCINATIONS\n )\n\n return is_not_empty and is_not_hallucination" } ]
import argparse import logging from pathlib import Path from threading import Thread from time import sleep from webbrowser import open_new_tab from dotenv import load_dotenv from .render import ImageRenderer from .serve import ImageServer from .session_data import SessionData from .summarize import TextSummarizer from .text_buffer import TextBuffer from .transcribe import AudioTranscriber from .util import Image, Summary, Transcription, is_transcription_interesting
3,680
load_dotenv() DEFAULT_DATA_DIR = Path(__file__).parent.parent.joinpath("data") def get_args() -> argparse.Namespace: parser = argparse.ArgumentParser("Automatic live illustration for table-top RPGs") parser.add_argument( "--audio_model", default="medium.en", help="Whisper model to use for audio transcription", choices=["tiny.en", "base.en", "small.en", "medium.en", "large", "large-v2", "large-v3"], ) parser.add_argument( "--wait_minutes", default=7.5, type=float, help="How frequently to summarize the conversation and generate an image", ) parser.add_argument( "--max_context", default=2000, # very roughly ten minutes or so? type=int, help="Maximum number of tokens to summarize from the conversations", ) parser.add_argument( "--summarize_model", default="gpt-3.5-turbo", help="LLM to use for summarizing transcription", choices=["gpt-3.5-turbo", "gpt-4"], ) parser.add_argument( "--image_model", default="dall-e-3", help="Diffusion model to use for generating image", choices=["dall-e-3", "dall-e-2"], ) parser.add_argument( "--image_size", default="1792x1024", help="Size of image to render (smaller is cheaper)", choices=["1792x1024", "1024x1792", "1024x1024", "512x512", "256x256"], ) parser.add_argument( "--image_quality", default="standard", help="How fancy of an image to render", choices=["standard", "hd"], ) parser.add_argument( "--image_style", default="vivid", help="How stylized of an image to render", choices=["vivid", "natural"], ) parser.add_argument( "--server_host", default="0.0.0.0", help="Address to bind web server", ) parser.add_argument( "--server_port", default=8080, type=int, help="Port to serve HTML viewer on", ) parser.add_argument( "--open", action="store_true", help="Automatically open a browser tab for the rendered images", ) parser.add_argument( "--persistence_of_memory", default=0.2, # Expressed as a fraction of the total buffered transcription type=float, help="How much of the previous transcription to retain after generating each summary. 0 - 1.0", ) parser.add_argument( "-v", "--verbose", action="count", default=0, ) return parser.parse_args() def main() -> None: args = get_args() logging.basicConfig(format="%(name)s: %(message)s", level=logging.DEBUG if args.verbose > 0 else logging.INFO) # tweak loggers for client libraries logging.getLogger("httpx").setLevel(logging.INFO if args.verbose > 0 else logging.WARNING) # used by OpenAI logging.getLogger("requests").setLevel(logging.INFO if args.verbose > 0 else logging.WARNING) logging.getLogger("werkzeug").setLevel(logging.INFO if args.verbose > 0 else logging.WARNING) # flask # create each of our thread objects with the appropriate command line args transcriber = AudioTranscriber(model=args.audio_model) buffer = TextBuffer( wait_minutes=args.wait_minutes, max_context=args.max_context, persistence=args.persistence_of_memory ) summarizer = TextSummarizer(model=args.summarize_model) renderer = ImageRenderer( model=args.image_model, image_size=args.image_size, image_quality=args.image_quality, image_style=args.image_style, ) server = ImageServer( host=args.server_host, port=args.server_port, default_image=f"https://placehold.co/{args.image_size}/png" ) with SessionData(DEFAULT_DATA_DIR, echo=True) as session_data: # wire up some callbacks to save the intermediate data and forward it along
load_dotenv() DEFAULT_DATA_DIR = Path(__file__).parent.parent.joinpath("data") def get_args() -> argparse.Namespace: parser = argparse.ArgumentParser("Automatic live illustration for table-top RPGs") parser.add_argument( "--audio_model", default="medium.en", help="Whisper model to use for audio transcription", choices=["tiny.en", "base.en", "small.en", "medium.en", "large", "large-v2", "large-v3"], ) parser.add_argument( "--wait_minutes", default=7.5, type=float, help="How frequently to summarize the conversation and generate an image", ) parser.add_argument( "--max_context", default=2000, # very roughly ten minutes or so? type=int, help="Maximum number of tokens to summarize from the conversations", ) parser.add_argument( "--summarize_model", default="gpt-3.5-turbo", help="LLM to use for summarizing transcription", choices=["gpt-3.5-turbo", "gpt-4"], ) parser.add_argument( "--image_model", default="dall-e-3", help="Diffusion model to use for generating image", choices=["dall-e-3", "dall-e-2"], ) parser.add_argument( "--image_size", default="1792x1024", help="Size of image to render (smaller is cheaper)", choices=["1792x1024", "1024x1792", "1024x1024", "512x512", "256x256"], ) parser.add_argument( "--image_quality", default="standard", help="How fancy of an image to render", choices=["standard", "hd"], ) parser.add_argument( "--image_style", default="vivid", help="How stylized of an image to render", choices=["vivid", "natural"], ) parser.add_argument( "--server_host", default="0.0.0.0", help="Address to bind web server", ) parser.add_argument( "--server_port", default=8080, type=int, help="Port to serve HTML viewer on", ) parser.add_argument( "--open", action="store_true", help="Automatically open a browser tab for the rendered images", ) parser.add_argument( "--persistence_of_memory", default=0.2, # Expressed as a fraction of the total buffered transcription type=float, help="How much of the previous transcription to retain after generating each summary. 0 - 1.0", ) parser.add_argument( "-v", "--verbose", action="count", default=0, ) return parser.parse_args() def main() -> None: args = get_args() logging.basicConfig(format="%(name)s: %(message)s", level=logging.DEBUG if args.verbose > 0 else logging.INFO) # tweak loggers for client libraries logging.getLogger("httpx").setLevel(logging.INFO if args.verbose > 0 else logging.WARNING) # used by OpenAI logging.getLogger("requests").setLevel(logging.INFO if args.verbose > 0 else logging.WARNING) logging.getLogger("werkzeug").setLevel(logging.INFO if args.verbose > 0 else logging.WARNING) # flask # create each of our thread objects with the appropriate command line args transcriber = AudioTranscriber(model=args.audio_model) buffer = TextBuffer( wait_minutes=args.wait_minutes, max_context=args.max_context, persistence=args.persistence_of_memory ) summarizer = TextSummarizer(model=args.summarize_model) renderer = ImageRenderer( model=args.image_model, image_size=args.image_size, image_quality=args.image_quality, image_style=args.image_style, ) server = ImageServer( host=args.server_host, port=args.server_port, default_image=f"https://placehold.co/{args.image_size}/png" ) with SessionData(DEFAULT_DATA_DIR, echo=True) as session_data: # wire up some callbacks to save the intermediate data and forward it along
def on_text_transcribed(transcription: Transcription) -> None:
8
2023-11-18 05:42:54+00:00
8k
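The live_illustrate record above centers on TextBuffer's rolling context window: each summary sees at most max_context tokens of recent transcript, and afterwards only a persistence fraction of the buffered tokens is kept so consecutive summaries overlap slightly. Below is a minimal sketch of that windowing idea; a plain whitespace split and the hypothetical helpers num_tokens / last_n_tokens stand in for the repo's real num_tokens_from_string / get_last_n_tokens, which may tokenize differently.

# Minimal sketch of the TextBuffer windowing shown in this record.
# Assumption: whitespace tokens stand in for the repo's real helpers.
from typing import List


def num_tokens(text: str) -> int:
    return len(text.split())


def last_n_tokens(lines: List[str], n: int) -> List[str]:
    """Keep whole lines from the end until the token budget n is spent."""
    kept: List[str] = []
    budget = n
    for line in reversed(lines):
        cost = num_tokens(line)
        if cost > budget:
            break
        kept.append(line)
        budget -= cost
    return list(reversed(kept))


buffer = [
    "the party enters a cavern",
    "torchlight flickers on wet stone",
    "a low growl echoes from the dark",
]
max_context, persistence = 10, 0.5

# The summary sees at most max_context tokens of recent transcript...
context = "\n".join(last_n_tokens(buffer, max_context))
# ...and only a `persistence` fraction of the buffered tokens is kept,
# so the next summary overlaps slightly with this one.
buffer = last_n_tokens(buffer, int(persistence * num_tokens("\n".join(buffer))))
print(context)  # -> "a low growl echoes from the dark"
print(buffer)   # -> ["a low growl echoes from the dark"]

With the record's defaults (max_context=2000, persistence_of_memory=0.2), the same logic would seed each new summary with roughly the last fifth of the buffered transcript.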
cyberark/ark-sdk-python
ark_sdk_python/models/actions/services/ark_dpa_exec_action_consts.py
[ { "identifier": "ArkModel", "path": "ark_sdk_python/models/ark_model.py", "snippet": "class ArkModel(BaseModel):\n class Config:\n allow_population_by_field_name = True" }, { "identifier": "ArkServiceActionDefinition", "path": "ark_sdk_python/models/actions/ark_service_action_definition.py", "snippet": "class ArkServiceActionDefinition(ArkModel):\n action_name: str = Field(description='Action name to be used in the cli commands')\n schemas: Optional[Dict[str, Optional[Type[ArkModel]]]] = Field(description='Schemas for different cli actions for the definition')\n defaults: Optional[Dict[str, Dict[str, Any]]] = Field(description='Defaults for the action schemas parameters')\n async_actions: Optional[List[str]] = Field(description='List of async actions as part of the schemas')\n subactions: Optional[List['ArkServiceActionDefinition']] = Field(description='Subactions to this action')" }, { "identifier": "ArkDPACommitPolicies", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_commit_policies.py", "snippet": "class ArkDPACommitPolicies(ArkModel):\n names: Optional[List[str]] = Field(\n description='Policy names to commit from the workspace to the remote, if not given, choices will be prompted'\n )\n all: bool = Field(description='Whether to commit all locally edited policies', default=False)" }, { "identifier": "ArkDPAEditPolicies", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_edit_policies.py", "snippet": "class ArkDPAEditPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policies to edit from the workspace, if not given, choices will be prompted')" }, { "identifier": "ArkDPAGetPoliciesStatus", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_get_policies_status.py", "snippet": "class ArkDPAGetPoliciesStatus(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to show status on, if not given, shows status on all policies')" }, { "identifier": "ArkDPALoadPolicies", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_load_policies.py", "snippet": "class ArkDPALoadPolicies(ArkModel):\n override: bool = Field(description='Whether to override existing policies', default=False)" }, { "identifier": "ArkDPAPoliciesDiff", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_policies_diff.py", "snippet": "class ArkDPAPoliciesDiff(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to show diff on, if not given, shows diff on all policies')\n unified: bool = Field(description='Show all diffs together', default=False)" }, { "identifier": "ArkDPARemovePolicies", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_remove_policies.py", "snippet": "class ArkDPARemovePolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policies to remove from the workspace, if not given, choices will be prompted')" }, { "identifier": "ArkDPAResetPolicies", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_reset_policies.py", "snippet": "class ArkDPAResetPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to reset on the workspace, if not given, all policies are resetted')\n all: bool = Field(description='Whether to reset all locally edited policies', default=False)" }, { "identifier": "ArkDPAViewPolicies", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/common/ark_dpa_view_policies.py", "snippet": 
"class ArkDPAViewPolicies(ArkModel):\n names: Optional[List[str]] = Field(description='Policy names to view from the workspace, if not given, choices will be prompted')\n unified: bool = Field(description='Show all requested policies together', default=False)" }, { "identifier": "ArkDPADBGeneratePolicy", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/db/ark_dpa_db_generate_policy.py", "snippet": "class ArkDPADBGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['MySQL', 'MariaDB', 'Postgres', 'MSSQL', 'Oracle']]] = Field(\n description='Providers to generate the policy for'\n )" }, { "identifier": "ArkDPAVMGeneratePolicy", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/vm/ark_dpa_vm_generate_policy.py", "snippet": "class ArkDPAVMGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['AWS', 'Azure', 'OnPrem']]] = Field(description='Providers to generate the policy for')\n protocols: Optional[Set[Literal['ssh', 'rdp']]] = Field(description='Protocols to generate the policy for')" }, { "identifier": "ArkDPACreateCertificate", "path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_certificate.py", "snippet": "class ArkDPACreateCertificate(ArkDPACreateCertificateBase):\n file: FilePath = Field(description='Path to a file with the certificate body')" }, { "identifier": "ArkDPADeleteCertificate", "path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_delete_certificate.py", "snippet": "class ArkDPADeleteCertificate(ArkModel):\n certificate_id: str = Field(description='ID of the certificate to delete', min_length=1)" }, { "identifier": "ArkDPACertificatesFilter", "path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_filter.py", "snippet": "class ArkDPACertificatesFilter(ArkModel):\n domain_name: Optional[str] = Field(default=None, description='Filter by domain name')\n cert_name: Optional[str] = Field(default=None, description='Filter by certificate name')" }, { "identifier": "ArkDPAGetCertificate", "path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_get_certificate.py", "snippet": "class ArkDPAGetCertificate(ArkModel):\n certificate_id: str = Field(description='ID of the certificate', min_length=1)" }, { "identifier": "ArkDPAUpdateCertificate", "path": "ark_sdk_python/models/services/dpa/certificates/ark_dpa_certificates_update_certificate.py", "snippet": "class ArkDPAUpdateCertificate(ArkDPACreateCertificateBase):\n certificate_id: str = Field(description='ID of the certificate to update', min_length=1)\n file: FilePath = Field(description='Path to a file with the certificate body')" }, { "identifier": "ArkDPADBMysqlExecution", "path": "ark_sdk_python/models/services/dpa/db/ark_dpa_db_mysql_execution.py", "snippet": "class ArkDPADBMysqlExecution(ArkDPADBBaseExecution):\n mysql_path: str = Field(description='Path to the psql executable', default='mysql')" }, { "identifier": "ArkDPADBOracleGenerateAssets", "path": "ark_sdk_python/models/services/dpa/db/ark_dpa_db_oracle_generate_assets.py", "snippet": "class ArkDPADBOracleGenerateAssets(ArkDPADBBaseGenerateAssets):\n folder: str = Field(description='Where to output the assets')\n unzip: bool = Field(description='Whether to save zipped or not', default=True)" }, { "identifier": "ArkDPADBPsqlExecution", "path": "ark_sdk_python/models/services/dpa/db/ark_dpa_db_psql_execution.py", "snippet": "class ArkDPADBPsqlExecution(ArkDPADBBaseExecution):\n psql_path: str = 
Field(description='Path to the psql executable', default='psql')" }, { "identifier": "ArkDPAK8SGenerateKubeConfig", "path": "ark_sdk_python/models/services/dpa/k8s/ark_dpa_k8s_generate_kubeconfig.py", "snippet": "class ArkDPAK8SGenerateKubeConfig(ArkModel):\n folder: str = Field(description='Output folder to download the kube config file', default=None)" }, { "identifier": "ArkDPADeletePolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py", "snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPAGetPolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py", "snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPAUpdatePolicyStatus", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_update_policy_status.py", "snippet": "class ArkDPAUpdatePolicyStatus(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to update the status for')\n policy_name: Optional[str] = Field(description='Policy name to update the status for')\n status: ArkDPARuleStatus = Field(description='New status to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPADBAddPolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_add_policy.py", "snippet": "class ArkDPADBAddPolicy(ArkDPABaseAddPolicy):\n providers_tags: List[str] = Field(description='Policy tags to use as filters for the assets in the rules', default_factory=list)\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )" }, { "identifier": "ArkDPADBPoliciesFilter", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_policies_filter.py", "snippet": "class ArkDPADBPoliciesFilter(ArkDPABasePoliciesFilter):\n providers: Optional[List[ArkWorkspaceType]] = Field(description='Filter by policies with given database providers')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers')\n def validate_providers(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.MYSQL,\n ArkWorkspaceType.MARIADB,\n ArkWorkspaceType.POSTGRES,\n ArkWorkspaceType.MSSQL,\n ArkWorkspaceType.ORACLE,\n ]:\n raise ValueError('Invalid Database Type')\n return val" }, { 
"identifier": "ArkDPADBUpdatePolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_update_policy.py", "snippet": "class ArkDPADBUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_tags: Optional[List[str]] = Field(description='Policy tags to use as filters for the assets in the rules')\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )" }, { "identifier": "ArkDPAVMAddPolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_add_policy.py", "snippet": "class ArkDPAVMAddPolicy(ArkDPABaseAddPolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(\n description='Workspaces / cloud providers data per type of cloud provider, '\n 'for example for AWS, how to filter ec2 instances to connect to'\n )\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(\n description='Rules describing how and who will be able to connect to the target instances filtered by the cloud providers'\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMPoliciesFilter", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policies_filter.py", "snippet": "class ArkDPAVMPoliciesFilter(ArkDPABasePoliciesFilter):\n providers: Optional[List[ArkWorkspaceType]] = Field(description='Filter by policies with given cloud providers')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers')\n def validate_providers(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMUpdatePolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_update_policy.py", "snippet": "class ArkDPAVMUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='New cloud providers to update')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='New access rules to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPADBAddSecret", "path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_add_secret.py", "snippet": "class ArkDPADBAddSecret(ArkModel):\n secret_name: str = Field(description='Name of the secret')\n description: str = Field(description='Description about the secret', default='')\n 
purpose: str = Field(description='Purpose of the secret', default='')\n secret_type: ArkDPADBSecretType = Field(description='Type of the secret')\n store_type: Optional[ArkDPADBStoreType] = Field(\n description='Store type of the secret, will be deduced by the secret type if not given'\n )\n tags: List[ArkDPADBTag] = Field(description='Tags of the secret', default_factory=list)\n\n # Username Password Secret Type\n username: Optional[str] = Field(description='Name or id of the user for username_password type')\n password: Optional[SecretStr] = Field(description='Password of the user for username_password type')\n\n # PAM Account Secret Type\n pam_safe: Optional[str] = Field(description='Safe of the account for pam_account type')\n pam_account_name: Optional[str] = Field(description='Account name for pam_account type')\n\n class Config:\n json_encoders = {SecretStr: lambda v: v.get_secret_value() if v else None}" }, { "identifier": "ArkDPADBDeleteSecret", "path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_delete_secret.py", "snippet": "class ArkDPADBDeleteSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to delete')\n secret_name: Optional[str] = Field(description='Name of the secret to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values" }, { "identifier": "ArkDPADBDisableSecret", "path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_disable_secret.py", "snippet": "class ArkDPADBDisableSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to disable')\n secret_name: Optional[str] = Field(description='Name of the secret to disable')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values" }, { "identifier": "ArkDPADBEnableSecret", "path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_enable_secret.py", "snippet": "class ArkDPADBEnableSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to enable')\n secret_name: Optional[str] = Field(description='Name of the secret to enable')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values" }, { "identifier": "ArkDPADBGetSecret", "path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_get_secret.py", "snippet": "class ArkDPADBGetSecret(ArkModel):\n secret_id: Optional[str] = Field(description='ID of the secret to get')\n secret_name: Optional[str] = Field(description='Name of the secret to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values" }, { "identifier": "ArkDPADBSecretsFilter", "path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_secrets_filter.py", "snippet": "class ArkDPADBSecretsFilter(ArkModel):\n secret_name: Optional[str] =
Field(description='Filter by secret name')\n secret_type: Optional[ArkDPADBSecretType] = Field(description='Filter by type')\n store_type: Optional[ArkDPADBStoreType] = Field(description='Filter by store type')\n is_active: Optional[bool] = Field(description='Filter by if secret is active')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Filter by tags')" }, { "identifier": "ArkDPADBUpdateSecret", "path": "ark_sdk_python/models/services/dpa/secrets/db/ark_dpa_db_update_secret.py", "snippet": "class ArkDPADBUpdateSecret(ArkModel):\n secret_id: Optional[str] = Field(description='Secret id to update')\n secret_name: Optional[str] = Field(description='Name of the secret to update')\n new_secret_name: Optional[str] = Field(description='New secret name to update to')\n description: Optional[str] = Field(description='Description about the secret to update')\n purpose: Optional[str] = Field(description='Purpose of the secret to update')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Tags of the secret to change to')\n\n # Username Password Secret Type\n username: Optional[str] = Field(description='Name or id of the user for username_password type')\n password: Optional[SecretStr] = Field(description='Password of the user for username_password type')\n\n # PAM Account Secret Type\n pam_safe: Optional[str] = Field(description='Safe of the account for pam_account type')\n pam_account_name: Optional[str] = Field(description='Account name for pam_account type')\n\n class Config:\n json_encoders = {SecretStr: lambda v: v.get_secret_value() if v else None}\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'secret_id' not in values and 'secret_name' not in values:\n raise ValueError('Either secret id or secret name needs to be provided')\n return values" }, { "identifier": "ArkDPASSOGetShortLivedClientCertificate", "path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_client_certificate.py", "snippet": "class ArkDPASSOGetShortLivedClientCertificate(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=False)\n folder: Optional[str] = Field(description='Output folder to write the key / certificate to. Required if format is File')\n output_format: ArkDPASSOShortLiveClientCertificateFormat = Field(\n description='The output format of the key / ' 'certificate. i.e. 
File, Raw, Base64',\n default=ArkDPASSOShortLiveClientCertificateFormat.FILE,\n )" }, { "identifier": "ArkDPASSOGetShortLivedOracleWallet", "path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_oracle_wallet.py", "snippet": "class ArkDPASSOGetShortLivedOracleWallet(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=False)\n unzip_wallet: bool = Field(description='Whether to save zipped or not', default=True)\n folder: str = Field(description='Output folder to write the wallet to')" }, { "identifier": "ArkDPASSOGetShortLivedPassword", "path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_password.py", "snippet": "class ArkDPASSOGetShortLivedPassword(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=False)" }, { "identifier": "ArkDPASSOGetShortLivedRDPFile", "path": "ark_sdk_python/models/services/dpa/sso/ark_dpa_sso_get_short_lived_rdp_file.py", "snippet": "class ArkDPASSOGetShortLivedRDPFile(ArkModel):\n allow_caching: bool = Field(description='Allow short lived token caching', default=True)\n folder: str = Field(description='Output folder to write the rdp file to')\n target_address: str = Field(description='Address of the Windows target machine')\n target_domain: Optional[str] = Field(description='Domain of the Windows target machine')" }, { "identifier": "ArkDPADBAddDatabase", "path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_add_database.py", "snippet": "class ArkDPADBAddDatabase(ArkCamelizedModel):\n name: str = Field(description='Name of the database, often referenced in policies and other APIs')\n network_name: str = Field(description='Name of the network the database resides in, defaulted to on premises', default='ON-PREMISE')\n platform: ArkWorkspaceType = Field(\n description='Platform of the database, as in, where it resides, defaulted to on premises', default=ArkWorkspaceType.ONPREM\n )\n services: Optional[List[str]] = Field(description='Services related to the database, most commonly used with oracle')\n domain_controller_name: Optional[str] = Field(description='Domain controller name associated to this database')\n domain_controller_netbios: Optional[str] = Field(description='Domain controller netbios associated to this database')\n provider_engine: ArkDPADBDatabaseEngineType = Field(\n description='Provider engine, will be later deduced to the identifier of the provider'\n )\n enable_certificate_validation: bool = Field(description='Whether to enable and enforce certificate validation', default=True)\n certificate: Optional[str] = Field(description='Certificate id used for this database that resides in the certificates service')\n read_write_endpoint: str = Field(description='Read write endpoint of the database')\n read_only_endpoint: Optional[str] = Field(description='Optionally, a read only endpoint of the database')\n port: Optional[int] = Field(description='Port of the database, if not given, the default one will be used')\n secret_id: Optional[str] = Field(description='Secret identifier stored in the secret service related to this database')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Tags for the database')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('platform')\n def validate_workspace_type(cls, val):\n if val and ArkWorkspaceType(val) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform
/ Workspace Type')\n return val" }, { "identifier": "ArkDPADBDatabasesFilter", "path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_databases_filter.py", "snippet": "class ArkDPADBDatabasesFilter(ArkModel):\n name: Optional[str] = Field(description='Name of the database to filter on')\n provider_family: Optional[ArkDPADBDatabaseFamilyType] = Field(description='List filter by family')\n provider_engine: Optional[ArkDPADBDatabaseEngineType] = Field(description='List filter by engine')\n provider_workspace: Optional[ArkDPADBDatabaseWorkspaceType] = Field(description='List filter by workspace')\n tags: Optional[List[ArkDPADBTag]] = Field(description='List filter by tags')\n db_warnings_filter: Optional[ArkDPADBWarning] = Field(description='Filter by databases who are with warnings / incomplete')" }, { "identifier": "ArkDPADBDeleteDatabase", "path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_delete_database.py", "snippet": "class ArkDPADBDeleteDatabase(ArkCamelizedModel):\n id: Optional[int] = Field(description='Database id to delete')\n name: Optional[str] = Field(description='Database name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'id' not in values and 'name' not in values:\n raise ValueError('Either id or name needs to be provided')\n return values" }, { "identifier": "ArkDPADBGetDatabase", "path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_get_database.py", "snippet": "class ArkDPADBGetDatabase(ArkCamelizedModel):\n id: Optional[int] = Field(description='Database id to get')\n name: Optional[str] = Field(description='Database name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'id' not in values and 'name' not in values:\n raise ValueError('Either id or name needs to be provided')\n return values" }, { "identifier": "ArkDPADBUpdateDatabase", "path": "ark_sdk_python/models/services/dpa/workspaces/db/ark_dpa_db_update_database.py", "snippet": "class ArkDPADBUpdateDatabase(ArkCamelizedModel):\n id: Optional[int] = Field(description='Database id to update')\n name: Optional[str] = Field(description='Database name to update')\n new_name: Optional[str] = Field(description='New name for the database')\n network_name: Optional[str] = Field(description='Name of the network the database resides in', default='ON-PREMISE')\n platform: Optional[ArkWorkspaceType] = Field(description='Platform of the database, as in, where it resides')\n services: Optional[List[str]] = Field(description='Services related to the database, most commonly used with oracle')\n domain_controller_name: Optional[str] = Field(description='Domain controller name associated to this database')\n domain_controller_netbios: Optional[str] = Field(description='Domain controller netbios associated to this database')\n provider_engine: Optional[ArkDPADBDatabaseEngineType] = Field(\n description='Provider engine, will be later deduced to the identifier of the provider'\n )\n enable_certificate_validation: bool = Field(description='Whether to enable and enforce certificate validation', default=True)\n certificate: Optional[str] = Field(description='Certificate id used for this database that resides in the certificates service')\n read_write_endpoint: Optional[str] = Field(description='Read write endpoint of the database')\n read_only_endpoint: Optional[str] = Field(description='Optionally, a read only endpoint of the database')\n port:
Optional[int] = Field(description='Port of the database, if not given, the default one will be used')\n secret_id: Optional[str] = Field(description='Secret identifier stored in the secret service related to this database')\n tags: Optional[List[ArkDPADBTag]] = Field(description='Tags for the database')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'id' not in values and 'name' not in values:\n raise ValueError('Either id or name needs to be provided')\n return values\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('platform')\n def validate_workspace_type(cls, val):\n if val and ArkWorkspaceType(val) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" } ]
from typing import Dict, Final, Optional, Type from ark_sdk_python.models import ArkModel from ark_sdk_python.models.actions.ark_service_action_definition import ArkServiceActionDefinition from ark_sdk_python.models.cli_services.dpa.policies_editor.common import ( ArkDPACommitPolicies, ArkDPAEditPolicies, ArkDPAGetPoliciesStatus, ArkDPALoadPolicies, ArkDPAPoliciesDiff, ArkDPARemovePolicies, ArkDPAResetPolicies, ArkDPAViewPolicies, ) from ark_sdk_python.models.cli_services.dpa.policies_editor.db import ArkDPADBGeneratePolicy from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy from ark_sdk_python.models.services.dpa.certificates import ( ArkDPACertificatesFilter, ArkDPACreateCertificate, ArkDPADeleteCertificate, ArkDPAGetCertificate, ArkDPAUpdateCertificate, ) from ark_sdk_python.models.services.dpa.db import ArkDPADBMysqlExecution, ArkDPADBOracleGenerateAssets, ArkDPADBPsqlExecution from ark_sdk_python.models.services.dpa.k8s.ark_dpa_k8s_generate_kubeconfig import ArkDPAK8SGenerateKubeConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPAUpdatePolicyStatus from ark_sdk_python.models.services.dpa.policies.db import ArkDPADBAddPolicy, ArkDPADBPoliciesFilter, ArkDPADBUpdatePolicy from ark_sdk_python.models.services.dpa.policies.vm import ArkDPAVMAddPolicy, ArkDPAVMPoliciesFilter, ArkDPAVMUpdatePolicy from ark_sdk_python.models.services.dpa.secrets.db import ( ArkDPADBAddSecret, ArkDPADBDeleteSecret, ArkDPADBDisableSecret, ArkDPADBEnableSecret, ArkDPADBGetSecret, ArkDPADBSecretsFilter, ArkDPADBUpdateSecret, ) from ark_sdk_python.models.services.dpa.sso import ( ArkDPASSOGetShortLivedClientCertificate, ArkDPASSOGetShortLivedOracleWallet, ArkDPASSOGetShortLivedPassword, ArkDPASSOGetShortLivedRDPFile, ) from ark_sdk_python.models.services.dpa.workspaces.db import ( ArkDPADBAddDatabase, ArkDPADBDatabasesFilter, ArkDPADBDeleteDatabase, ArkDPADBGetDatabase, ArkDPADBUpdateDatabase, )
7,196
WORKSPACES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[str, Optional[Type[ArkModel]]]] = { 'add-database': ArkDPADBAddDatabase, 'delete-database': ArkDPADBDeleteDatabase, 'update-database': ArkDPADBUpdateDatabase, 'list-databases': None, 'list-databases-by': ArkDPADBDatabasesFilter, 'database': ArkDPADBGetDatabase, 'databases-stats': None, } WORKSPACES_DB_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition( action_name='db', schemas=WORKSPACES_DB_ACTION_TO_SCHEMA_MAP ) WORKSPACES_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition( action_name='workspaces', subactions=[WORKSPACES_DB_ACTION] ) POLICIES_VM_ACTION_TO_SCHEMA_MAP: Final[Dict[str, Optional[Type[ArkModel]]]] = { 'add-policy': ArkDPAVMAddPolicy,
WORKSPACES_DB_ACTION_TO_SCHEMA_MAP: Final[Dict[str, Optional[Type[ArkModel]]]] = { 'add-database': ArkDPADBAddDatabase, 'delete-database': ArkDPADBDeleteDatabase, 'update-database': ArkDPADBUpdateDatabase, 'list-databases': None, 'list-databases-by': ArkDPADBDatabasesFilter, 'database': ArkDPADBGetDatabase, 'databases-stats': None, } WORKSPACES_DB_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition( action_name='db', schemas=WORKSPACES_DB_ACTION_TO_SCHEMA_MAP ) WORKSPACES_ACTION: Final[ArkServiceActionDefinition] = ArkServiceActionDefinition( action_name='workspaces', subactions=[WORKSPACES_DB_ACTION] ) POLICIES_VM_ACTION_TO_SCHEMA_MAP: Final[Dict[str, Optional[Type[ArkModel]]]] = { 'add-policy': ArkDPAVMAddPolicy,
'delete-policy': ArkDPADeletePolicy,
21
2023-11-13 09:24:31+00:00
8k
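Many of the ArkDPA* models in the record above repeat one validation idiom: accept either an id or a name, and reject requests that carry neither via a pydantic v1 root_validator. Below is a self-contained sketch of that pattern; the model and field names are illustrative, not part of the SDK, and it assumes pydantic v1 semantics (bare @root_validator, Optional fields defaulting to None), matching the snippets.

# Sketch of the recurring "either id or name" validation pattern.
# Assumption: pydantic v1, as used by the snippets in this record.
from typing import Optional

from pydantic import BaseModel, Field, ValidationError, root_validator


class GetResource(BaseModel):
    id: Optional[int] = Field(description='Resource id to get')
    name: Optional[str] = Field(description='Resource name to get')

    # pylint: disable=no-self-use,no-self-argument
    @root_validator
    def validate_either(cls, values):
        # Post-validation, unset Optional fields appear in `values` with
        # a None value, so test the values rather than key membership.
        if values.get('id') is None and values.get('name') is None:
            raise ValueError('Either id or name needs to be provided')
        return values


print(GetResource(name='my-db'))  # id=None name='my-db'
try:
    GetResource()  # neither given -> ValidationError
except ValidationError as exc:
    print(exc)

Checking `is None` rather than key membership also covers the post-validation case where optional fields are present in `values` with a None value.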
CorentinJ/transcription-diff
transcription_diff/text_normalization.py
[ { "identifier": "normalize_numbers", "path": "transcription_diff/number_normalization.py", "snippet": "def normalize_numbers(text: str):\n words = re.split(\"(\\s+)\", text)\n mapping = list(zip(words, [SliceMap.identity(len(word)) for word in words]))\n\n text, mapping = _remove_commas(text, mapping)\n text, mapping = _expand_year(text, mapping)\n text, mapping = _expand_abbreviated_currency_unit(text, mapping)\n text, mapping = _expand_other_currency(text, mapping, _pounds_re, \"pound\", \"pounds\")\n text, mapping = _expand_other_currency(text, mapping, _yen_re, \"yen\", \"yen\")\n text, mapping = _expand_other_currency(text, mapping, _euro_re, \"euro\", \"euros\")\n text, mapping = _expand_other_unit(text, mapping, _ml_re, \"milliliter\", \"milliliters\")\n text, mapping = _expand_other_unit(text, mapping, _cl_re, \"centiliter\", \"centiliters\")\n text, mapping = _expand_other_unit(text, mapping, _g_re, \"gram\", \"grams\")\n text, mapping = _expand_other_unit(text, mapping, _kg_re, \"kilogram\", \"kilograms\")\n text, mapping = _expand_other_unit(text, mapping, _mm_re, \"millimeter\", \"millimeters\")\n text, mapping = _expand_other_unit(text, mapping, _cm_re, \"centimeter\", \"centimeters\")\n text, mapping = _expand_other_unit(text, mapping, _km_re, \"kilometer\", \"kilometers\")\n text, mapping = _expand_other_unit(text, mapping, _in_re, \"inch\", \"inches\")\n text, mapping = _expand_other_unit(text, mapping, _ft_re, \"foot\", \"feet\")\n text, mapping = _expand_other_unit(text, mapping, _l_re, \"liter\", \"liters\")\n text, mapping = _expand_other_unit(text, mapping, _m_re, \"meter\", \"meters\")\n text, mapping = _expand_other_unit(text, mapping, _yd_re, \"yard\", \"yards\")\n text, mapping = _expand_other_unit(text, mapping, _s_re, \"second\", \"seconds\")\n text, mapping = _expand_dollars(text, mapping)\n text, mapping = _convert_hash(text, mapping)\n text, mapping = _expand_decimal_point(text, mapping)\n text, mapping = _expand_time(text, mapping)\n text, mapping = _expand_ordinal(text, mapping)\n text, mapping = _expand_number(text, mapping)\n\n raw2clean_map = SliceMap.empty()\n for word, word_map in mapping:\n raw2clean_map += word_map\n return text, raw2clean_map" }, { "identifier": "SliceMap", "path": "transcription_diff/slice_map.py", "snippet": "class SliceMap:\n def __init__(self, smap: Union[List[slice], np.ndarray], target_len: int):\n \"\"\"\n A slice map smap is a list of slices that maps from X to Y with\n - X[i] mapping to Y[smap[i]]\n - X[i:j] mapping to Y[smap[i].start:smap[j - 1].stop]\n\n Informally, an item in X can correspond to 0 or more consecutive items in Y. A slice of one or more items in X\n will map to the slice spanning from the leftmost corresponding item in Y to the rightmost corresponding item.\n\n :param smap: the list of slices. The following must hold:\n - len(smap) must be equal to the size of the X\n - slices cannot have negative indices and cannot index beyond the size of Y\n - the slice starts cannot decrease, the same goes for the slice stops. You can however have consecutive\n overlapping slices, e.g. [slice(0, 2), slice(0, 2)].\n Note that slices can be empty (stop <= start).\n The slices can also be passed as an (X, 2) shaped integer array. 
The second dimension holds slice starts and\n ends, respectively.\n :param target_len: the size of Y\n \"\"\"\n self.source_len = len(smap)\n self.target_len = target_len\n\n # Convert slices to an array\n if not isinstance(smap, np.ndarray):\n self._map = np.empty((self.source_len, 2), dtype=np.int64)\n for i, sli in enumerate(smap):\n self._map[i] = [sli.start, sli.stop]\n else:\n self._map = smap.astype(np.int64, copy=True)\n\n assert np.all((0 <= self._map) & (self._map <= target_len)), \"Slice starts/stops out of bounds\"\n assert np.all(self._map[1:] >= self._map[:-1]), \"Slice starts/stops must be increasing\"\n\n def __getitem__(self, item: Union[int, slice]) -> slice:\n \"\"\"\n Indexes the position in X with either an integer or a slice (step != 1 is not supported). Returns the\n corresponding slice in Y.\n \"\"\"\n if np.issubdtype(type(item), np.integer):\n item = slice(item, item + 1)\n else:\n assert item.step in [None, 1], \"Only steps of 1 are supported\"\n\n view = self._map[item]\n if len(view):\n # We return a slice that spans from the lowest to the highest target indices\n return slice(view[0][0], view[-1][1])\n else:\n # We return an empty slice, it is computed so as to stay consistent with our axioms.\n pos = np.clip(0, item.start, self.source_len)\n start = self._map[pos][0] if pos < self.source_len else self.target_len\n stop = self._map[pos - 1][1] if pos > 0 else 0\n stop = max(start, stop)\n return slice(start, stop)\n\n def __len__(self):\n \"\"\"\n Returns the size of X\n \"\"\"\n return self.source_len\n\n def __bool__(self):\n \"\"\"\n To ensure we still get a True value when the mapping is empty\n \"\"\"\n return True\n\n def __iter__(self):\n \"\"\"\n Iterates over slices, returning pairs (start, stop)\n \"\"\"\n yield from ((int(start), int(end)) for start, end in self._map)\n\n def project(self, data: Union[np.ndarray, List], default=None) -> Union[np.ndarray, List]:\n \"\"\"\n Projects data in the source space to the target space.\n A default value will be returned in place of gaps in the target space.\n In case of overlaps, the rightmost item will take priority.\n\n :param data: a list of arbitrary objects or a numpy array. It must be that len(data) == source_len\n :param default: the value to give to entries that nothing maps to. 
This value must be specified in the case\n of numpy arrays\n :return: the projected data in Y as a list or numpy array\n \"\"\"\n assert len(data) == self.source_len, \"The data to project must have the same length as the mapping.\"\n is_numpy = isinstance(data, np.ndarray)\n assert not (is_numpy and default is None), \"The default value must be specified for numpy arrays.\"\n\n if is_numpy:\n projected = np.full_like(data, default, shape=self.target_len)\n else:\n projected = [default] * self.target_len\n\n for source_idx, (target_start, target_end) in enumerate(self._map):\n if is_numpy:\n projected[target_start:target_end] = data[source_idx]\n else:\n projected[target_start:target_end] = [data[source_idx]] * (target_end - target_start)\n\n return projected\n\n def inverse(self) -> 'SliceMap':\n \"\"\"\n With self mapping from X to Y, returns the inverse Y to X mapping.\n This operation is bijective, including in the presence of gaps or overlaps.\n \"\"\"\n # Find the Points Of Interest: the indices where the mapping's starts or stops increase\n bounded_map = np.concatenate((self._map, [[self.target_len, self.target_len]]))\n changes = np.diff(bounded_map, axis=0, prepend=0)\n (start_pois,), (stop_pois,) = changes[:, 1].nonzero(), changes[:, 0].nonzero()\n\n n_repeats = np.diff(bounded_map[start_pois, 1], prepend=0)\n inv_map_starts = np.repeat(start_pois, n_repeats)\n\n n_repeats = np.diff(bounded_map[stop_pois, 0], prepend=0)\n inv_map_stops = np.repeat(stop_pois, n_repeats)\n\n inv_map = np.stack([inv_map_starts, inv_map_stops], axis=1)\n\n return SliceMap(inv_map, self.source_len)\n\n def compose(self, other: 'SliceMap') -> 'SliceMap':\n \"\"\"\n With self mapping from X to Y and other mapping from Y to Z, returns the composed X to Z mapping.\n \"\"\"\n assert self.target_len == other.source_len, \\\n f\"Cannot compose {self.source_len}x{self.target_len} map with {other.source_len}x{other.target_len} map.\"\n\n smap = np.empty((self.source_len, 2), dtype=np.int64)\n for i in range(len(self)):\n sli = other[self[i]]\n smap[i] = [sli.start, sli.stop]\n\n return SliceMap(smap, other.target_len)\n\n def __mul__(self, other):\n \"\"\"\n Multiplication is shorthand for compose\n \"\"\"\n return self.compose(other)\n\n def concat(self, other: 'SliceMap') -> 'SliceMap':\n \"\"\"\n With self mapping from Xi to Yi and other mapping from Xj to Yj, returns the concatenated mapping\n from cat(Xi, Xj) to cat(Yi, Yj).\n \"\"\"\n new_map = np.concatenate((self._map, other._map + self.target_len))\n return SliceMap(new_map, self.target_len + other.target_len)\n\n def __add__(self, other):\n \"\"\"\n Addition is shorthand for concatenation\n \"\"\"\n return self.concat(other)\n\n def __eq__(self, other: 'SliceMap'):\n if other is None:\n return False\n return \\\n self.source_len == other.source_len and \\\n self.target_len == other.target_len and \\\n np.array_equal(self._map, other._map)\n\n @staticmethod\n def from_1to1_map(oto_map: Iterable[int], target_len: int):\n \"\"\"\n Creates a slicemap where each index i corresponds to the slice oto_map[i]:oto_map[i] + 1\n \"\"\"\n return SliceMap([slice(p, p + 1) for p in oto_map], target_len)\n\n @staticmethod\n def from_ranges(ranges: Iterable[int]):\n \"\"\"\n This is the non-cumulative version of a monotonic mapping:\n - SliceMap.from_ranges(r) is equivalent to SliceMap.from_monotonic_map(np.cumsum(r))\n \"\"\"\n smap = []\n target_pos = 0\n for r in ranges:\n smap.append(slice(target_pos, target_pos + r))\n target_pos += r\n return SliceMap(smap,
target_pos)\n\n @staticmethod\n def lerp(source_len: int, target_len: int):\n \"\"\"\n Creates a map that linearly interpolates from X to Y, e.g. for source_len=6 and target_len=12, the slice\n 2:3 in X maps to 4:6 in Y.\n \"\"\"\n low = min(source_len, target_len)\n high = max(source_len, target_len)\n idx = np.linspace(0, low, high, endpoint=False, dtype=np.int64)\n smap = np.stack([idx, np.minimum(idx + 1, low)], axis=1)\n smap = SliceMap(smap, low)\n\n return smap if target_len == low else smap.inverse()\n\n @staticmethod\n def full(source_len: int, target_len: int):\n \"\"\"\n Creates a map where each element in the source space maps to the entirety of the target space.\n \"\"\"\n smap = np.zeros((source_len, 2), dtype=np.int64)\n smap[:, 1] = target_len\n return SliceMap(smap, target_len)\n\n @staticmethod\n def empty() -> 'SliceMap':\n return SliceMap([], 0)\n\n @staticmethod\n def identity(length: int) -> 'SliceMap':\n return SliceMap.slice(0, length, length)\n\n @overload\n def slice(start: int, end: int, target_len: int) -> 'SliceMap': ...\n @overload\n def slice(sli: slice, target_len: int) -> 'SliceMap': ...\n @staticmethod\n def slice(*args) -> 'SliceMap':\n \"\"\"\n Convenience method. Creates a map where all elements map to a slice of a target space.\n - <start> is where the slice begins in the target space\n - <end> is where the slice ends in the target space\n - <target_len> is the size of the target space\n This method is the inverse of eye()\n \"\"\"\n if len(args) == 2:\n start, end, target_len = args[0].start, args[0].stop, args[1]\n else:\n start, end, target_len = args\n assert 0 <= start <= end <= target_len, f\"Invalid slice: {start}:{end} in {target_len}\"\n return SliceMap(\n np.stack([np.arange(start, end), np.arange(start, end) + 1], axis=1),\n target_len\n )\n\n @overload\n def eye(start: int, end: int, length: int) -> 'SliceMap': ...\n @overload\n def eye(sli: slice, length: int) -> 'SliceMap': ...\n @staticmethod\n def eye(*args) -> 'SliceMap':\n \"\"\"\n Convenience method. Creates a map where\n - the <start> first element map to nothing\n - the elements between <start> and <end> map to the identity\n - the elements after <end> (up to <length>) map to nothing\n This method is the inverse of slice()\n \"\"\"\n if len(args) == 2:\n start, end, length = args[0].start, args[0].stop, args[1]\n else:\n start, end, length = args\n return SliceMap.full(start, 0) + SliceMap.identity(end - start) + SliceMap.full(length - end, 0)\n\n @staticmethod\n def compose_by_name(mapping_name: str, **mappings: 'SliceMap'):\n \"\"\"\n Composes mappings together based on their names. Each SliceMap passed as <mappings> argument must have\n the name structure <source2target>.\n\n For example, calling the function as:\n SliceMap.compose_by_name('a2c', a2b=a2b, b2c=b2c)\n will return the composition a2c = a2b * b2c.\n\n An AssertionError will be raised if <source_name> or <target_name> are not found in the names of the\n passed mappings.\n\n Mappings that are not used in the composition may be passed. 
They will be ignored.\n \"\"\"\n assert all(k.count(\"2\") == 1 for k in list(mappings) + [mapping_name]), \\\n f\"All mappings must conform to the name convention <source2target>, got {list(mappings)}\"\n source_name, target_name = mapping_name.split(\"2\")\n source_names, target_names = zip(*[map_name.split(\"2\") for map_name in mappings])\n assert source_name in source_names, f\"Source name {source_name} not found in {source_names}\"\n assert target_name in target_names, f\"Target name {target_name} not found in {target_names}\"\n\n dim_name = source_name\n composed_map = None\n seen_idx = set()\n while dim_name != target_name:\n map_idx = source_names.index(dim_name)\n assert map_idx not in seen_idx, f\"Cycle detected: {list(mappings)}\"\n seen_idx.add(map_idx)\n map_to_compose = mappings[f\"{dim_name}2{target_names[map_idx]}\"]\n composed_map = composed_map * map_to_compose if composed_map else map_to_compose\n dim_name = target_names[map_idx]\n\n return composed_map\n\n def __repr__(self):\n return f\"<{self.source_len}x{self.target_len} map: {[tuple(sli) for sli in self._map]}>\"" } ]
import inspect import logging import re import unicodedata from typing import Tuple, Callable, List from langcodes import Language from transcription_diff.number_normalization import normalize_numbers from transcription_diff.slice_map import SliceMap
4,620
logger = logging.getLogger(__name__) # Regular expressions matching whitespace. When using with re.split(), the second one will keep whitespaces in the # output because all captured groups are kept. _whitespace_excl_re = re.compile(r'\s+') _whitespace_incl_re = re.compile(r'(\s+)') # List of (regular expression, replacement) pairs for abbreviations: _abbreviations = [ (re.compile('\\b%s\\.' % abbrev, re.IGNORECASE), expanded) for abbrev, expanded in [ ('mrs', 'misess'), ('mr', 'mister'), ('dr', 'doctor'), ('st', 'saint'), ('co', 'company'), ('jr', 'junior'), ('maj', 'major'), ('gen', 'general'), ('drs', 'doctors'), ('rev', 'reverend'), ('lt', 'lieutenant'), ('hon', 'honorable'), ('sgt', 'sergeant'), ('capt', 'captain'), ('esq', 'esquire'), ('ltd', 'limited'), ('col', 'colonel'), ('ft', 'feet'), ('abbrev', 'abbreviation'), ('ave', 'avenue'), ('abstr', 'abstract'), ('addr', 'address'), ('jan', 'january'), ('feb', 'february'), ('mar', 'march'), ('apr', 'april'), ('jul', 'july'), ('aug', 'august'), ('sep', 'september'), ('sept', 'september'), ('oct', 'october'), ('nov', 'november'), ('dec', 'december'), ('mon', 'monday'), ('tue', 'tuesday'), ('wed', 'wednesday'), ('thur', 'thursday'), ('fri', 'friday'), ('sec', 'second'), ('min', 'minute'), ('mo', 'month'), ('yr', 'year'), ('cal', 'calorie'), ('dept', 'department'), ('gal', 'gallon'), ('kg', 'kilogram'), ('km', 'kilometer'), ('mt', 'mount'), ('oz', 'ounce'), ('vol', 'volume'), ('vs', 'versus'), ('yd', 'yard'), ('e\\.g', 'eg'), ('i\\.e', 'ie'), ('etc', 'etc'), ] ] def expand_abbreviations(text: str):
orig2new = SliceMap.identity(len(text))
1
2023-11-11 20:51:54+00:00
8k
mohenghui/detectAuto_v8
ultralytics/nn/modules/head.py
[ { "identifier": "TORCH_1_10", "path": "ultralytics/utils/tal.py", "snippet": "TORCH_1_10 = check_version(torch.__version__, '1.10.0')" }, { "identifier": "dist2bbox", "path": "ultralytics/utils/tal.py", "snippet": "def dist2bbox(distance, anchor_points, xywh=True, dim=-1):\n \"\"\"Transform distance(ltrb) to box(xywh or xyxy).\"\"\"\n lt, rb = distance.chunk(2, dim)\n x1y1 = anchor_points - lt\n x2y2 = anchor_points + rb\n if xywh:\n c_xy = (x1y1 + x2y2) / 2\n wh = x2y2 - x1y1\n return torch.cat((c_xy, wh), dim) # xywh bbox\n return torch.cat((x1y1, x2y2), dim) # xyxy bbox" }, { "identifier": "make_anchors", "path": "ultralytics/utils/tal.py", "snippet": "def make_anchors(feats, strides, grid_cell_offset=0.5):\n \"\"\"Generate anchors from features.\"\"\"\n anchor_points, stride_tensor = [], []\n assert feats is not None\n dtype, device = feats[0].dtype, feats[0].device\n for i, stride in enumerate(strides):\n _, _, h, w = feats[i].shape\n sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x\n sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y\n sy, sx = torch.meshgrid(sy, sx, indexing='ij') if TORCH_1_10 else torch.meshgrid(sy, sx)\n anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2))\n stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device))\n return torch.cat(anchor_points), torch.cat(stride_tensor)" }, { "identifier": "DFL", "path": "ultralytics/nn/modules/block.py", "snippet": "class DFL(nn.Module):\n \"\"\"\n Integral module of Distribution Focal Loss (DFL).\n\n Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391\n \"\"\"\n\n def __init__(self, c1=16):\n \"\"\"Initialize a convolutional layer with a given number of input channels.\"\"\"\n super().__init__()\n self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False)\n x = torch.arange(c1, dtype=torch.float)\n self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1))\n self.c1 = c1\n\n def forward(self, x):\n \"\"\"Applies a transformer layer on input tensor 'x' and returns a tensor.\"\"\"\n b, c, a = x.shape # batch, channels, anchors\n return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a)\n # return self.conv(x.view(b, self.c1, 4, a).softmax(1)).view(b, 4, a)" }, { "identifier": "Proto", "path": "ultralytics/nn/modules/block.py", "snippet": "class Proto(nn.Module):\n \"\"\"YOLOv8 mask Proto module for segmentation models.\"\"\"\n\n def __init__(self, c1, c_=256, c2=32):\n \"\"\"\n Initializes the YOLOv8 mask Proto module with specified number of protos and masks.\n\n Input arguments are ch_in, number of protos, number of masks.\n \"\"\"\n super().__init__()\n self.cv1 = Conv(c1, c_, k=3)\n self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True) # nn.Upsample(scale_factor=2, mode='nearest')\n self.cv2 = Conv(c_, c_, k=3)\n self.cv3 = Conv(c_, c2)\n\n def forward(self, x):\n \"\"\"Performs a forward pass through layers using an upsampled input image.\"\"\"\n return self.cv3(self.cv2(self.upsample(self.cv1(x))))" }, { "identifier": "Conv", "path": "ultralytics/nn/modules/conv.py", "snippet": "class Conv(nn.Module):\n \"\"\"Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation).\"\"\"\n default_act = nn.SiLU() # default activation\n\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):\n \"\"\"Initialize Conv layer with given arguments including activation.\"\"\"\n super().__init__()\n self.conv = nn.Conv2d(c1, 
c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()\n\n def forward(self, x):\n \"\"\"Apply convolution, batch normalization and activation to input tensor.\"\"\"\n return self.act(self.bn(self.conv(x)))\n\n def forward_fuse(self, x):\n \"\"\"Perform transposed convolution of 2D data.\"\"\"\n return self.act(self.conv(x))" }, { "identifier": "MLP", "path": "ultralytics/nn/modules/transformer.py", "snippet": "class MLP(nn.Module):\n \"\"\"Implements a simple multi-layer perceptron (also called FFN).\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n \"\"\"Initialize the MLP with specified input, hidden, output dimensions and number of layers.\"\"\"\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n \"\"\"Forward pass for the entire MLP.\"\"\"\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "DeformableTransformerDecoder", "path": "ultralytics/nn/modules/transformer.py", "snippet": "class DeformableTransformerDecoder(nn.Module):\n \"\"\"\n Implementation of Deformable Transformer Decoder based on PaddleDetection.\n\n https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py\n \"\"\"\n\n def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):\n \"\"\"Initialize the DeformableTransformerDecoder with the given parameters.\"\"\"\n super().__init__()\n self.layers = _get_clones(decoder_layer, num_layers)\n self.num_layers = num_layers\n self.hidden_dim = hidden_dim\n self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx\n\n def forward(\n self,\n embed, # decoder embeddings\n refer_bbox, # anchor\n feats, # image features\n shapes, # feature shapes\n bbox_head,\n score_head,\n pos_mlp,\n attn_mask=None,\n padding_mask=None):\n \"\"\"Perform the forward pass through the entire decoder.\"\"\"\n output = embed\n dec_bboxes = []\n dec_cls = []\n last_refined_bbox = None\n refer_bbox = refer_bbox.sigmoid()\n for i, layer in enumerate(self.layers):\n output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox))\n\n bbox = bbox_head[i](output)\n refined_bbox = torch.sigmoid(bbox + inverse_sigmoid(refer_bbox))\n\n if self.training:\n dec_cls.append(score_head[i](output))\n if i == 0:\n dec_bboxes.append(refined_bbox)\n else:\n dec_bboxes.append(torch.sigmoid(bbox + inverse_sigmoid(last_refined_bbox)))\n elif i == self.eval_idx:\n dec_cls.append(score_head[i](output))\n dec_bboxes.append(refined_bbox)\n break\n\n last_refined_bbox = refined_bbox\n refer_bbox = refined_bbox.detach() if self.training else refined_bbox\n\n return torch.stack(dec_bboxes), torch.stack(dec_cls)" }, { "identifier": "DeformableTransformerDecoderLayer", "path": "ultralytics/nn/modules/transformer.py", "snippet": "class DeformableTransformerDecoderLayer(nn.Module):\n \"\"\"\n Deformable Transformer Decoder Layer inspired by PaddleDetection and Deformable-DETR implementations.\n\n https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py\n https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py\n 
\"\"\"\n\n def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0., act=nn.ReLU(), n_levels=4, n_points=4):\n \"\"\"Initialize the DeformableTransformerDecoderLayer with the given parameters.\"\"\"\n super().__init__()\n\n # Self attention\n self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.norm1 = nn.LayerNorm(d_model)\n\n # Cross attention\n self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n self.dropout2 = nn.Dropout(dropout)\n self.norm2 = nn.LayerNorm(d_model)\n\n # FFN\n self.linear1 = nn.Linear(d_model, d_ffn)\n self.act = act\n self.dropout3 = nn.Dropout(dropout)\n self.linear2 = nn.Linear(d_ffn, d_model)\n self.dropout4 = nn.Dropout(dropout)\n self.norm3 = nn.LayerNorm(d_model)\n\n @staticmethod\n def with_pos_embed(tensor, pos):\n \"\"\"Add positional embeddings to the input tensor, if provided.\"\"\"\n return tensor if pos is None else tensor + pos\n\n def forward_ffn(self, tgt):\n \"\"\"Perform forward pass through the Feed-Forward Network part of the layer.\"\"\"\n tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))\n tgt = tgt + self.dropout4(tgt2)\n return self.norm3(tgt)\n\n def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None):\n \"\"\"Perform the forward pass through the entire decoder layer.\"\"\"\n\n # Self attention\n q = k = self.with_pos_embed(embed, query_pos)\n tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1),\n attn_mask=attn_mask)[0].transpose(0, 1)\n embed = embed + self.dropout1(tgt)\n embed = self.norm1(embed)\n\n # Cross attention\n tgt = self.cross_attn(self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes,\n padding_mask)\n embed = embed + self.dropout2(tgt)\n embed = self.norm2(embed)\n\n # FFN\n return self.forward_ffn(embed)" }, { "identifier": "bias_init_with_prob", "path": "ultralytics/nn/modules/utils.py", "snippet": "def bias_init_with_prob(prior_prob=0.01):\n \"\"\"Initialize conv/fc bias value according to a given probability value.\"\"\"\n return float(-np.log((1 - prior_prob) / prior_prob)) # return bias_init" }, { "identifier": "linear_init_", "path": "ultralytics/nn/modules/utils.py", "snippet": "def linear_init_(module):\n \"\"\"Initialize the weights and biases of a linear module.\"\"\"\n bound = 1 / math.sqrt(module.weight.shape[0])\n uniform_(module.weight, -bound, bound)\n if hasattr(module, 'bias') and module.bias is not None:\n uniform_(module.bias, -bound, bound)" } ]
import math
import torch
import torch.nn as nn
from torch.nn.init import constant_, xavier_uniform_
from ultralytics.utils.tal import TORCH_1_10, dist2bbox, make_anchors
from .block import DFL, Proto
from .conv import Conv
from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
from .utils import bias_init_with_prob, linear_init_
from ultralytics.models.utils.ops import get_cdn_group
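The make_anchors and dist2bbox helpers shown in the context above pair naturally: the first derives anchor points and strides from feature-map shapes, the second decodes ltrb distances around those points. A shape-level sketch with made-up dummy feature maps (the 64-channel size is arbitrary):

import torch
from ultralytics.utils.tal import dist2bbox, make_anchors

feats = [torch.zeros(1, 64, 8, 8), torch.zeros(1, 64, 4, 4)]  # dummy BCHW maps
anchor_points, stride_tensor = make_anchors(feats, strides=[8, 16])
assert anchor_points.shape == (80, 2)   # 8*8 + 4*4 anchors
assert stride_tensor.shape == (80, 1)
distance = torch.rand(1, 4, 80)         # ltrb offsets per anchor
boxes = dist2bbox(distance, anchor_points.transpose(0, 1).unsqueeze(0), xywh=True, dim=1)
assert boxes.shape == (1, 4, 80)        # decoded xywh boxes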
4,079
# Ultralytics YOLO 🚀, AGPL-3.0 license """Model head modules.""" __all__ = 'Detect', 'Segment', 'Pose', 'Classify', 'RTDETRDecoder' class Detect(nn.Module): """YOLOv8 Detect head for detection models.""" dynamic = False # force grid reconstruction export = False # export mode shape = None anchors = torch.empty(0) # init strides = torch.empty(0) # init def __init__(self, nc=80, ch=()): """Initializes the YOLOv8 detection layer with specified number of classes and channels.""" super().__init__() self.nc = nc # number of classes self.nl = len(ch) # number of detection layers self.reg_max = 16 # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x) self.no = nc + self.reg_max * 4 # number of outputs per anchor self.stride = torch.zeros(self.nl) # strides computed during build c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100)) # channels self.cv2 = nn.ModuleList( nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch) self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch) self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity() def forward(self, x): """Concatenates and returns predicted bounding boxes and class probabilities.""" shape = x[0].shape # BCHW for i in range(self.nl): x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1) if self.training: return x elif self.dynamic or self.shape != shape: self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5)) self.shape = shape x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2) if self.export and self.format in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'): # avoid TF FlexSplitV ops box = x_cat[:, :self.reg_max * 4] cls = x_cat[:, self.reg_max * 4:] else: box, cls = x_cat.split((self.reg_max * 4, self.nc), 1) dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides if self.export and self.format in ('tflite', 'edgetpu'): # Normalize xywh with image size to mitigate quantization error of TFLite integer models as done in YOLOv5: # https://github.com/ultralytics/yolov5/blob/0c8de3fca4a702f8ff5c435e67f378d1fce70243/models/tf.py#L307-L309 # See this PR for details: https://github.com/ultralytics/ultralytics/pull/1695 img_h = shape[2] * self.stride[0] img_w = shape[3] * self.stride[0] img_size = torch.tensor([img_w, img_h, img_w, img_h], device=dbox.device).reshape(1, 4, 1) dbox /= img_size y = torch.cat((dbox, cls.sigmoid()), 1) return y if self.export else (y, x) def bias_init(self): """Initialize Detect() biases, WARNING: requires stride availability.""" m = self # self.model[-1] # Detect() module # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1 # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # nominal class frequency for a, b, s in zip(m.cv2, m.cv3, m.stride): # from a[-1].bias.data[:] = 1.0 # box b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2) # cls (.01 objects, 80 classes, 640 img) class Segment(Detect): """YOLOv8 Segment head for segmentation models.""" def __init__(self, nc=80, nm=32, npr=256, ch=()): """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers.""" super().__init__(nc, ch) self.nm = nm # number of masks self.npr = npr # number of protos
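A sanity-check sketch of the inference path above. The channel counts and strides are invented for illustration (real values come from the model config), and stride is assigned by hand only because it is normally filled in during model build:

import torch

head = Detect(nc=80, ch=(64, 128, 256)).eval()
head.stride = torch.tensor([8., 16., 32.])  # normally computed during build
feats = [torch.zeros(1, c, s, s) for c, s in ((64, 80), (128, 40), (256, 20))]
y, raw = head(feats)  # eval mode returns (decoded preds, raw per-level maps)
print(y.shape)  # torch.Size([1, 84, 8400]): 4 box coords + 80 class scores over 8400 anchors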
self.proto = Proto(ch[0], self.npr, self.nm) # protos
4
2023-11-16 12:49:59+00:00
8k
i-super/Saleor
saleor/plugins/openid_connect/tests/test_utils.py
[ { "identifier": "Group", "path": "saleor/account/models.py", "snippet": "class Group(models.Model):\n \"\"\"The system provides a way to group users.\n\n Groups are a generic way of categorizing users to apply permissions, or\n some other label, to those users. A user can belong to any number of\n groups.\n\n A user in a group automatically has all the permissions granted to that\n group. For example, if the group 'Site editors' has the permission\n can_edit_home_page, any user in that group will have that permission.\n\n Beyond permissions, groups are a convenient way to categorize users to\n apply some label, or extended functionality, to them. For example, you\n could create a group 'Special users', and you could write code that would\n do special things to those users -- such as giving them access to a\n members-only portion of your site, or sending them members-only email\n messages.\n \"\"\"\n\n name = models.CharField(\"name\", max_length=150, unique=True)\n permissions = models.ManyToManyField(\n Permission,\n verbose_name=\"permissions\",\n blank=True,\n )\n restricted_access_to_channels = models.BooleanField(default=False)\n channels = models.ManyToManyField(\"channel.Channel\", blank=True)\n\n objects = GroupManager()\n\n class Meta:\n verbose_name = \"group\"\n verbose_name_plural = \"groups\"\n\n def __str__(self):\n return self.name\n\n def natural_key(self):\n return (self.name,)" }, { "identifier": "User", "path": "saleor/account/models.py", "snippet": "class User(\n PermissionsMixin, ModelWithMetadata, AbstractBaseUser, ModelWithExternalReference\n):\n email = models.EmailField(unique=True)\n first_name = models.CharField(max_length=256, blank=True)\n last_name = models.CharField(max_length=256, blank=True)\n addresses = models.ManyToManyField(\n Address, blank=True, related_name=\"user_addresses\"\n )\n is_staff = models.BooleanField(default=False)\n is_active = models.BooleanField(default=True)\n is_confirmed = models.BooleanField(default=True)\n last_confirm_email_request = models.DateTimeField(null=True, blank=True)\n note = models.TextField(null=True, blank=True)\n date_joined = models.DateTimeField(default=timezone.now, editable=False)\n updated_at = models.DateTimeField(auto_now=True, db_index=True)\n last_password_reset_request = models.DateTimeField(null=True, blank=True)\n default_shipping_address = models.ForeignKey(\n Address, related_name=\"+\", null=True, blank=True, on_delete=models.SET_NULL\n )\n default_billing_address = models.ForeignKey(\n Address, related_name=\"+\", null=True, blank=True, on_delete=models.SET_NULL\n )\n avatar = models.ImageField(upload_to=\"user-avatars\", blank=True, null=True)\n jwt_token_key = models.CharField(\n max_length=12, default=partial(get_random_string, length=12)\n )\n language_code = models.CharField(\n max_length=35, choices=settings.LANGUAGES, default=settings.LANGUAGE_CODE\n )\n search_document = models.TextField(blank=True, default=\"\")\n uuid = models.UUIDField(default=uuid4, unique=True)\n\n USERNAME_FIELD = \"email\"\n\n objects = UserManager()\n\n class Meta:\n ordering = (\"email\",)\n permissions = (\n (AccountPermissions.MANAGE_USERS.codename, \"Manage customers.\"),\n (AccountPermissions.MANAGE_STAFF.codename, \"Manage staff.\"),\n (AccountPermissions.IMPERSONATE_USER.codename, \"Impersonate user.\"),\n )\n indexes = [\n *ModelWithMetadata.Meta.indexes,\n # Orders searching index\n GinIndex(\n name=\"order_user_search_gin\",\n # `opclasses` and `fields` should be the same length\n fields=[\"email\", 
\"first_name\", \"last_name\"],\n opclasses=[\"gin_trgm_ops\"] * 3,\n ),\n # Account searching index\n GinIndex(\n name=\"user_search_gin\",\n # `opclasses` and `fields` should be the same length\n fields=[\"search_document\"],\n opclasses=[\"gin_trgm_ops\"],\n ),\n GinIndex(\n name=\"user_p_meta_jsonb_path_idx\",\n fields=[\"private_metadata\"],\n opclasses=[\"jsonb_path_ops\"],\n ),\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._effective_permissions = None\n\n def __str__(self):\n # Override the default __str__ of AbstractUser that returns username, which may\n # lead to leaking sensitive data in logs.\n return str(self.uuid)\n\n @property\n def effective_permissions(self) -> models.QuerySet[Permission]:\n if self._effective_permissions is None:\n self._effective_permissions = get_permissions()\n if not self.is_superuser:\n UserPermission = User.user_permissions.through\n user_permission_queryset = UserPermission._default_manager.filter(\n user_id=self.pk\n ).values(\"permission_id\")\n\n UserGroup = User.groups.through\n GroupPermission = Group.permissions.through\n user_group_queryset = UserGroup._default_manager.filter(\n user_id=self.pk\n ).values(\"group_id\")\n group_permission_queryset = GroupPermission.objects.filter(\n Exists(user_group_queryset.filter(group_id=OuterRef(\"group_id\")))\n ).values(\"permission_id\")\n\n self._effective_permissions = self._effective_permissions.filter(\n Q(\n Exists(\n user_permission_queryset.filter(\n permission_id=OuterRef(\"pk\")\n )\n )\n )\n | Q(\n Exists(\n group_permission_queryset.filter(\n permission_id=OuterRef(\"pk\")\n )\n )\n )\n )\n return self._effective_permissions\n\n @effective_permissions.setter\n def effective_permissions(self, value: models.QuerySet[Permission]):\n self._effective_permissions = value\n # Drop cache for authentication backend\n self._effective_permissions_cache = None\n\n def get_full_name(self):\n if self.first_name or self.last_name:\n return f\"{self.first_name} {self.last_name}\".strip()\n if self.default_billing_address:\n first_name = self.default_billing_address.first_name\n last_name = self.default_billing_address.last_name\n if first_name or last_name:\n return f\"{first_name} {last_name}\".strip()\n return self.email\n\n def get_short_name(self):\n return self.email\n\n def has_perm(self, perm: Union[BasePermissionEnum, str], obj=None) -> bool:\n # This method is overridden to accept perm as BasePermissionEnum\n perm = perm.value if isinstance(perm, BasePermissionEnum) else perm\n\n # Active superusers have all permissions.\n if self.is_active and self.is_superuser and not self._effective_permissions:\n return True\n return _user_has_perm(self, perm, obj)\n\n def has_perms(\n self, perm_list: Iterable[Union[BasePermissionEnum, str]], obj=None\n ) -> bool:\n # This method is overridden to accept perm as BasePermissionEnum\n perm_list = [\n perm.value if isinstance(perm, BasePermissionEnum) else perm\n for perm in perm_list\n ]\n return super().has_perms(perm_list, obj)\n\n def can_login(self, site_settings: SiteSettings):\n return self.is_active and (\n site_settings.allow_login_without_confirmation\n or not site_settings.enable_account_confirmation_by_email\n or self.is_confirmed\n )" }, { "identifier": "JWT_REFRESH_TYPE", "path": "saleor/core/jwt.py", "snippet": "JWT_REFRESH_TYPE = \"refresh\"" }, { "identifier": "PERMISSIONS_FIELD", "path": "saleor/core/jwt.py", "snippet": "PERMISSIONS_FIELD = \"permissions\"" }, { "identifier": "jwt_decode", "path": 
"saleor/core/jwt.py", "snippet": "def jwt_decode(\n token: str, verify_expiration=settings.JWT_EXPIRE, verify_aud: bool = False\n) -> dict[str, Any]:\n jwt_manager = get_jwt_manager()\n return jwt_manager.decode(token, verify_expiration, verify_aud=verify_aud)" }, { "identifier": "jwt_encode", "path": "saleor/core/jwt.py", "snippet": "def jwt_encode(payload: dict[str, Any]) -> str:\n jwt_manager = get_jwt_manager()\n return jwt_manager.encode(payload)" }, { "identifier": "jwt_user_payload", "path": "saleor/core/jwt.py", "snippet": "def jwt_user_payload(\n user: User,\n token_type: str,\n exp_delta: Optional[timedelta],\n additional_payload: Optional[dict[str, Any]] = None,\n token_owner: str = JWT_SALEOR_OWNER_NAME,\n) -> dict[str, Any]:\n payload = jwt_base_payload(exp_delta, token_owner)\n payload.update(\n {\n \"token\": user.jwt_token_key,\n \"email\": user.email,\n \"type\": token_type,\n \"user_id\": graphene.Node.to_global_id(\"User\", user.id),\n \"is_staff\": user.is_staff,\n }\n )\n if additional_payload:\n payload.update(additional_payload)\n return payload" }, { "identifier": "Permission", "path": "saleor/permission/models.py", "snippet": "class Permission(models.Model):\n \"\"\"The system provides a way to assign permissions to users and groups of users.\n\n The permission system is used by the Django admin site, but may also be\n useful in your own code. The Django admin site uses permissions as follows:\n\n - The \"add\" permission limits the user's ability to view the \"add\" form\n and add an object.\n - The \"change\" permission limits a user's ability to view the change\n list, view the \"change\" form and change an object.\n - The \"delete\" permission limits the ability to delete an object.\n - The \"view\" permission limits the ability to view an object.\n\n Permissions are set globally per type of object, not per specific object\n instance. 
It is possible to say \"Mary may change news stories,\" but it's\n not currently possible to say \"Mary may change news stories, but only the\n ones she created herself\" or \"Mary may only change news stories that have a\n certain status or publication date.\"\n\n The permissions listed above are automatically created for each model.\n \"\"\"\n\n name = models.CharField(_(\"name\"), max_length=255)\n content_type = models.ForeignKey(\n ContentType,\n models.CASCADE,\n verbose_name=_(\"content type\"),\n related_name=\"content_type\",\n )\n codename = models.CharField(_(\"codename\"), max_length=100)\n\n objects = PermissionManager()\n\n class Meta:\n verbose_name = _(\"permission\")\n verbose_name_plural = _(\"permissions\")\n unique_together = [[\"content_type\", \"codename\"]]\n ordering = [\"content_type__app_label\", \"content_type__model\", \"codename\"]\n\n def __str__(self):\n return f\"{self.content_type} | {self.name}\"\n\n def natural_key(self):\n return (self.codename,) + self.content_type.natural_key()\n\n natural_key.dependencies = [\"contenttypes.contenttype\"] # type: ignore[attr-defined] # noqa: E501" }, { "identifier": "AuthenticationError", "path": "saleor/plugins/openid_connect/exceptions.py", "snippet": "class AuthenticationError(Exception):\n \"\"\"Raises when error occurred during authentication.\"\"\"" }, { "identifier": "JWKS_CACHE_TIME", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "JWKS_CACHE_TIME = 60 * 60 # 1 hour" }, { "identifier": "JWKS_KEY", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "JWKS_KEY = \"oauth_jwks\"" }, { "identifier": "OIDC_DEFAULT_CACHE_TIME", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "OIDC_DEFAULT_CACHE_TIME = 60 * 60 # 1 hour" }, { "identifier": "_update_user_details", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def _update_user_details(\n user: User,\n oidc_key: str,\n user_email: str,\n user_first_name: str,\n user_last_name: str,\n sub: str,\n last_login: Optional[int],\n):\n user_sub = user.get_value_from_private_metadata(oidc_key)\n fields_to_save = set()\n if user_sub != sub:\n user.store_value_in_private_metadata({oidc_key: sub})\n fields_to_save.add(\"private_metadata\")\n\n if user.email != user_email:\n if User.objects.filter(email=user_email).exists():\n logger.warning(\n \"Unable to update user email as the new one already exists in DB\",\n extra={\"oidc_key\": oidc_key},\n )\n return\n user.email = user_email\n match_orders_with_new_user(user)\n fields_to_save.update({\"email\", \"search_document\"})\n\n if last_login:\n if not user.last_login or user.last_login.timestamp() < last_login:\n login_time = timezone.make_aware(datetime.fromtimestamp(last_login))\n user.last_login = login_time\n fields_to_save.add(\"last_login\")\n else:\n if (\n not user.last_login\n or (timezone.now() - user.last_login).seconds\n > settings.OAUTH_UPDATE_LAST_LOGIN_THRESHOLD\n ):\n user.last_login = timezone.now()\n fields_to_save.add(\"last_login\")\n\n if user.first_name != user_first_name:\n user.first_name = user_first_name\n fields_to_save.update({\"first_name\", \"search_document\"})\n\n if user.last_name != user_last_name:\n user.last_name = user_last_name\n fields_to_save.update({\"last_name\", \"search_document\"})\n\n if \"search_document\" in fields_to_save:\n user.search_document = prepare_user_search_document_value(\n user, attach_addresses_data=False\n )\n\n if fields_to_save:\n user.save(update_fields=fields_to_save)" }, { "identifier": 
"assign_staff_to_default_group_and_update_permissions", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def assign_staff_to_default_group_and_update_permissions(\n user: \"User\", default_group_name: str\n):\n \"\"\"Assign staff user to the default permission group. and update user permissions.\n\n If the group doesn't exist, the new group without any assigned permissions and\n channels will be created.\n \"\"\"\n default_group_name = (\n default_group_name.strip() if default_group_name else default_group_name\n )\n if default_group_name:\n group, _ = Group.objects.get_or_create(\n name=default_group_name, defaults={\"restricted_access_to_channels\": True}\n )\n user.groups.add(group)\n group_permissions = get_user_groups_permissions(user)\n user.effective_permissions |= group_permissions" }, { "identifier": "create_jwt_refresh_token", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def create_jwt_refresh_token(user: User, refresh_token: str, csrf: str, owner: str):\n additional_payload = {\n OAUTH_TOKEN_REFRESH_FIELD: refresh_token,\n CSRF_FIELD: csrf,\n }\n jwt_payload = jwt_user_payload(\n user,\n JWT_REFRESH_TYPE,\n # oauth_refresh_token has own expiration time. No need to duplicate it here\n exp_delta=None,\n additional_payload=additional_payload,\n token_owner=owner,\n )\n return jwt_encode(jwt_payload)" }, { "identifier": "create_jwt_token", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def create_jwt_token(\n id_payload: CodeIDToken,\n user: User,\n access_token: str,\n permissions: Optional[list[str]],\n owner: str,\n) -> str:\n additional_payload = {\n \"exp\": id_payload[\"exp\"],\n \"oauth_access_key\": access_token,\n }\n if permissions is not None:\n additional_payload[PERMISSIONS_FIELD] = permissions\n\n jwt_payload = jwt_user_payload(\n user,\n JWT_ACCESS_TYPE,\n exp_delta=None, # we pass exp from auth service, in additional_payload\n additional_payload=additional_payload,\n token_owner=owner,\n )\n return jwt_encode(jwt_payload)" }, { "identifier": "create_tokens_from_oauth_payload", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def create_tokens_from_oauth_payload(\n token_data: dict,\n user: User,\n claims: CodeIDToken,\n permissions: Optional[list[str]],\n owner: str,\n):\n refresh_token = token_data.get(\"refresh_token\")\n access_token = token_data.get(\"access_token\", \"\")\n\n tokens = {\n \"token\": create_jwt_token(claims, user, access_token, permissions, owner),\n }\n if refresh_token:\n csrf_token = _get_new_csrf_token()\n tokens[\"refresh_token\"] = create_jwt_refresh_token(\n user, refresh_token, csrf_token, owner\n )\n tokens[\"csrf_token\"] = csrf_token\n return tokens" }, { "identifier": "fetch_jwks", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def fetch_jwks(jwks_url) -> Optional[dict]:\n \"\"\"Fetch JSON Web Key Sets from a provider.\n\n Fetched keys will be stored in the cache to the reduced amount of possible\n requests.\n :raises AuthenticationError\n \"\"\"\n response = None\n try:\n response = HTTPClient.send_request(\"GET\", jwks_url, allow_redirects=False)\n response.raise_for_status()\n jwks = response.json()\n except requests.exceptions.RequestException:\n logger.exception(\"Unable to fetch jwks from %s\", jwks_url)\n raise AuthenticationError(\"Unable to finalize the authentication process.\")\n except json.JSONDecodeError:\n content = response.content if response else \"Unable to find the response\"\n logger.exception(\n \"Unable to decode the response from auth 
service with jwks. \"\n \"Response: %s\",\n content,\n )\n raise AuthenticationError(\"Unable to finalize the authentication process.\")\n keys = jwks.get(\"keys\", [])\n if not keys:\n logger.warning(\"List of JWKS keys is empty\")\n cache.set(JWKS_KEY, keys, JWKS_CACHE_TIME)\n return keys" }, { "identifier": "get_domain_from_email", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def get_domain_from_email(email: str):\n \"\"\"Return domain from the email.\"\"\"\n _user, delim, domain = email.rpartition(\"@\")\n return domain if delim else None" }, { "identifier": "get_or_create_user_from_payload", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def get_or_create_user_from_payload(\n payload: dict,\n oauth_url: str,\n last_login: Optional[int] = None,\n) -> User:\n oidc_metadata_key = f\"oidc:{oauth_url}\"\n user_email = payload.get(\"email\")\n if not user_email:\n raise AuthenticationError(\"Missing user's email.\")\n\n sub = payload.get(\"sub\")\n get_kwargs = {\"private_metadata__contains\": {oidc_metadata_key: sub}}\n if not sub:\n get_kwargs = {\"email\": user_email}\n logger.warning(\"Missing sub section in OIDC payload\")\n\n defaults_create = {\n \"is_active\": True,\n \"is_confirmed\": True,\n \"email\": user_email,\n \"first_name\": payload.get(\"given_name\", \"\"),\n \"last_name\": payload.get(\"family_name\", \"\"),\n \"private_metadata\": {oidc_metadata_key: sub},\n \"password\": make_password(None),\n }\n cache_key = oidc_metadata_key + \":\" + str(sub)\n user_id = cache.get(cache_key)\n if user_id:\n get_kwargs = {\"id\": user_id}\n try:\n user = User.objects.using(settings.DATABASE_CONNECTION_REPLICA_NAME).get(\n **get_kwargs\n )\n except User.DoesNotExist:\n user, _ = User.objects.get_or_create(\n email=user_email,\n defaults=defaults_create,\n )\n match_orders_with_new_user(user)\n except User.MultipleObjectsReturned:\n logger.warning(\"Multiple users returned for single OIDC sub ID\")\n user, _ = User.objects.get_or_create(\n email=user_email,\n defaults=defaults_create,\n )\n\n site_settings = Site.objects.get_current().settings\n if not user.can_login(site_settings): # it is true only if we fetch disabled user.\n raise AuthenticationError(\"Unable to log in.\")\n\n _update_user_details(\n user=user,\n oidc_key=oidc_metadata_key,\n user_email=user_email,\n user_first_name=defaults_create[\"first_name\"],\n user_last_name=defaults_create[\"last_name\"],\n sub=sub, # type: ignore\n last_login=last_login,\n )\n\n cache.set(cache_key, user.id, min(JWKS_CACHE_TIME, OIDC_DEFAULT_CACHE_TIME))\n return user" }, { "identifier": "get_saleor_permission_names", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def get_saleor_permission_names(permissions: QuerySet) -> list[str]:\n permission_names = get_permission_names(permissions)\n return list(permission_names)" }, { "identifier": "get_saleor_permissions_qs_from_scope", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def get_saleor_permissions_qs_from_scope(scope: str) -> QuerySet[Permission]:\n scope_list = scope.lower().strip().split()\n return get_saleor_permissions_from_list(scope_list)" }, { "identifier": "get_user_from_oauth_access_token_in_jwt_format", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def get_user_from_oauth_access_token_in_jwt_format(\n token_payload: JWTClaims,\n user_info_url: str,\n access_token: str,\n use_scope_permissions: bool,\n audience: str,\n staff_user_domains: list[str],\n staff_default_group_name: str,\n):\n try:\n 
token_payload.validate()\n except (JoseError, ValueError) as e:\n logger.info(\n \"OIDC access token validation failed\",\n extra={\"error\": e, \"user_info_url\": user_info_url},\n )\n return None\n\n user_info = get_user_info_from_cache_or_fetch(\n user_info_url,\n access_token,\n token_payload.get(\"exp\"),\n )\n if not user_info:\n logger.info(\n \"Failed to fetch user info for a valid OIDC access token\",\n extra={\"token_exp\": token_payload[\"exp\"], \"user_info_url\": user_info_url},\n )\n return None\n\n try:\n user = get_or_create_user_from_payload(\n user_info,\n user_info_url,\n last_login=token_payload.get(\"iat\"),\n )\n except AuthenticationError as e:\n logger.info(\"Unable to create a user object\", extra={\"error\": e})\n return None\n\n scope = token_payload.get(\"scope\")\n token_permissions = token_payload.get(\"permissions\", [])\n\n # check if token contains expected aud\n aud = token_payload.get(\"aud\")\n if not audience:\n audience_in_token = False\n elif isinstance(aud, list):\n audience_in_token = audience in aud\n else:\n audience_in_token = audience == aud\n\n is_staff = None\n email_domain = get_domain_from_email(user.email)\n is_staff_email = email_domain in staff_user_domains\n is_staff_id = SALEOR_STAFF_PERMISSION\n if (use_scope_permissions and audience_in_token) or is_staff_email:\n permissions = get_saleor_permissions_qs_from_scope(scope)\n if not permissions and token_permissions:\n permissions = get_saleor_permissions_from_list(token_permissions)\n user.effective_permissions = permissions\n\n is_staff_in_scope = is_staff_id in scope\n is_staff_in_token_permissions = is_staff_id in token_permissions\n if (\n is_staff_email\n or is_staff_in_scope\n or is_staff_in_token_permissions\n or permissions\n ):\n assign_staff_to_default_group_and_update_permissions(\n user, staff_default_group_name\n )\n if not user.is_staff:\n is_staff = True\n elif user.is_staff:\n is_staff = False\n else:\n is_staff = False\n\n if is_staff is not None:\n user.is_staff = is_staff\n user.save(update_fields=[\"is_staff\"])\n\n return user" }, { "identifier": "get_user_from_token", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def get_user_from_token(claims: CodeIDToken) -> User:\n user_email = claims.get(\"email\")\n if not user_email:\n raise AuthenticationError(\"Missing user's email.\")\n\n site_settings = Site.objects.get_current().settings\n user = User.objects.filter(email=user_email).first()\n if not user or not user.can_login(site_settings):\n raise AuthenticationError(\"User does not exist.\")\n return user" }, { "identifier": "get_user_info", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def get_user_info(user_info_url, access_token) -> Optional[dict]:\n try:\n response = HTTPClient.send_request(\n \"GET\",\n user_info_url,\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n allow_redirects=False,\n )\n response.raise_for_status()\n return response.json()\n except requests.exceptions.HTTPError as e:\n logger.warning(\n \"Fetching OIDC user info failed. 
HTTP error occurred\",\n extra={\"user_info_url\": user_info_url, \"error\": e},\n )\n return None\n except requests.exceptions.RequestException as e:\n logger.warning(\n \"Fetching OIDC user info failed\",\n extra={\"user_info_url\": user_info_url, \"error\": e},\n )\n return None\n except json.JSONDecodeError as e:\n logger.warning(\n \"Invalid OIDC user info response\",\n extra={\"user_info_url\": user_info_url, \"error\": e},\n )\n return None" }, { "identifier": "validate_refresh_token", "path": "saleor/plugins/openid_connect/utils.py", "snippet": "def validate_refresh_token(refresh_token, data):\n csrf_token = data.get(\"csrfToken\")\n if not refresh_token:\n raise ValidationError(\n {\n \"refreshToken\": ValidationError(\n \"Missing token.\", code=PluginErrorCode.NOT_FOUND.value\n )\n }\n )\n\n try:\n refresh_payload = jwt_decode(refresh_token, verify_expiration=True)\n except PyJWTError:\n raise ValidationError(\n {\n \"refreshToken\": ValidationError(\n \"Unable to decode the refresh token.\",\n code=PluginErrorCode.INVALID.value,\n )\n }\n )\n\n if not data.get(\"refreshToken\"):\n if not refresh_payload.get(CSRF_FIELD):\n raise ValidationError(\n {\n CSRF_FIELD: ValidationError(\n \"Missing CSRF token in refresh payload.\",\n code=PluginErrorCode.INVALID.value,\n )\n }\n )\n if not csrf_token:\n raise ValidationError(\n {\n \"csrfToken\": ValidationError(\n \"CSRF token needs to be provided.\",\n code=PluginErrorCode.INVALID.value,\n )\n }\n )\n is_valid = _does_token_match(csrf_token, refresh_payload[CSRF_FIELD])\n if not is_valid:\n raise ValidationError(\n {\n \"csrfToken\": ValidationError(\n \"CSRF token doesn't match.\",\n code=PluginErrorCode.INVALID.value,\n )\n }\n )" } ]
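Of the helpers above, get_domain_from_email is a pure function and easy to pin down in this module's pytest style; the test below is an invented sketch, not part of the original suite.

def test_get_domain_from_email_examples():
    assert get_domain_from_email("staff@saleor.io") == "saleor.io"
    assert get_domain_from_email("not-an-email") is None  # no "@" separator, no domain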
import json
import time
import warnings
import pytest
import pytz
import requests
from datetime import datetime, timedelta
from unittest import mock
from unittest.mock import MagicMock, Mock, call, patch
from authlib.jose import JWTClaims
from django.core.exceptions import ValidationError
from django.utils import timezone
from freezegun import freeze_time
from requests import Response
from requests_hardened import HTTPSession
from ....account.models import Group, User
from ....core.jwt import (
    JWT_REFRESH_TYPE,
    PERMISSIONS_FIELD,
    jwt_decode,
    jwt_encode,
    jwt_user_payload,
)
from ....permission.models import Permission
from ..exceptions import AuthenticationError
from ..utils import (
    JWKS_CACHE_TIME,
    JWKS_KEY,
    OIDC_DEFAULT_CACHE_TIME,
    _update_user_details,
    assign_staff_to_default_group_and_update_permissions,
    create_jwt_refresh_token,
    create_jwt_token,
    create_tokens_from_oauth_payload,
    fetch_jwks,
    get_domain_from_email,
    get_or_create_user_from_payload,
    get_saleor_permission_names,
    get_saleor_permissions_qs_from_scope,
    get_user_from_oauth_access_token_in_jwt_format,
    get_user_from_token,
    get_user_info,
    validate_refresh_token,
)
7,196
OIDC_CACHE_TIMEOUT = min(JWKS_CACHE_TIME, OIDC_DEFAULT_CACHE_TIME) @pytest.mark.parametrize( "error", [ json.JSONDecodeError(msg="", doc="", pos=0), requests.exceptions.RequestException(), ], ) def test_fetch_jwks_raises_error(monkeypatch, error): mocked_get = Mock() mocked_get.side_effect = error jwks_url = "http://localhost:3000/" monkeypatch.setattr(HTTPSession, "request", mocked_get) with pytest.raises(AuthenticationError): fetch_jwks(jwks_url) @pytest.mark.vcr @mock.patch("saleor.plugins.openid_connect.utils.cache.set") def test_fetch_jwks(mocked_cache_set): jwks_url = "https://saleor.io/.well-known/jwks.json" keys = fetch_jwks(jwks_url) assert len(keys) == 2 mocked_cache_set.assert_called_once_with(JWKS_KEY, keys, JWKS_CACHE_TIME) def test_get_or_create_user_from_token_missing_email(id_payload): del id_payload["email"] with pytest.raises(AuthenticationError): get_or_create_user_from_payload(id_payload, "https://saleor.io/oauth") def test_get_or_create_user_from_token_user_not_active(id_payload, admin_user): admin_user.is_active = False admin_user.save() with pytest.raises(AuthenticationError): get_or_create_user_from_payload(id_payload, "https://saleor.io/oauth") def test_get_user_from_token_missing_email(id_payload): del id_payload["email"] with pytest.raises(AuthenticationError): get_user_from_token(id_payload) def test_get_user_from_token_missing_user(id_payload): User.objects.all().delete() with pytest.raises(AuthenticationError): get_user_from_token(id_payload) def test_get_user_from_token_user_not_active(id_payload, admin_user): admin_user.is_active = False admin_user.save() with pytest.raises(AuthenticationError): get_user_from_token(id_payload) @freeze_time("2019-03-18 12:00:00") def test_create_tokens_from_oauth_payload(monkeypatch, id_token, id_payload): mocked_jwt_validator = MagicMock() mocked_jwt_validator.__getitem__.side_effect = id_payload.__getitem__ monkeypatch.setattr( "saleor.plugins.openid_connect.utils.get_decoded_token", Mock(return_value=mocked_jwt_validator), ) permissions_from_scope = [ "MANAGE_ORDERS", ] auth_payload = { "access_token": "FeHkE_QbuU3cYy1a1eQUrCE5jRcUnBK3", "refresh_token": "refresh", "id_token": id_token, "scope": ( "openid profile email offline_access saleor:manage_orders saleor:staff" ), "expires_in": 86400, "token_type": "Bearer", "expires_at": 1600851112, } user = get_or_create_user_from_payload(id_payload, "https://saleor.io/oauth") permissions = get_saleor_permissions_qs_from_scope(auth_payload.get("scope")) perms = get_saleor_permission_names(permissions) tokens = create_tokens_from_oauth_payload( auth_payload, user, id_payload, perms, "PluginID" ) created_user = User.objects.get()
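One more illustrative test, assuming the module imports above: validate_refresh_token rejects a missing token outright, before attempting any JWT decoding. The test name is invented for this sketch.

def test_validate_refresh_token_missing_token():
    with pytest.raises(ValidationError):
        validate_refresh_token(None, {})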
token = create_jwt_token(
15
2023-11-13 05:00:35+00:00
8k
Aues6uen11Z/Zafkiel
zafkiel/ocr/ocr.py
[ { "identifier": "logger", "path": "zafkiel/logger.py", "snippet": "" }, { "identifier": "Config", "path": "zafkiel/config.py", "snippet": "class Config:\n ST = Settings\n ST.CVSTRATEGY = [\"mstpl\", \"sift\"]\n ST.THRESHOLD = 0.8\n\n GAME_PATH = None\n SERVER_LANG = 'cn'\n\n # Top, left and bottom boundary pixel values when running in a bordered program\n # The value on my Win10 computer, may not accurate for everyone.\n BORDER = (32, 3, 2)" }, { "identifier": "cached_property", "path": "zafkiel/decorator.py", "snippet": "class cached_property(Generic[T]):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/decorator.py\n\n A property that is only computed once per instance and then replaces itself\n with an ordinary attribute. Deleting the attribute resets the property.\n Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76\n \"\"\"\n\n def __init__(self, func: Callable[..., T]):\n self.func = func\n\n def __get__(self, obj, cls) -> T:\n if obj is None:\n return self\n\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n return value" }, { "identifier": "ImageTemplate", "path": "zafkiel/device/template.py", "snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n target_pos: int = TargetPos.MID,\n resolution: tuple = (1280, 720),\n rgb: bool = False,\n scale_max: int = 800,\n scale_step: float = 0.005,\n template_path: str = 'templates'\n ):\n\n super().__init__(filename, threshold, target_pos, record_pos, resolution, rgb, scale_max, scale_step)\n\n self.template_path = template_path # under root path\n self.keyword = keyword\n if self.keyword is not None and self.keyword.name == '':\n \"\"\"\n Please note that due to the __post_init__ method of the Keyword class running before this 'name' assignment, \n its 'instances' dictionary will get a dictionary item with an empty string key.\n This means that each instance of the Keyword class that omits the 'name' parameter will be constantly \n overwritten. 
If you want to use Keyword().instances for special purposes, you must initialize 'name'.\n \"\"\"\n self.keyword.name = self.name\n\n @cached_property\n def filepath(self) -> str:\n if self._filepath:\n return self._filepath\n for dir_name in G.BASEDIR:\n filepath = os.path.join(dir_name, self.template_path, self.filename)\n if os.path.isfile(filepath):\n self._filepath = filepath\n return self._filepath\n return self.filename\n\n @cached_property\n def name(self) -> str:\n return Path(self.filename).stem\n\n @cached_property\n def image(self) -> ndarray:\n return self._imread()\n\n @cached_property\n def height(self) -> int:\n return self.image.shape[0]\n\n @cached_property\n def width(self) -> int:\n return self.image.shape[1]\n\n def _has_border(self) -> bool:\n \"\"\"\n If game running in a bordered process, coordinates need to be corrected.\n\n Returns:\n Whether the game running in a bordered process.\n \"\"\"\n actual_ratio = G.DEVICE.get_current_resolution()[0] / G.DEVICE.get_current_resolution()[1]\n template_ratio = self.resolution[0] / self.resolution[1]\n return actual_ratio != template_ratio\n\n def ratio(self, screen_height: float = None) -> float:\n \"\"\"\n Calculate the ratio of the current screen to the template image.\n \"\"\"\n if screen_height is None:\n if self._has_border():\n border = Config.BORDER[0] + Config.BORDER[2]\n else:\n border = 0\n screen_height = G.DEVICE.get_current_resolution()[1] - border\n\n return screen_height / self.resolution[1]\n\n @cached_property\n def area(self) -> tuple:\n \"\"\"\n Calculate the area of the template image on the current screen.\n\n Returns:\n Upper left and lower right corner coordinate.\n \"\"\"\n screen_resolution = G.DEVICE.get_current_resolution()\n\n if self._has_border():\n border = Config.BORDER\n else:\n border = (0, 0, 0)\n\n screen_width = screen_resolution[0] - border[1] * 2\n screen_height = screen_resolution[1] - border[0] - border[2]\n\n ratio = self.ratio(screen_height)\n x1 = screen_width / 2 + self.record_pos[0] * screen_width - self.width / 2 * ratio + border[1]\n y1 = screen_height / 2 + self.record_pos[1] * screen_width - self.height / 2 * ratio + border[0]\n x2 = screen_width / 2 + self.record_pos[0] * screen_width + self.width / 2 * ratio + border[1]\n y2 = screen_height / 2 + self.record_pos[1] * screen_width + self.height / 2 * ratio + border[0]\n return x1, y1, x2, y2" }, { "identifier": "ScriptError", "path": "zafkiel/exception.py", "snippet": "class ScriptError(Exception):\n pass" }, { "identifier": "Keyword", "path": "zafkiel/ocr/keyword.py", "snippet": "class Keyword:\n cn: str = ''\n cht: str = ''\n en: str = ''\n jp: str = ''\n # id: int # To be considered\n name: str = ''\n\n \"\"\"\n Instance attributes and methods\n TODO: Error handling for missing attributes\n \"\"\"\n\n @cached_property\n def ch(self) -> str:\n return self.cn\n\n @cached_property\n def cn_parsed(self) -> str:\n return parse_name(self.cn)\n\n @cached_property\n def en_parsed(self) -> str:\n return parse_name(self.en)\n\n @cached_property\n def jp_parsed(self) -> str:\n return parse_name(self.jp)\n\n @cached_property\n def cht_parsed(self) -> str:\n return parse_name(self.cht)\n\n def __str__(self):\n keyword_list = []\n for keyword in [self.cn, self.cht, self.en, self.jp]:\n if keyword != '':\n keyword_list.append(keyword)\n return f\"{self.__class__.__name__}({self.name})->{'/'.join(keyword_list)}\"\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def __hash__(self):\n return 
hash(self.name)\n\n def __bool__(self):\n return True\n\n def keywords_to_find(self, lang: str = None, ignore_punctuation: bool = True):\n if lang is None:\n lang = Config.SERVER_LANG\n\n # TODO: fix this refer to SRC\n if lang == 'cn':\n if ignore_punctuation:\n return [self.cn_parsed]\n else:\n return [self.cn]\n elif lang == 'en':\n if ignore_punctuation:\n return [self.en_parsed]\n else:\n return [self.en]\n elif lang == 'jp':\n if ignore_punctuation:\n return [self.jp_parsed]\n else:\n return [self.jp]\n elif lang == 'cht':\n if ignore_punctuation:\n return [self.cht_parsed]\n else:\n return [self.cht]\n else:\n if ignore_punctuation:\n return [\n self.cn_parsed,\n self.en_parsed,\n self.jp_parsed,\n self.cht_parsed,\n ]\n else:\n return [\n self.cn,\n self.en,\n self.jp,\n self.cht,\n ]\n\n \"\"\"\n Class attributes and methods\n\n Note that dataclasses inherited `Keyword` must override `instances` attribute,\n or `instances` will still be a class attribute of base class.\n ```\n @dataclass\n class DungeonNav(Keyword):\n instances: ClassVar = {}\n ```\n \"\"\"\n # Key: instance name. Value: instance object.\n instances: ClassVar = {}\n\n def __post_init__(self):\n self.__class__.instances[self.name] = self\n\n @classmethod\n def _compare(cls, name, keyword):\n return name == keyword\n\n @classmethod\n def find(cls, name, lang: str = None, ignore_punctuation: bool = True):\n \"\"\"\n Args:\n name: Name in any server or instance id.\n lang: Lang to find from. None to search the names from current server only.\n ignore_punctuation: True to remove punctuations and turn into lowercase before searching.\n\n Returns:\n Keyword instance.\n\n Raises:\n ScriptError: If nothing found.\n \"\"\"\n # Already a keyword\n if isinstance(name, Keyword):\n return name\n\n # Probably a variable name\n if isinstance(name, str) and '_' in name:\n for instance in cls.instances.values():\n if name == instance.name:\n return instance\n # Probably an in-game name\n if ignore_punctuation:\n name = parse_name(name)\n else:\n name = str(name)\n instance: Keyword\n for instance in cls.instances.values():\n for keyword in instance.keywords_to_find(\n lang=lang, ignore_punctuation=ignore_punctuation):\n if cls._compare(name, keyword):\n return instance\n\n # Not found\n raise ScriptError(f'Cannot find a {cls.__name__} instance that matches \"{name}\"')" }, { "identifier": "TextSystem", "path": "zafkiel/ocr/models.py", "snippet": "class TextSystem(TextSystem_):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.text_recognizer.rec_batch_num = 1" }, { "identifier": "OCR_MODEL", "path": "zafkiel/ocr/models.py", "snippet": "OCR_MODEL = OcrModel()" }, { "identifier": "merge_buttons", "path": "zafkiel/ocr/utils.py", "snippet": "def merge_buttons(buttons: list[BoxedResult], thres_x=20, thres_y=20) -> list[BoxedResult]:\n \"\"\"\n Args:\n buttons:\n thres_x: Merge results with horizontal box distance <= `thres_x`\n thres_y: Merge results with vertical box distance <= `thres_y`\n\n Returns:\n\n \"\"\"\n if thres_x <= 0 and thres_y <= 0:\n return buttons\n\n dic_button = {button.box: button for button in buttons}\n set_merged = set()\n for left, right in itertools.combinations(dic_button.items(), 2):\n left_box, left = left\n right_box, right = right\n if area_cross_area(left.box, right.box, thres_x=thres_x, thres_y=thres_y):\n left = _merge_boxed_result(left, right)\n dic_button[left_box] = left\n dic_button[right_box] = left\n set_merged.add(right_box)\n\n return [button for box, button in 
dic_button.items() if box not in set_merged]" }, { "identifier": "corner2area", "path": "zafkiel/ocr/utils.py", "snippet": "def corner2area(corner):\n \"\"\"\n Args:\n corner: [upper-left, upper-right, bottom-left, bottom-right]\n\n Returns:\n np.ndarray: (x1, y1, x2, y2)\n \"\"\"\n x, y = np.array(corner).T\n return np.rint([np.min(x), np.min(y), np.max(x), np.max(y)]).astype(int)" }, { "identifier": "area_pad", "path": "zafkiel/ocr/utils.py", "snippet": "def area_pad(area, pad=10):\n \"\"\"\n Inner offset an area.\n\n Args:\n area: (upper_left_x, upper_left_y, bottom_right_x, bottom_right_y).\n pad (int):\n\n Returns:\n tuple: (upper_left_x, upper_left_y, bottom_right_x, bottom_right_y).\n \"\"\"\n upper_left_x, upper_left_y, bottom_right_x, bottom_right_y = area\n return upper_left_x + pad, upper_left_y + pad, bottom_right_x - pad, bottom_right_y - pad" }, { "identifier": "crop", "path": "zafkiel/utils.py", "snippet": "def crop(image, area):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/utils/utils.py\n\n Crop image like pillow, when using opencv / numpy.\n Provides a black background if cropping outside of image.\n\n Args:\n image: Image to be cropped, usually a screenshot.\n area: Upper left and lower right corner coordinate of the area to be cropped.\n\n Returns:\n cropped image\n \"\"\"\n x1, y1, x2, y2 = map(int, map(round, area))\n h, w = image.shape[:2]\n border = np.maximum((0 - y1, y2 - h, 0 - x1, x2 - w), 0)\n x1, y1, x2, y2 = np.maximum((x1, y1, x2, y2), 0)\n image = image[y1:y2, x1:x2]\n if sum(border) > 0:\n image = cv2.copyMakeBorder(image, *border, borderType=cv2.BORDER_CONSTANT, value=(0, 0, 0))\n return image" } ]
import re import time from datetime import timedelta from difflib import SequenceMatcher from typing import Optional from pponnxcr.predict_system import BoxedResult from zafkiel.logger import logger from zafkiel.config import Config from zafkiel.decorator import cached_property from zafkiel.device.template import ImageTemplate from zafkiel.exception import ScriptError from zafkiel.ocr.keyword import Keyword from zafkiel.ocr.models import TextSystem, OCR_MODEL from zafkiel.ocr.utils import merge_buttons, corner2area, area_pad from zafkiel.utils import crop
3,845
OCR_EQUAL = 0 OCR_CONTAINS = 1 OCR_SIMILAR = 2 class OcrResultButton: def __init__(self, boxed_result: BoxedResult, matched_keyword: Optional[Keyword]): """ Args: boxed_result: BoxedResult from ppocr-onnx matched_keyword: Keyword object or None """ self.area = boxed_result.box self.search = area_pad(self.area, pad=-20) # self.button = boxed_result.box if matched_keyword is not None: self.matched_keyword = matched_keyword self.name = str(matched_keyword) else: self.matched_keyword = None self.name = boxed_result.ocr_text self.text = boxed_result.ocr_text self.score = boxed_result.score @property def is_keyword_matched(self) -> bool: return self.matched_keyword is not None def __str__(self): return self.name __repr__ = __str__ def __eq__(self, other): return str(self) == str(other) def __hash__(self): return hash(self.name) def __bool__(self): return True class Ocr: # Merge results with box distance <= thres merge_thres_x = 0 merge_thres_y = 0 def __init__(self, button: ImageTemplate, lang=None, name=None): """ Args: button: lang: If None, use in-game language name: If None, use button.name """ if lang is None: lang = Config.SERVER_LANG if name is None: name = button.name self.button: ImageTemplate = button self.lang: str = lang self.name: str = name
OCR_EQUAL = 0 OCR_CONTAINS = 1 OCR_SIMILAR = 2 class OcrResultButton: def __init__(self, boxed_result: BoxedResult, matched_keyword: Optional[Keyword]): """ Args: boxed_result: BoxedResult from ppocr-onnx matched_keyword: Keyword object or None """ self.area = boxed_result.box self.search = area_pad(self.area, pad=-20) # self.button = boxed_result.box if matched_keyword is not None: self.matched_keyword = matched_keyword self.name = str(matched_keyword) else: self.matched_keyword = None self.name = boxed_result.ocr_text self.text = boxed_result.ocr_text self.score = boxed_result.score @property def is_keyword_matched(self) -> bool: return self.matched_keyword is not None def __str__(self): return self.name __repr__ = __str__ def __eq__(self, other): return str(self) == str(other) def __hash__(self): return hash(self.name) def __bool__(self): return True class Ocr: # Merge results with box distance <= thres merge_thres_x = 0 merge_thres_y = 0 def __init__(self, button: ImageTemplate, lang=None, name=None): """ Args: button: lang: If None, use in-game language name: If None, use button.name """ if lang is None: lang = Config.SERVER_LANG if name is None: name = button.name self.button: ImageTemplate = button self.lang: str = lang self.name: str = name
@cached_property
2
2023-11-12 09:33:35+00:00
8k
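Usage note for the zafkiel record above: the `Keyword` snippet documents a registry pattern in which each dataclass subclass shadows the `instances` ClassVar so that `__post_init__` registers every new object under its `name`, and `Keyword.find()` resolves a name back to the registered instance. A minimal sketch of that pattern, assuming `Keyword` is itself decorated with `@dataclass` (as the example in its own docstring implies); the `DungeonNav` subclass and its entry are hypothetical:

from dataclasses import dataclass
from typing import ClassVar

from zafkiel.ocr.keyword import Keyword


@dataclass
class DungeonNav(Keyword):
    # Must shadow `instances`, otherwise registrations made in
    # __post_init__ would land in the base class's shared dict.
    instances: ClassVar = {}


# Instantiating registers the object under its `name` key.
calyx_golden = DungeonNav(name="Calyx_Golden", cn="拟造花萼（金）", en="Calyx (Golden)")

# Names containing "_" are resolved through the variable-name branch of find().
assert DungeonNav.find("Calyx_Golden") is calyx_golden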
medkit-lib/medkit
medkit/core/text/document.py
[ { "identifier": "dict_conv", "path": "medkit/core/dict_conv.py", "snippet": "_CLASS_NAME_KEY: str = \"_class_name\"\ndef get_class_name(class_: Type) -> str:\ndef add_class_name_to_data_dict(instance: object, data_dict: Dict[str, Any]):\ndef get_class_name_from_data_dict(data_dict: Dict[str, Any]):\n def to_dict(self) -> Dict[str, Any]:\n def from_dict(cls, data_dict: Dict[str, Any]) -> Self:\n def __init_subclass__(cls):\n def register_subclass(cls, subclass: Type[Self]):\n def get_subclass(cls, name: str) -> Optional[Type[Self]]:\n def get_subclass_for_data_dict(\n cls,\n data_dict: Dict[str, Any],\n ) -> Optional[Type[Self]]:\nclass DictConvertible(Protocol):\nclass SubclassMapping:" }, { "identifier": "Attribute", "path": "medkit/core/attribute.py", "snippet": "class Attribute(dict_conv.SubclassMapping):\n \"\"\"\n Medkit attribute, to be added to an annotation\n\n Attributes\n ----------\n label:\n The attribute label\n value:\n The value of the attribute. Should be either simple built-in types (int,\n float, bool, str) or collections of these types (list, dict, tuple). If\n you need structured complex data you should create a subclass of\n `Attribute`.\n metadata:\n The metadata of the attribute\n uid:\n The identifier of the attribute\n \"\"\"\n\n label: str\n value: Optional[Any]\n metadata: Dict[str, Any]\n uid: str\n\n def __init__(\n self,\n label: str,\n value: Optional[Any] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n ):\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.label = label\n self.value = value\n self.metadata = metadata\n\n def __init_subclass__(cls):\n Attribute.register_subclass(cls)\n super().__init_subclass__()\n\n def to_dict(self) -> Dict[str, Any]:\n attribute_dict = dict(\n uid=self.uid,\n label=self.label,\n value=self.value,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, attribute_dict)\n return attribute_dict\n\n def to_brat(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with the brat format\n \"\"\"\n\n return self.value\n\n def to_spacy(self) -> Optional[Any]:\n \"\"\"\n Return a value compatible with spaCy\n \"\"\"\n\n return self.value\n\n def copy(self) -> Attribute:\n \"\"\"\n Create a new attribute that is a copy of the current instance, but\n with a new identifier\n\n This is used when we want to duplicate an existing attribute onto a\n different annotation.\n \"\"\"\n return dataclasses.replace(self, uid=generate_id())\n\n @classmethod\n def from_dict(cls, attribute_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates an Attribute from a dict\n\n Parameters\n ----------\n attribute_dict: dict\n A dictionary from a serialized Attribute as generated by to_dict()\n \"\"\"\n\n subclass = cls.get_subclass_for_data_dict(attribute_dict)\n if subclass is not None:\n return subclass.from_dict(attribute_dict)\n\n return cls(\n uid=attribute_dict[\"uid\"],\n label=attribute_dict[\"label\"],\n value=attribute_dict[\"value\"],\n metadata=attribute_dict[\"metadata\"],\n )" }, { "identifier": "AttributeContainer", "path": "medkit/core/attribute_container.py", "snippet": "class AttributeContainer:\n \"\"\"\n Manage a list of attributes attached to another data structure.\n For example, it may be a document or an annotation.\n\n This behaves more or less like a list: calling `len()` and iterating are\n supported. 
Additional filtering is available through the `get()` method.\n\n The attributes will be stored in a :class:`~medkit.core.Store`, which can\n rely on a simple dict or something more complicated like a database.\n\n This global store may be initialized using :class:~medkit.core.GlobalStore.\n Otherwise, a default one (i.e. dict store) is used.\n \"\"\"\n\n def __init__(self, owner_id: str):\n self._store: Store = GlobalStore.get_store()\n self._owner_id = owner_id\n self._attr_ids: List[str] = []\n self._attr_ids_by_label: Dict[str, List[str]] = {}\n\n def __len__(self) -> int:\n \"\"\"Add support for calling `len()`\"\"\"\n return len(self._attr_ids)\n\n def __iter__(self) -> Iterator[Attribute]:\n \"\"\"\n Add support for iterating over an `AttributeContainer` (will yield each\n attribute)\n \"\"\"\n return iter(self.get_by_id(uid) for uid in self._attr_ids)\n\n def __getitem__(self, key: Union[int, slice]) -> Union[Attribute, List[Attribute]]:\n \"\"\"\n Add support for subscript access\n \"\"\"\n\n if isinstance(key, slice):\n return [self.get_by_id(uid) for uid in self._attr_ids[key]]\n else:\n return self.get_by_id(self._attr_ids[key])\n\n def get(self, *, label: Optional[str] = None) -> List[Attribute]:\n \"\"\"\n Return a list of the attributes of the annotation, optionally filtering\n by label.\n\n Parameters\n ----------\n label:\n Label to use to filter attributes.\n \"\"\"\n if label is None:\n return list(iter(self))\n else:\n return [self.get_by_id(uid) for uid in self._attr_ids_by_label.get(label, [])]\n\n def add(self, attr: Attribute):\n \"\"\"\n Attach an attribute to the annotation.\n\n Parameters\n ----------\n attr:\n Attribute to add.\n\n Raises\n ------\n ValueError\n If the attribute is already attached to the annotation (based on\n `attr.uid`).\n \"\"\"\n\n uid = attr.uid\n if uid in self._attr_ids:\n raise ValueError(f\"Attribute with uid {uid} already attached to annotation\")\n\n self._attr_ids.append(uid)\n self._store.store_data_item(data_item=attr, parent_id=self._owner_id)\n\n # update label index\n label = attr.label\n if label not in self._attr_ids_by_label:\n self._attr_ids_by_label[label] = []\n self._attr_ids_by_label[label].append(uid)\n\n def get_by_id(self, uid: str) -> Attribute:\n \"\"\"Return the attribute corresponding to a specific identifier.\n\n Parameters\n ----------\n uid:\n Identifier of the attribute to return.\n \"\"\"\n\n attr = self._store.get_data_item(uid)\n if attr is None:\n raise ValueError(f\"No known attribute with uid '{uid}'\")\n return typing.cast(Attribute, attr)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, self.__class__):\n return False\n return self.get() == other.get()\n\n def __repr__(self) -> str:\n attrs = self.get()\n return f\"{self.__class__.__name__}(ann_id={self._owner_id!r}, attrs={attrs!r})\"" }, { "identifier": "generate_deterministic_id", "path": "medkit/core/id.py", "snippet": "def generate_deterministic_id(reference_id: str) -> uuid.UUID:\n \"\"\"Generate a deterministic UUID based on reference_id.\n The generated UUID will be the same if the reference_id is the same.\n\n Parameters\n ----------\n reference_id\n A string representation of an UID\n\n Returns\n -------\n uuid.UUID\n The UUID object\n \"\"\"\n rng = random.Random(reference_id)\n uid = uuid.UUID(int=rng.getrandbits(128))\n return uid" }, { "identifier": "generate_id", "path": "medkit/core/id.py", "snippet": "def generate_id() -> str:\n return str(uuid.uuid1())" }, { "identifier": "span_utils", "path": 
"medkit/core/text/span_utils.py", "snippet": "def _spans_have_same_length_as_text(text, spans):\ndef _lists_have_same_dimension(list_1, list_2):\ndef _list_is_sorted(list_1):\ndef _ranges_are_within_text(text, ranges):\ndef _positions_are_within_text(text, positions):\ndef replace(\n text: str,\n spans: List[AnySpan],\n ranges: List[Tuple[int, int]],\n replacement_texts: List[str],\n) -> Tuple[str, List[AnySpan]]:\ndef _replace_in_spans(spans, ranges, replacement_lengths):\ndef remove(\n text: str,\n spans: List[AnySpan],\n ranges: List[Tuple[int, int]],\n) -> Tuple[str, List[AnySpan]]:\ndef _remove_in_spans(spans, ranges):\ndef extract(\n text: str,\n spans: List[AnySpan],\n ranges: List[Tuple[int, int]],\n) -> Tuple[str, List[AnySpan]]:\ndef _extract_in_spans(spans, ranges):\ndef insert(\n text: str,\n spans: List[AnySpan],\n positions: List[int],\n insertion_texts: List[str],\n) -> Tuple[str, List[AnySpan]]:\ndef _insert_in_spans(spans, positions, insertion_lengths):\ndef move(\n text: str,\n spans: List[AnySpan],\n range: Tuple[int, int],\n destination: int,\n) -> Tuple[str, List[AnySpan]]:\ndef _move_in_spans(spans, range, destination):\ndef concatenate(texts: List[str], all_spans: List[List[AnySpan]]) -> Tuple[str, List[AnySpan]]:\ndef normalize_spans(spans: List[AnySpan]) -> List[Span]:\ndef clean_up_gaps_in_normalized_spans(spans: List[Span], text: str, max_gap_length: int = 3):" }, { "identifier": "Segment", "path": "medkit/core/text/annotation.py", "snippet": "class Segment(TextAnnotation):\n \"\"\"\n Text segment referencing part of an :class:`~medkit.core.text.TextDocument`.\n\n Attributes\n ----------\n uid:\n The segment identifier.\n label:\n The label for this segment (e.g., SENTENCE)\n text:\n Text of the segment.\n spans:\n List of spans indicating which parts of the segment text correspond to\n which part of the document's full text.\n attrs:\n Attributes of the segment. 
Stored in a\n :class:{~medkit.core.AttributeContainer} but can be passed as a list at\n init.\n metadata:\n The metadata of the segment\n keys:\n Pipeline output keys to which the segment belongs to.\n \"\"\"\n\n spans: List[AnySpan]\n text: str\n\n def __init__(\n self,\n label: str,\n text: str,\n spans: List[AnySpan],\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n store: Optional[Store] = None,\n attr_container_class: Type[AttributeContainer] = AttributeContainer,\n ):\n super().__init__(\n label=label,\n attrs=attrs,\n metadata=metadata,\n uid=uid,\n attr_container_class=attr_container_class,\n )\n\n self.text = text\n self.spans = spans\n\n # check if spans length is equal to text length\n length = sum(s.length for s in self.spans)\n assert len(self.text) == length, \"Spans length does not match text length\"\n\n def to_dict(self) -> Dict[str, Any]:\n spans = [s.to_dict() for s in self.spans]\n attrs = [a.to_dict() for a in self.attrs]\n segment_dict = dict(\n uid=self.uid,\n label=self.label,\n text=self.text,\n spans=spans,\n attrs=attrs,\n metadata=self.metadata,\n )\n dict_conv.add_class_name_to_data_dict(self, segment_dict)\n return segment_dict\n\n @classmethod\n def from_dict(cls, segment_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Segment from a dict\n\n Parameters\n ----------\n segment_dict: dict\n A dictionary from a serialized segment as generated by to_dict()\n \"\"\"\n\n spans = [AnySpan.from_dict(s) for s in segment_dict[\"spans\"]]\n attrs = [Attribute.from_dict(a) for a in segment_dict[\"attrs\"]]\n return cls(\n uid=segment_dict[\"uid\"],\n label=segment_dict[\"label\"],\n text=segment_dict[\"text\"],\n spans=spans,\n attrs=attrs,\n metadata=segment_dict[\"metadata\"],\n )" }, { "identifier": "TextAnnotation", "path": "medkit/core/text/annotation.py", "snippet": "class TextAnnotation(abc.ABC, dict_conv.SubclassMapping):\n \"\"\"Base abstract class for all text annotations\n\n Attributes\n ----------\n uid:\n Unique identifier of the annotation.\n label:\n The label for this annotation (e.g., SENTENCE)\n attrs:\n Attributes of the annotation. Stored in a\n :class:{~medkit.core.AttributeContainer} but can be passed as a list at\n init.\n metadata:\n The metadata of the annotation\n keys:\n Pipeline output keys to which the annotation belongs to.\n \"\"\"\n\n uid: str\n label: str\n attrs: AttributeContainer\n metadata: Dict[str, Any]\n keys: Set[str]\n\n @abc.abstractmethod\n def __init__(\n self,\n label: str,\n attrs: Optional[List[Attribute]] = None,\n metadata: Optional[Dict[str, Any]] = None,\n uid: Optional[str] = None,\n attr_container_class: Type[AttributeContainer] = AttributeContainer,\n ):\n if attrs is None:\n attrs = []\n if metadata is None:\n metadata = {}\n if uid is None:\n uid = generate_id()\n\n self.uid = uid\n self.label = label\n self.metadata = metadata\n self.keys = set()\n\n self.attrs = attr_container_class(owner_id=self.uid)\n for attr in attrs:\n self.attrs.add(attr)\n\n def __init_subclass__(cls):\n TextAnnotation.register_subclass(cls)\n super().__init_subclass__()\n\n @classmethod\n def from_dict(cls, ann_dict: Dict[str, Any]) -> Self:\n subclass = cls.get_subclass_for_data_dict(ann_dict)\n if subclass is None:\n raise NotImplementedError(\n \"TextAnnotation is an abstract class. Its class method `from_dict` is\"\n \" only used for calling the correct subclass `from_dict`. 
Subclass is\"\n f\" {subclass}\"\n )\n return subclass.from_dict(ann_dict)\n\n def to_dict(self) -> Dict[str, Any]:\n raise NotImplementedError()" }, { "identifier": "TextAnnotationContainer", "path": "medkit/core/text/annotation_container.py", "snippet": "class TextAnnotationContainer(AnnotationContainer[TextAnnotation]):\n \"\"\"\n Manage a list of text annotations belonging to a text document.\n\n This behaves more or less like a list: calling `len()` and iterating are\n supported. Additional filtering is available through the `get()` method.\n\n Also provides retrieval of entities, segments, relations, and handling of\n raw segment.\n \"\"\"\n\n def __init__(self, doc_id: str, raw_segment: Segment):\n super().__init__(doc_id=doc_id)\n\n # auto-generated raw segment\n # not stored with other annotations but injected in calls to get()\n # and get_by_id()\n self.raw_segment = raw_segment\n\n self._segment_ids: List[str] = []\n self._entity_ids: List[str] = []\n self._relation_ids: List[str] = []\n self._relation_ids_by_source_id: Dict[str, List[str]] = {}\n\n @property\n def segments(self) -> List[Segment]:\n \"\"\"Return the list of segments\"\"\"\n return self.get_segments()\n\n @property\n def entities(self) -> List[Entity]:\n \"\"\"Return the list of entities\"\"\"\n return self.get_entities()\n\n @property\n def relations(self) -> List[Relation]:\n \"\"\"Return the list of relations\"\"\"\n return self.get_relations()\n\n def add(self, ann: TextAnnotation):\n if ann.label == self.raw_segment.label:\n raise RuntimeError(f\"Cannot add annotation with reserved label {self.raw_segment.label}\")\n\n super().add(ann)\n\n # update entity/segments/relations index\n if isinstance(ann, Entity):\n self._entity_ids.append(ann.uid)\n elif isinstance(ann, Segment):\n self._segment_ids.append(ann.uid)\n elif isinstance(ann, Relation):\n self._relation_ids.append(ann.uid)\n if ann.source_id not in self._relation_ids_by_source_id:\n self._relation_ids_by_source_id[ann.source_id] = []\n self._relation_ids_by_source_id[ann.source_id].append(ann.uid)\n\n def get(self, *, label: Optional[str] = None, key: Optional[str] = None) -> List[TextAnnotation]:\n # inject raw segment\n if label == self.raw_segment.label and key is None:\n return [self.raw_segment]\n return super().get(label=label, key=key)\n\n def get_by_id(self, uid) -> TextAnnotation:\n # inject raw segment\n if uid == self.raw_segment.uid:\n return self.raw_segment\n return super().get_by_id(uid)\n\n def get_segments(self, *, label: Optional[str] = None, key: Optional[str] = None) -> List[Segment]:\n \"\"\"\n Return a list of the segments of the document (not including entities),\n optionally filtering by label or key.\n\n Parameters\n ----------\n label:\n Label to use to filter segments.\n key:\n Key to use to filter segments.\n \"\"\"\n\n # get ids filtered by label/key\n uids = self.get_ids(label=label, key=key)\n # keep only segment ids\n uids = (uid for uid in uids if uid in self._segment_ids)\n\n segments = [self.get_by_id(uid) for uid in uids]\n return typing.cast(List[Segment], segments)\n\n def get_entities(self, *, label: Optional[str] = None, key: Optional[str] = None) -> List[Entity]:\n \"\"\"\n Return a list of the entities of the document, optionally filtering\n by label or key.\n\n Parameters\n ----------\n label:\n Label to use to filter entities.\n key:\n Key to use to filter entities.\n \"\"\"\n\n # get ids filtered by label/key\n uids = self.get_ids(label=label, key=key)\n # keep only entity ids\n uids = (uid for uid in uids 
if uid in self._entity_ids)\n\n entities = [self.get_by_id(uid) for uid in uids]\n return typing.cast(List[Entity], entities)\n\n def get_relations(\n self,\n *,\n label: Optional[str] = None,\n key: Optional[str] = None,\n source_id: Optional[str] = None,\n ) -> List[Relation]:\n \"\"\"\n Return a list of the relations of the document, optionally filtering\n by label, key or source entity.\n\n Parameters\n ----------\n label:\n Label to use to filter relations.\n key:\n Key to use to filter relations.\n source_id:\n Identifier of the source entity to use to filter relations.\n \"\"\"\n\n # get ids filtered by label/key\n uids = self.get_ids(label=label, key=key)\n # keep only relation ids\n # (either all relations or relations with specific source)\n if source_id is None:\n uids = (uid for uid in uids if uid in self._relation_ids)\n else:\n relation_ids = self._relation_ids_by_source_id.get(source_id, [])\n uids = (uid for uid in uids if uid in relation_ids)\n\n entities = [self.get_by_id(uid) for uid in uids]\n return typing.cast(List[Relation], entities)" }, { "identifier": "Span", "path": "medkit/core/text/span.py", "snippet": "class Span(AnySpan):\n \"\"\"\n Slice of text extracted from the original text\n\n Parameters\n ----------\n start: int\n Index of the first character in the original text\n end: int\n Index of the last character in the original text, plus one\n \"\"\"\n\n start: int\n end: int\n\n @property\n def length(self):\n return self.end - self.start\n\n def to_dict(self) -> Dict[str, Any]:\n span_dict = dict(start=self.start, end=self.end)\n dict_conv.add_class_name_to_data_dict(self, span_dict)\n return span_dict\n\n def overlaps(self, other: Span):\n \"\"\"Test if 2 spans reference at least one character in common\"\"\"\n return (self.start < other.end) and (self.end > other.start)\n\n @classmethod\n def from_dict(cls, span_dict: Dict[str, Any]) -> Self:\n \"\"\"\n Creates a Span from a dict\n\n Parameters\n ----------\n span_dict: dict\n A dictionary from a serialized span as generated by to_dict()\n \"\"\"\n return cls(start=span_dict[\"start\"], end=span_dict[\"end\"])" } ]
import dataclasses import os from pathlib import Path from typing import Any, ClassVar, Dict, List, Optional, Sequence from typing_extensions import Self from medkit.core import Attribute, AttributeContainer, dict_conv from medkit.core.id import generate_deterministic_id, generate_id from medkit.core.text import span_utils from medkit.core.text.annotation import Segment, TextAnnotation from medkit.core.text.annotation_container import TextAnnotationContainer from medkit.core.text.span import Span
5,534
from __future__ import annotations __all__ = ["TextDocument"] @dataclasses.dataclass(init=False) class TextDocument(dict_conv.SubclassMapping): """ Document holding text annotations Annotations must be subclasses of `TextAnnotation`. Attributes ---------- uid: Unique identifier of the document. text: Full document text. anns: Annotations of the document. Stored in an :class:`~.text.TextAnnotationContainer` but can be passed as a list at init. attrs: Attributes of the document. Stored in an :class:`~.core.AttributeContainer` but can be passed as a list at init metadata: Document metadata. raw_segment: Auto-generated segment containing the full unprocessed document text. To get the raw text as an annotation to pass to processing operations: >>> doc = TextDocument(text="hello") >>> raw_text = doc.anns.get(label=TextDocument.RAW_LABEL)[0] """ RAW_LABEL: ClassVar[str] = "RAW_TEXT" uid: str anns: TextAnnotationContainer attrs: AttributeContainer metadata: Dict[str, Any] raw_segment: Segment def __init__( self, text: str,
from __future__ import annotations __all__ = ["TextDocument"] @dataclasses.dataclass(init=False) class TextDocument(dict_conv.SubclassMapping): """ Document holding text annotations Annotations must be subclasses of `TextAnnotation`. Attributes ---------- uid: Unique identifier of the document. text: Full document text. anns: Annotations of the document. Stored in an :class:`~.text.TextAnnotationContainer` but can be passed as a list at init. attrs: Attributes of the document. Stored in an :class:`~.core.AttributeContainer` but can be passed as a list at init metadata: Document metadata. raw_segment: Auto-generated segment containing the full unprocessed document text. To get the raw text as an annotation to pass to processing operations: >>> doc = TextDocument(text="hello") >>> raw_text = doc.anns.get(label=TextDocument.RAW_LABEL)[0] """ RAW_LABEL: ClassVar[str] = "RAW_TEXT" uid: str anns: TextAnnotationContainer attrs: AttributeContainer metadata: Dict[str, Any] raw_segment: Segment def __init__( self, text: str,
anns: Optional[Sequence[TextAnnotation]] = None,
7
2023-11-13 16:28:56+00:00
8k
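Usage note for the medkit record above: the quoted snippets are enough to assemble a document by hand, since `Segment` checks that its spans sum to the length of its text and `TextDocument` exposes the raw text as an auto-generated segment. A minimal sketch, assuming `TextDocument` is importable from the `medkit.core.text.document` module named in `file_path`; the labels and example text are invented:

from medkit.core import Attribute
from medkit.core.text.annotation import Segment
from medkit.core.text.document import TextDocument
from medkit.core.text.span import Span

doc = TextDocument(text="Patient denies chest pain.")

# The full unprocessed text is available as the auto-generated raw segment.
raw = doc.anns.get(label=TextDocument.RAW_LABEL)[0]

# "chest pain" occupies characters 15..25 of the text above; the spans'
# total length must match the segment text, as asserted in Segment.__init__.
seg = Segment(
    label="symptom",
    text="chest pain",
    spans=[Span(start=15, end=25)],
    attrs=[Attribute(label="negated", value=True)],
)
doc.anns.add(seg)

assert doc.anns.get(label="symptom")[0].attrs.get(label="negated")[0].value is True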
eidolon-ai/eidOS
sdk/eidos_sdk/system/agent_machine.py
[ { "identifier": "AgentController", "path": "sdk/eidos_sdk/system/agent_controller.py", "snippet": "class AgentController:\n name: str\n agent: object\n programs: typing.Dict[str, EidosHandler]\n actions: typing.Dict[str, EidosHandler]\n\n def __init__(self, name, agent):\n self.name = name\n self.programs = {}\n self.actions = {}\n self.agent = agent\n for handler in get_handlers(self.agent):\n if handler.extra[\"type\"] == \"program\":\n self.programs[handler.name] = handler\n else:\n self.actions[handler.name] = handler\n\n async def start(self, app: FastAPI):\n logger.info(f\"Starting agent '{self.name}'\")\n for handler in [*self.programs.values(), *self.actions.values().__reversed__()]:\n path = f\"/agents/{self.name}\"\n handler_name = handler.name\n if handler.extra[\"type\"] == \"program\":\n path += f\"/programs/{handler_name}\"\n else:\n path += f\"/processes/{{process_id}}/actions/{handler_name}\"\n endpoint = self.process_action(handler)\n app.add_api_route(\n path,\n endpoint=endpoint,\n methods=[\"POST\"],\n tags=[self.name],\n responses={\n 202: {\"model\": AsyncStateResponse},\n 200: {\"model\": self.create_response_model(handler)},\n },\n description=handler.description(self.agent, handler),\n )\n\n app.add_api_route(\n f\"/agents/{self.name}/processes\",\n endpoint=self.list_processes,\n methods=[\"GET\"],\n response_model=ListProcessesResponse,\n tags=[self.name],\n )\n\n app.add_api_route(\n f\"/agents/{self.name}/processes/{{process_id}}/status\",\n endpoint=self.get_process_info,\n methods=[\"GET\"],\n response_model=SyncStateResponse,\n tags=[self.name],\n )\n\n # todo, unregister routes\n def stop(self, app: FastAPI):\n pass\n\n async def restart(self, app: FastAPI):\n self.stop(app)\n await self.start(app)\n\n def process_action(self, handler: EidosHandler):\n async def run_program(\n request: Request,\n background_tasks: BackgroundTasks,\n process_id: typing.Optional[str] = None,\n **kwargs,\n ):\n callback = request.headers.get(\"callback-url\")\n execution_mode = request.headers.get(\"execution-mode\", \"async\" if callback else \"sync\").lower()\n\n if not process_id:\n if not handler.extra[\"type\"] == \"program\":\n raise HTTPException(\n status_code=400,\n detail=f'Action \"{handler.name}\" is not an initializer, but no process_id was provided',\n )\n process = await ProcessDoc.create(agent=self.name, state=\"processing\", data=dict(action=handler.name))\n process_id = process.record_id\n else:\n process = await self.get_latest_process_event(process_id)\n if not process:\n raise HTTPException(status_code=404, detail=\"Process not found\")\n if process.state not in handler.extra[\"allowed_states\"]:\n logger.warn(f\"Action {handler.name} cannot process state {process.state}. 
Allowed states: {handler.extra['allowed_states']}\")\n raise HTTPException(\n status_code=409,\n detail=f'Action \"{handler.name}\" cannot process state \"{process.state}\"',\n )\n process = await process.update(\n agent=self.name, record_id=process_id, state=\"processing\", data=dict(action=handler.name)\n )\n RequestContext.set(\"process_id\", process_id)\n\n async def run_and_store_response():\n try:\n sig = inspect.signature(handler.fn)\n if \"process_id\" in dict(sig.parameters):\n kwargs[\"process_id\"] = process_id\n response = await handler.fn(self.agent, **kwargs)\n if isinstance(response, AgentState):\n state = response.name\n data = to_jsonable_python(response.data)\n else:\n state = \"terminated\"\n data = to_jsonable_python(response)\n doc = await process.update(\n state=state,\n data=data,\n )\n except HTTPException as e:\n doc = await process.update(\n state=\"http_error\",\n data=dict(detail=e.detail, status_code=e.status_code),\n )\n if e.status_code >= 500:\n logging.exception(\"Unhandled error raised by handler\")\n else:\n logging.debug(f\"Handler {handler.name} raised a http error\", exc_info=True)\n except Exception as e:\n doc = await process.update(\n state=\"unhandled_error\",\n data=dict(error=str(e)),\n )\n logging.exception(\"Unhandled error raised by handler\")\n if callback:\n raise Exception(\"Not implemented\")\n return doc\n\n if execution_mode == \"sync\":\n state = await run_and_store_response()\n return self.doc_to_response(state)\n else:\n background_tasks.add_task(run_and_store_response)\n return JSONResponse(AsyncStateResponse(process_id=process_id).model_dump(), 202)\n\n logger.debug(f\"Registering action {handler.name} for program {self.name}\")\n sig = inspect.signature(run_program)\n params = dict(sig.parameters)\n model: typing.Type[BaseModel] = handler.input_model_fn(self.agent, handler)\n for field in model.model_fields:\n kwargs = dict(annotation=model.model_fields[field].annotation)\n if isinstance(model.model_fields[field], Body) or isinstance(model.model_fields[field], Param):\n kwargs[\"annotation\"] = typing.Annotated[model.model_fields[field].annotation, model.model_fields[field]]\n if model.model_fields[field].default is not PydanticUndefined:\n kwargs[\"default\"] = model.model_fields[field].default\n\n params[field] = Parameter(field, Parameter.KEYWORD_ONLY, **kwargs)\n if handler.extra[\"type\"] == \"program\":\n del params[\"process_id\"]\n else:\n replace: Parameter = params[\"process_id\"].replace(annotation=str)\n params[\"process_id\"] = replace\n del params[\"kwargs\"]\n run_program.__signature__ = sig.replace(parameters=params.values())\n return run_program\n\n async def get_process_info(self, process_id: str):\n latest_record = await self.get_latest_process_event(process_id)\n return self.doc_to_response(latest_record)\n\n async def list_processes(\n self,\n request: Request,\n limit: int = 20,\n skip: int = 0,\n sort: typing.Literal[\"ascending\", \"descending\"] = \"ascending\",\n ):\n query = dict(agent=self.name)\n count = await AgentOS.symbolic_memory.count(ProcessDoc.collection, query)\n cursor = AgentOS.symbolic_memory.find(\n ProcessDoc.collection, query, sort=dict(updated=1 if sort == \"ascending\" else -1), skip=skip\n )\n acc = []\n async for doc in cursor:\n process = ProcessDoc.model_validate(doc)\n acc.append(\n StateSummary(\n process_id=process.record_id,\n state=process.state,\n available_actions=self.get_available_actions(process.state),\n )\n )\n if len(acc) == limit:\n break\n if len(acc) + skip <= 
count:\n next_page_url = f\"{request.url}agents/{self.name}/processes/?limit={limit}&skip={skip + limit}\"\n else:\n next_page_url = None\n return JSONResponse(\n ListProcessesResponse(\n total=count,\n processes=acc,\n next=next_page_url,\n ).model_dump(),\n 200,\n )\n\n def doc_to_response(self, latest_record: ProcessDoc):\n if not latest_record:\n return JSONResponse(dict(detail=\"Process not found\"), 404)\n elif latest_record.state == \"unhandled_error\":\n return JSONResponse(latest_record.data, 500)\n elif latest_record.state == \"http_error\":\n return JSONResponse(\n dict(detail=latest_record.data[\"detail\"]),\n latest_record.data[\"status_code\"],\n )\n else:\n return JSONResponse(\n SyncStateResponse(\n process_id=latest_record.record_id,\n state=latest_record.state,\n data=latest_record.data,\n available_actions=self.get_available_actions(latest_record.state),\n ).model_dump(),\n 200,\n )\n\n def get_available_actions(self, state):\n return [action for action, handler in self.actions.items() if state in handler.extra[\"allowed_states\"]]\n\n @staticmethod\n async def get_latest_process_event(process_id) -> ProcessDoc:\n return await ProcessDoc.find(query=dict(_id=process_id), sort=dict(updated=-1))\n\n def create_response_model(self, handler: EidosHandler):\n # if we want, we can calculate the literal state and allowed actions statically for most actions. Not for now though.\n fields = {key: (fieldinfo.annotation, fieldinfo) for key, fieldinfo in SyncStateResponse.model_fields.items()}\n return_type = handler.output_model_fn(self.agent, handler)\n if inspect.isclass(return_type) and issubclass(return_type, AgentState):\n return_type = return_type.model_fields[\"data\"].annotation\n fields[\"data\"] = (\n return_type,\n Field(..., description=fields[\"data\"][1].description),\n )\n return create_model(f\"{handler.name.capitalize()}ResponseModel\", **fields)" }, { "identifier": "AnnotatedReference", "path": "sdk/eidos_sdk/system/reference_model.py", "snippet": "class AnnotatedReference(Reference):\n \"\"\"\n Helper class to manage References with defaults.\n\n Default is set to the class name, which should be as a builtin pointing to the FQN of the class\n\n Example:\n class MySpec(BaseModel):\n ref1: AnnotatedReference[MyBound] = Field(description=\"My description\")\n\n Note:\n The description can still be added via a Field annotation without affecting default behavior\n \"\"\"\n\n def __class_getitem__(cls, params) -> Type[Reference]:\n if not isinstance(params, tuple):\n params = (params, params.__name__)\n return Annotated[Reference[params], Field(default_factory=Reference[params])]" }, { "identifier": "Specable", "path": "sdk/eidos_sdk/system/reference_model.py", "snippet": "class Specable(Generic[T]):\n \"\"\"\n A generic type which can be used to describe a specable type. 
Specable types are expected to accept \"spec\" in kwarg.\n If Specable is not used, There will be no spec validation and the spec will be passed through as-is.\n \"\"\"\n\n spec: T\n\n def __init__(self, spec: T, **kwargs: object):\n self.spec = spec" }, { "identifier": "AgentResource", "path": "sdk/eidos_sdk/system/resources/agent_resource.py", "snippet": "class AgentResource(Resource):\n kind: Literal[\"Agent\"] = \"Agent\"\n spec: Reference" }, { "identifier": "Resource", "path": "sdk/eidos_sdk/system/resources/resources_base.py", "snippet": "class Resource(BaseModel, extra=\"allow\"):\n apiVersion: Literal[\"eidolon/v1\"]\n kind: str\n metadata: Metadata = Metadata()\n\n @classmethod\n def kind_literal(cls) -> Optional[str]:\n return getattr(cls.model_fields[\"kind\"].annotation, \"__args__\", [None])[0]\n\n def promote(self, clazz: Type[T]) -> T:\n return clazz.model_validate(self.model_dump())" }, { "identifier": "AgentOS", "path": "sdk/eidos_sdk/agent_os.py", "snippet": "class AgentOS:\n _resources: Dict[str, Dict[str, Tuple[\"Resource\", str]]] = ... # noqa: F821\n file_memory: \"FileMemory\" = ... # noqa: F821\n symbolic_memory: \"SymbolicMemory\" = ... # noqa: F821\n similarity_memory: \"SimilarityMemory\" = ... # noqa: F821\n security_manager: \"SecurityManager\" = ... # noqa: F821\n\n @classmethod\n def _get_or_load_resources(cls) -> Dict[str, Dict[str, Tuple[Resource, str]]]:\n if cls._resources is ...:\n from eidos_sdk.builtins.code_builtins import named_builtins\n\n cls._resources = {}\n for resource in named_builtins():\n cls.register_resource(resource, source=\"builtin\")\n for resource in load_resources(pathlib.Path(__file__).parent / \"builtins\" / \"resources\"):\n cls.register_resource(resource, source=\"builtin\")\n\n return cls._resources\n\n @classmethod\n def load_machine(cls, machine):\n cls.file_memory = machine.memory.file_memory\n cls.symbolic_memory = machine.memory.symbolic_memory\n cls.similarity_memory = machine.memory.similarity_memory\n cls.security_manager = machine.security_manager\n\n @classmethod\n def register_resource(cls, resource: Resource, source=None): # noqa: F821\n resources = cls._get_or_load_resources()\n if resource.kind not in resources:\n resources[resource.kind] = {}\n bucket = resources[resource.kind]\n if resource.metadata.name in bucket:\n if bucket[resource.metadata.name][1] == \"builtin\":\n logger.info(f\"Overriding builtin resource '{resource.kind}.{resource.metadata.name}'\")\n else:\n raise ValueError(\n f\"Resource {resource.metadata.name} already registered by {bucket[resource.metadata.name][1]}\"\n )\n logger.debug(f\"Registering resource {resource.kind}.{resource.metadata.name}\")\n bucket[resource.metadata.name] = (resource, source)\n\n @classmethod\n def get_resources(cls, kind: Type[T]) -> Dict[str, T]: # noqa: F821\n return {k: tu[0].promote(kind) for k, tu in cls._get_or_load_resources().get(kind.kind_literal(), {}).items()}\n\n @classmethod\n def get_resource(cls, kind: Type[T], name: str, default=...) 
-> T:\n bucket = kind.kind_literal()\n try:\n return cls._get_or_load_resources()[bucket][name][0].promote(kind)\n except KeyError:\n if default is not ...:\n return default\n raise ValueError(f\"Resource {name} not found in bucket {bucket}\")\n\n @classmethod\n def get_resource_source(cls, bucket, name: str) -> str:\n try:\n return cls._get_or_load_resources()[bucket][name][1]\n except KeyError:\n raise ValueError(f\"Resource {name} not found in bucket {bucket}\")\n\n @classmethod\n def reset(cls):\n cls._resources = ...\n cls.file_memory = ...\n cls.symbolic_memory = ...\n cls.similarity_memory = ...\n cls.embedder = ..." }, { "identifier": "FileMemory", "path": "sdk/eidos_sdk/memory/file_memory.py", "snippet": "class FileMemory(ABC):\n \"\"\"\n Abstract base class representing the file memory interface for an agent.\n\n This class defines the essential file operations that an agent's memory component\n must support. It includes starting and stopping the file memory processes,\n reading from a file, and writing to a file within the agent's operational context.\n\n All methods in this class are abstract and must be implemented by a subclass\n that provides the specific logic for handling file operations related to the\n agent's memory.\n \"\"\"\n\n @abstractmethod\n def start(self):\n \"\"\"\n Starts the memory implementation.\n \"\"\"\n pass\n\n @abstractmethod\n def stop(self):\n \"\"\"\n Stops the memory implementation.\n \"\"\"\n pass\n\n @abstractmethod\n def read_file(self, file_path: str) -> bytes:\n \"\"\"\n Reads the contents of a file specified by `file_path` within the context\n of an agent call. The context of the call provides additional information\n that may influence how the file is read.\n :param file_path: The path to the file to be read.\n :return: bytes: The contents of the file as a bytes object.\n \"\"\"\n pass\n\n @abstractmethod\n def write_file(self, file_path: str, file_contents: bytes) -> None:\n \"\"\"\n Writes the given `file_contents` to the file specified by `file_path`\n within the context of an agent call. This method ensures that the file is\n written in the appropriate location and manner as dictated by the call context.\n\n :param file_path: The path to the file where the contents should be written.\n :param file_contents: The contents to write to the file.\n \"\"\"\n pass\n\n @abstractmethod\n def delete_file(self, file_path: str) -> None:\n \"\"\"\n Deletes the file specified by `file_path` within the context of an agent call.\n This method ensures that the file is deleted in the appropriate location and\n manner as dictated by the call context.\n\n :param file_path: The path to the file to be deleted.\n \"\"\"\n pass\n\n @abstractmethod\n def mkdir(self, directory: str, exist_ok: bool = False):\n pass\n\n @abstractmethod\n def exists(self, file_name: str):\n pass" }, { "identifier": "SymbolicMemory", "path": "sdk/eidos_sdk/memory/semantic_memory.py", "snippet": "class SymbolicMemory(ABC):\n \"\"\"\n Abstract base class for a symbolic memory component within an agent.\n\n This class defines the contract for symbolic memory operations such as starting\n and stopping the memory service, and CRUD (Create, Read, Update, Delete) operations\n on symbolic data. 
Implementations of this class are expected to manage collections\n of symbols, providing a high-level interface to store and retrieve symbolic information.\n \"\"\"\n\n @abstractmethod\n def start(self):\n \"\"\"\n Prepares the symbolic memory for operation, which may include tasks like\n allocating resources or initializing connections to databases.\n \"\"\"\n pass\n\n @abstractmethod\n def stop(self):\n \"\"\"\n Properly shuts down the symbolic memory, ensuring that any resources are released\n or any established connections are terminated.\n \"\"\"\n pass\n\n @abstractmethod\n async def count(self, symbol_collection: str, query: dict[str, Any]) -> int:\n \"\"\"\n Searches for symbols within a specified collection that match the given query and returns the number matching.\n\n Args:\n symbol_collection (str): The name of the collection to search within.\n query (dict[str, Any]): The search criteria used to filter symbols.\n\n Returns:\n int: The number of symbols that match the query.\n \"\"\"\n pass\n\n @abstractmethod\n def find(\n self,\n symbol_collection: str,\n query: dict[str, Any],\n projection: Union[List[str], Dict[str, int]] = None,\n sort: dict = None,\n skip: int = None,\n ) -> AsyncIterable[dict[str, Any]]:\n \"\"\"\n Searches for symbols within a specified collection that match the given query.\n\n Args:\n symbol_collection (str): The name of the collection to search within.\n query (dict[str, Any]): The search criteria used to filter symbols.\n projection (Union[List[str], Dict[str, int]]): The fields to include or exclude from the results. If a list,\n the fields will be included. If a dictionary, the fields will be included or excluded based on the\n value of the dictionary. A value of 1 will include the field, and a value of 0 will exclude it.\n sort (dict): The fields to sort the results by. The key is the field to sort by, and the value is the direction\n to sort by. A value of 1 will sort in ascending order, and a value of -1 will sort in descending order.\n skip (int): The number of results to skip.\n\n Returns:\n Iterable[dict[str, Any]]: A list of symbols that match the query, each represented as a dictionary.\n \"\"\"\n pass\n\n @abstractmethod\n async def find_one(\n self, symbol_collection: str, query: dict[str, Any], sort: dict[str, int] = None\n ) -> Optional[dict[str, Any]]:\n \"\"\"\n Searches for a single symbol within a specified collection that matches the given query.\n\n Args:\n symbol_collection (str): The name of the collection to search within.\n query (dict[str, Any]): The search criteria used to filter symbols.\n sort (dict[str, int]): The fields to sort the results by. 
The key is the field to sort by, and the value is the direction\n\n Returns:\n Optional[dict[str, Any]]: A single symbol that matches the query, represented as a dictionary,\n or None if no match is found.\n \"\"\"\n pass\n\n @abstractmethod\n async def insert(self, symbol_collection: str, documents: list[dict[str, Any]]) -> None:\n \"\"\"\n Inserts multiple symbols into the specified collection.\n\n Args:\n symbol_collection (str): The name of the collection where symbols will be inserted.\n documents (list[dict[str, Any]]): A list of symbols to insert, each represented as a dictionary.\n\n Returns:\n None\n \"\"\"\n pass\n\n @abstractmethod\n async def insert_one(self, symbol_collection: str, document: dict[str, Any]) -> None:\n \"\"\"\n Inserts a single symbol into the specified collection.\n\n Args:\n symbol_collection (str): The name of the collection where the symbol will be inserted.\n document (dict[str, Any]): The symbol to insert, represented as a dictionary.\n\n Returns:\n None\n \"\"\"\n pass\n\n @abstractmethod\n async def upsert_one(self, symbol_collection: str, document: dict[str, Any], query: dict[str, Any]) -> None:\n \"\"\"\n Updates a single symbol in the specified collection based on the query, or inserts it if it does not exist.\n\n Args:\n symbol_collection (str): The name of the collection where the symbol will be upserted.\n document (dict[str, Any]): The symbol to upsert, represented as a dictionary.\n query (dict[str, Any]): The search criteria used to find the symbol to update.\n\n Returns:\n None\n \"\"\"\n pass\n\n @abstractmethod\n async def update_many(self, symbol_collection: str, query: dict[str, Any], document: dict[str, Any]) -> None:\n pass\n\n @abstractmethod\n async def delete(self, symbol_collection, query):\n pass" }, { "identifier": "SimilarityMemory", "path": "sdk/eidos_sdk/memory/similarity_memory.py", "snippet": "class SimilarityMemory(Specable[SimilarityMemorySpec]):\n embedder: Embedding\n vector_store: VectorStore\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.embedder = self.spec.embedder.instantiate()\n self.vector_store = self.spec.vector_store.instantiate()\n\n def start(self):\n self.embedder.start()\n self.vector_store.start()\n\n def stop(self):\n self.embedder.stop()\n self.vector_store.stop()" }, { "identifier": "SecurityManager", "path": "sdk/eidos_sdk/security/security_manager.py", "snippet": "class SecurityManager(Specable[SecurityManagerSpec]):\n authorization_processor: BaseHTTPMiddleware\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.authorization_processor = self.spec.authorization_processor.instantiate()" } ]
from contextlib import contextmanager from fastapi import FastAPI from pydantic import BaseModel, Field from typing import List, Optional from eidos_sdk.memory.agent_memory import AgentMemory from .agent_controller import AgentController from .reference_model import AnnotatedReference, Specable from .resources.agent_resource import AgentResource from .resources.resources_base import Resource from ..agent_os import AgentOS from ..memory.file_memory import FileMemory from ..memory.semantic_memory import SymbolicMemory from ..memory.similarity_memory import SimilarityMemory from ..security.security_manager import SecurityManager
5,803
class MachineSpec(BaseModel):
    symbolic_memory: AnnotatedReference[SymbolicMemory] = Field(description="The Symbolic Memory implementation.")
    file_memory: AnnotatedReference[FileMemory] = Field(description="The File Memory implementation.")
    similarity_memory: AnnotatedReference[SimilarityMemory] = Field(description="The Vector Memory implementation.")
    security_manager: AnnotatedReference[SecurityManager] = Field(description="The Security Manager implementation.")

    def get_agent_memory(self):
        file_memory = self.file_memory.instantiate()
        symbolic_memory = self.symbolic_memory.instantiate()
        vector_memory = self.similarity_memory.instantiate()
        return AgentMemory(
            file_memory=file_memory,
            symbolic_memory=symbolic_memory,
            similarity_memory=vector_memory,
        )


class AgentMachine(Specable[MachineSpec]):
    memory: AgentMemory
    security_manager: SecurityManager
    agent_controllers: List[AgentController]
    app: Optional[FastAPI]

    def __init__(self, spec: MachineSpec):
        super().__init__(spec)
        agents = {}
class MachineSpec(BaseModel):
    symbolic_memory: AnnotatedReference[SymbolicMemory] = Field(description="The Symbolic Memory implementation.")
    file_memory: AnnotatedReference[FileMemory] = Field(description="The File Memory implementation.")
    similarity_memory: AnnotatedReference[SimilarityMemory] = Field(description="The Vector Memory implementation.")
    security_manager: AnnotatedReference[SecurityManager] = Field(description="The Security Manager implementation.")

    def get_agent_memory(self):
        file_memory = self.file_memory.instantiate()
        symbolic_memory = self.symbolic_memory.instantiate()
        vector_memory = self.similarity_memory.instantiate()
        return AgentMemory(
            file_memory=file_memory,
            symbolic_memory=symbolic_memory,
            similarity_memory=vector_memory,
        )


class AgentMachine(Specable[MachineSpec]):
    memory: AgentMemory
    security_manager: SecurityManager
    agent_controllers: List[AgentController]
    app: Optional[FastAPI]

    def __init__(self, spec: MachineSpec):
        super().__init__(spec)
        agents = {}
for name, r in AgentOS.get_resources(AgentResource).items():
5
2023-11-10 20:42:00+00:00
8k
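Usage note for the eidOS record above: `Specable[T]` simply stores a validated `spec` object on the instance, which is the pattern `MachineSpec`/`AgentMachine` in `cropped_code` relies on, with `AnnotatedReference` fields resolved via `instantiate()`. A small sketch of a custom component built the same way; `GreeterSpec` and `Greeter` are invented for illustration:

from pydantic import BaseModel, Field

from eidos_sdk.system.reference_model import Specable


class GreeterSpec(BaseModel):
    greeting: str = Field(default="hello", description="Template used for greetings.")


class Greeter(Specable[GreeterSpec]):
    def greet(self, name: str) -> str:
        # Specable.__init__ stored the validated spec on self.spec.
        return f"{self.spec.greeting}, {name}!"


greeter = Greeter(spec=GreeterSpec(greeting="hi"))
assert greeter.greet("eidolon") == "hi, eidolon!"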
interpretml/LLM-Tabular-Memorization-Checker
tabmemcheck/functions.py
[ { "identifier": "LLM_Interface", "path": "tabmemcheck/llm.py", "snippet": "class LLM_Interface:\n \"\"\"The interface to the language model.\"\"\"\n\n # if true, the tests use the chat_completion function, otherwise the completion function\n chat_mode = False\n\n def completion(self, prompt, temperature, max_tokens):\n \"\"\"Returns: The response (string)\"\"\"\n\n def chat_completion(self, messages, temperature, max_tokens):\n \"\"\"Returns: The response (string)\"\"\"\n raise NotImplementedError" }, { "identifier": "ChatWrappedLLM", "path": "tabmemcheck/llm.py", "snippet": "class ChatWrappedLLM(LLM_Interface):\n \"\"\"Wrap a base language model (i.e. an LLM_Interface that only implements the completion method) to act as a chat completion model.\n\n The wrapped model take queries via the chat_completion interface. It transforms the messages list into a single textual prompt using the provided prompt_fn.\n \"\"\"\n\n def __init__(self, llm, prompt_fn, ends_with: str = None):\n assert not llm.chat_mode, \"The wrapped model must be a base model.\"\n self.llm = llm\n self.chat_mode = True\n self.wrapper_fn = prompt_fn\n self.ends_with = ends_with\n\n def chat_completion(self, messages, temperature, max_tokens):\n prompt = self.wrapper_fn(messages)\n # print(prompt)\n response = self.llm.completion(prompt, temperature, max_tokens)\n # print(response)\n if (\n self.ends_with is not None\n ): # we frequently use '\\n\\n' as the end of the relevant part of the response\n if self.ends_with in response:\n response = response[: response.find(self.ends_with)]\n return response\n\n def __repr__(self) -> str:\n return self.llm.__repr__()" }, { "identifier": "send_chat_completion", "path": "tabmemcheck/llm.py", "snippet": "def send_chat_completion(llm: LLM_Interface, messages, max_tokens=None, logfile=None):\n \"\"\"Send chat completion with retrying and logging.\n\n Returns: The response (string))\"\"\"\n config = tabmem.config\n if max_tokens is None:\n max_tokens = config.max_tokens\n response = llm.chat_completion(messages, config.temperature, max_tokens)\n if config.sleep > 0.0:\n time.sleep(config.sleep)\n # logging\n log(messages, response, logfile)\n # printing\n if config.print_prompts or config.print_next_prompt:\n pretty_print_messages(messages)\n if config.print_prompts or config.print_responses or config.print_next_prompt:\n pretty_print_response(response)\n # reset print_next_prompt\n config.print_next_prompt = False\n # return string response\n return response" }, { "identifier": "send_completion", "path": "tabmemcheck/llm.py", "snippet": "def send_completion(llm: LLM_Interface, prompt, max_tokens=None, logfile=None):\n config = tabmem.config\n if max_tokens is None:\n max_tokens = config.max_tokens\n response = llm.completion(prompt, config.temperature, max_tokens)\n # logging\n log(prompt, response, logfile)\n # printing\n if config.print_prompts or config.print_next_prompt:\n pretty_print_completion(prompt, response)\n elif config.print_responses:\n pretty_print_response(response)\n # reset print_next_prompt\n config.print_next_prompt = False\n # return string response\n return response" }, { "identifier": "bcolors", "path": "tabmemcheck/llm.py", "snippet": "class bcolors:\n HEADER = \"\\033[95m\"\n OKBLUE = \"\\033[94m\"\n OKCYAN = \"\\033[96m\"\n OKGREEN = \"\\033[92m\"\n WARNING = \"\\033[93m\"\n FAIL = \"\\033[91m\"\n ENDC = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n UNDERLINE = \"\\033[4m\"\n\n # Regular Colors\n Black = \"\\033[0;30m\" # Black\n Red = \"\\033[0;31m\" # Red\n Green = 
\"\\033[0;32m\" # Green\n Yellow = \"\\033[0;33m\" # Yellow\n Blue = \"\\033[0;34m\" # Blue\n Purple = \"\\033[0;35m\" # Purple\n Cyan = \"\\033[0;36m\" # Cyan\n White = \"\\033[0;37m\" # White\n\n # Background\n On_Black = \"\\033[40m\" # Black\n On_Red = \"\\033[41m\" # Red\n On_Green = \"\\033[42m\" # Green\n On_Yellow = \"\\033[43m\" # Yellow\n On_Blue = \"\\033[44m\" # Blue\n On_Purple = \"\\033[45m\" # Purple\n On_Cyan = \"\\033[46m\" # Cyan\n On_White = \"\\033[47m\" # White" }, { "identifier": "statistical_feature_prediction_test", "path": "tabmemcheck/row_independence.py", "snippet": "@retry(\n stop=stop_after_attempt(10)\n) # the automated fitting can fail for an unlucky choice of the test rows (I think. at least it can fail with certain probability due to bad label encoding. this is a quick fix)\ndef statistical_feature_prediction_test(\n csv_file, feature_name, num_prefix_rows=5, confidence_level=0.95, verbose=False\n):\n \"\"\"Train a gradient boosted tree and a linear classifier to predict the value of feature {feature_name} in the n-th row of the csv file,\n using all the features of the previous {num_prefix_rows} rows.\n\n Returns: True if the null of no overlap is rejected, False otherwise.\n \"\"\"\n # load the file as a pandas dataframe\n df = utils.load_csv_df(csv_file)\n feature_names = utils.get_feature_names(csv_file)\n\n # auto-adjust the number of prefix rows based on the size of the dataset\n # (it is more important to have a big test set, so that we can detect strong effects (row id) on small datasets with significance)\n num_prefix_rows = 5\n if len(df) < 1000:\n num_prefix_rows = 3\n if len(df) < 500:\n num_prefix_rows = 2\n if len(df) < 200:\n num_prefix_rows = 1\n\n # we need to make a strict separation between train and test rows\n # this means that we exclude the {num_prefix_rows} rows before any test row from the training set\n test_rows = np.random.choice(\n len(df), size=(len(df) // (1 + num_prefix_rows)) // 2, replace=False\n )\n\n # regression or classification?\n classification = False\n if df[feature_name].dtype == \"object\":\n classification = True\n elif (\n len(df[feature_name].unique()) < 25\n and len(df[feature_name].unique()) / len(df) < 0.05\n ):\n # if the feature takes only a couple of values, classification\n df[feature_name] = df[feature_name].astype(\"category\").cat.codes\n classification = True\n\n # convert all numbers to floats\n for fn in feature_names:\n if df[fn].dtype == \"int64\":\n df[fn] = df[fn].astype(float)\n\n # convert strings to categorical features\n for fn in feature_names:\n if df[fn].dtype == \"object\":\n df[fn] = df[fn].astype(\"category\").cat.codes\n\n # impute all missing values with the mean\n df = df.fillna(df.mean())\n\n # construct the prediction problem\n X_train, X_test = [], []\n y_train, y_test = [], []\n for i_row in range(num_prefix_rows, len(df)):\n # the value of the feature in the test row\n y_i = df[feature_name].iloc[i_row]\n # all the values of the previous num_prefix_rows rows\n X_i = df.iloc[i_row - num_prefix_rows : i_row].values.flatten()\n # is this row train, test, or excluded?\n if i_row in test_rows: # test\n X_test.append(X_i)\n y_test.append(y_i)\n else:\n excluded = False\n for dist in range(num_prefix_rows):\n if i_row + dist + 1 in test_rows: # excluded\n excluded = True\n if not excluded: # train\n X_train.append(X_i)\n y_train.append(y_i)\n X_train, X_test = np.array(X_train), np.array(X_test)\n y_train, y_test = np.array(y_train), np.array(y_test)\n\n # train a gradient boosted tree 
and logistic/linear regression\n gbtree = XGBRegressor()\n linear_clf = make_pipeline(StandardScaler(), LinearRegression())\n if classification:\n gbtree = XGBClassifier()\n linear_clf = make_pipeline(StandardScaler(), LogisticRegression())\n # ignore convergence warnings etc.\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n gbtree.fit(X_train, y_train)\n linear_clf.fit(X_train, y_train)\n # for the test, we choose the classifier with the lower TRAINING error\n # (we can do this without adjusting the confidence level)\n final_model = gbtree\n if linear_clf.score(X_train, y_train) < gbtree.score(X_train, y_train):\n final_model = linear_clf\n # the final predictions\n y_pred = final_model.predict(X_test)\n\n # evaluation\n if classification:\n # measure the predictive accuracy\n score, ci = utils.accuracy(y_pred, y_test, confidence_level=confidence_level)\n # the best unconditional predictor: always predicting the most common class\n y_pred = np.repeat(np.argmax(np.bincount(y_train)), len(y_test))\n baseline_score, baseline_ci = utils.accuracy(y_pred, y_test)\n if verbose:\n print(f\"Accuracy: {score:.3} ({ci.low:.3}, {ci.high:.3})\")\n print(\n f\"Baseline (most common class): {baseline_score:.3} ({baseline_ci.low:.3}, {baseline_ci.high:.3})\"\n )\n else:\n # measure the mean squared error\n score, ci = utils.mean_squared_error(\n y_pred, y_test, confidence_level=confidence_level\n )\n # the mean squared error of the mean\n baseline_score, baseline_ci = utils.mean_squared_error(\n np.repeat(np.mean(y_train), len(y_test)), y_test\n )\n if verbose:\n print(f\"Mean squared error: {score:.3} ({ci.low:.3}, {ci.high:.3})\")\n print(\n f\"Baseline (mean): {baseline_score:.3} ({baseline_ci.low:.3}, {baseline_ci.high:.3})\"\n )\n\n # is the gbtree significantly better than the baseline?\n if classification:\n if ci.low > baseline_ci.high:\n return True\n else:\n if ci.high < baseline_ci.low:\n return True\n return False" }, { "identifier": "prefix_suffix_chat_completion", "path": "tabmemcheck/chat_completion.py", "snippet": "def prefix_suffix_chat_completion(\n llm: LLM_Interface,\n prefixes: list[str],\n suffixes: list[str],\n system_prompt: str,\n few_shot=None,\n num_queries=100,\n out_file=None,\n rng=None,\n):\n \"\"\"A basic chat completion function. Takes a list of prefixes and suffixes and a system prompt.\n Sends {num_queries} prompts of the format\n\n System: <system_prompt>\n User: <prefix> |\n Assistant: <suffix> |\n ... 
| {few_shot} times, or one example from each (prefixes, suffixes) pair in a {few_shot} list.\n User: <prefix> | In the second case, few_shot = [([prefixes], [suffixes]), ..., ([prefixes], [suffixes])]\n Assistant: <suffix> |\n User: <prefix>\n Assistant: <response> (= test suffix?)\n\n The num_queries prefixes and suffixes are randomly selected from the respective lists.\n The function guarantees that the test suffix (as a complete string) is not contained in any of the few-shot prefixes or suffixes.\n\n Stores the results in a csv file.\n\n Returns: the test prefixes, test suffixes, and responses\n \"\"\"\n assert len(prefixes) == len(\n suffixes\n ), \"prefixes and suffixes must have the same length\"\n\n # randomly shuffle the prefixes and suffixes\n if rng is None:\n rng = np.random.default_rng()\n idx = rng.permutation(len(prefixes))\n prefixes = [prefixes[i] for i in idx]\n suffixes = [suffixes[i] for i in idx]\n\n # the number of points to evaluate\n num_points = min(num_queries, len(prefixes))\n\n test_prefixes = []\n test_suffixes = []\n responses = []\n for i_testpoint in range(num_points):\n # system prompt\n messages = [\n {\n \"role\": \"system\",\n \"content\": system_prompt,\n },\n ]\n # few-shot examples?\n if few_shot is not None:\n # if few_shot is an integer, include few_shot examples from the original prefixes and suffixes\n if isinstance(few_shot, int):\n for _ in range(few_shot):\n idx = None\n retries = 0\n # select a random prefix/suffix pair\n while (\n idx is None\n or idx == i_testpoint\n # assert that the test suffix is not contained in the few-shot prefixes or suffixes\n or suffixes[i_testpoint] in prefixes[idx]\n or suffixes[i_testpoint] in suffixes[idx]\n ):\n idx = rng.choice(len(prefixes))\n retries += 1\n if retries > 100:\n raise Exception(\n \"Unable to construct a query where the desired output is not contained in the few-shot data.\\nDid you provide the test dataset as few-shot example?\"\n )\n prefix = prefixes[idx]\n suffix = suffixes[idx]\n messages.append({\"role\": \"user\", \"content\": prefix})\n messages.append({\"role\": \"assistant\", \"content\": suffix})\n # if few_shot is a list of (prefixes, suffixes)-tuples, inlude one example from each tuple\n elif isinstance(few_shot, list):\n for fs_prefixes, fs_suffixes in few_shot:\n fs_prefix, fs_suffix = None, None\n retries = 0\n # select a random prefix/suffix pair\n while (\n fs_prefix is None\n # assert that the test suffix is not contained in the few-shot prefixes or suffixes\n or suffixes[i_testpoint] in fs_prefix\n or suffixes[i_testpoint] in fs_suffix\n ):\n fs_idx = rng.choice(len(fs_prefixes))\n fs_prefix = fs_prefixes[fs_idx]\n fs_suffix = fs_suffixes[fs_idx]\n retries += 1\n if retries > 100:\n raise Exception(\n \"Unable to construct a query where the desired output is not contained in the few-shot data.\\nDid you provide the test dataset as few-shot example?\"\n )\n messages.append({\"role\": \"user\", \"content\": fs_prefix})\n messages.append({\"role\": \"assistant\", \"content\": fs_suffix})\n\n # test observation\n test_prefix = prefixes[i_testpoint]\n test_suffix = suffixes[i_testpoint]\n messages.append({\"role\": \"user\", \"content\": test_prefix})\n response = send_chat_completion(llm, messages)\n # store prefix, suffix and response\n test_prefixes.append(test_prefix)\n test_suffixes.append(test_suffix)\n responses.append(response)\n\n # save the results to file\n if out_file is not None:\n results_df = pd.DataFrame(\n {\n \"prefix\": test_prefixes,\n \"suffix\": 
test_suffixes,\n \"response\": responses,\n }\n )\n results_df.to_csv(\n out_file,\n index=False,\n )\n\n return test_prefixes, test_suffixes, responses" }, { "identifier": "row_chat_completion", "path": "tabmemcheck/chat_completion.py", "snippet": "def row_chat_completion(\n llm,\n csv_file,\n system_prompt,\n num_prefix_rows=10,\n num_queries=100,\n few_shot=7,\n out_file=None,\n):\n \"\"\"Row chat completion task. This task ask the LLM to predict the next row in the\n csv file, given the previous rows. This task is the basis for the row completion\n test, and also for the first token test. Uses prefix_suffix_chat_completion.\"\"\"\n # assert that few_shot is an integer\n assert isinstance(few_shot, int), \"For row completion, few_shot must be an integer.\"\n\n # load the file as a list of strings\n rows = utils.load_csv_rows(csv_file)\n\n # prepare data\n prefixes = []\n suffixes = []\n for idx in range(len(rows) - num_prefix_rows):\n prefixes.append(\"\\n\".join(rows[idx : idx + num_prefix_rows]))\n suffixes.append(rows[idx + num_prefix_rows])\n\n test_prefixes, test_suffixes, responses = prefix_suffix_chat_completion(\n llm,\n prefixes,\n suffixes,\n system_prompt,\n few_shot=few_shot,\n num_queries=num_queries,\n out_file=out_file,\n )\n\n return test_prefixes, test_suffixes, responses" }, { "identifier": "row_completion", "path": "tabmemcheck/chat_completion.py", "snippet": "def row_completion(\n llm,\n csv_file,\n num_prefix_rows=10,\n num_queries=100,\n out_file=None, # TODO support out_file\n):\n \"\"\"Plain language model variant of row_chat_completion\"\"\"\n # load the file as a list of strings\n rows = utils.load_csv_rows(csv_file)\n\n # choose num_queries rows to complete\n prefixes = []\n suffixes = []\n responses = []\n for idx in np.random.choice(\n len(rows) - num_prefix_rows, num_queries, replace=False\n ):\n # prepare query\n prefix = \"\\n\".join(rows[idx : idx + num_prefix_rows])\n suffix = rows[idx + num_prefix_rows]\n\n # send query\n response = send_completion(llm, prefix, max_tokens=1 + len(suffix))\n\n # keep only the first row in the response\n response = response.strip(\"\\n\").split(\"\\n\")[0]\n\n # store prefix, suffix and response\n prefixes.append(prefix)\n suffixes.append(suffix)\n responses.append(response)\n\n return prefixes, suffixes, responses" }, { "identifier": "feature_values_chat_completion", "path": "tabmemcheck/chat_completion.py", "snippet": "def feature_values_chat_completion(\n llm: LLM_Interface,\n csv_file: str,\n system_prompt,\n num_queries,\n few_shot=[], # list or integer\n cond_feature_names=[],\n fs_cond_feature_names=[], # a list of lists of conditional feature names for each few-shot example\n add_description=True,\n out_file=None,\n):\n \"\"\"Feature chat completion task. This task asks the LLM to complete the feature values of observations in the dataset.\n\n The prompt format is the following:\n System: <system_prompt>\n |\n | {few_shot} examples from other csv files.\n |\n User: Dataset: <dataset_name>\n Feature Names: Feature 1, Feature 2, ..., Feature n\n Feature Values: Feature 1 = value 1, Feature 2 = value 2, ..., Feature m = value m\n [Target: Feature k]\n Response: Feature m + 1 = value m + 1, ..., Feature n = value n [Feature k = value k]\n\n This can be modified in the following ways:\n - Remove dataset description and feature names ({add_description} parameter)\n - don't provide any conditional features\n - Don't use the feature names, but only the values. (TODO ? 
or maybe remove, latter for formatter class)\n\n Options:\n - few_shot: use few-shot examples from other csv files (list), or few_shot examples from the same csv file (int)\n - target & fs_targets: if target is not None, then the LLM is asked to complete only the value of the target feature.\n\n The feature names are ordered in the prompt as they are ordered in the csv file. In the future we might want to relax this.\n\n TODO test and debug this function\n \"\"\"\n # TODO assert that all the given feature names are valid (i.e. occur in the dataset, otherwise throw exception)\n\n dataset_name = utils.get_dataset_name(csv_file)\n conditional_sampling = (\n cond_feature_names is not None and len(cond_feature_names) > 0\n )\n\n # if the few-shot argument is a list, then csv_file should not be in there\n # the current option is to remove it (TODO issue warning)\n if isinstance(few_shot, list):\n few_shot = [\n x for x in few_shot if not dataset_name in utils.get_dataset_name(x)\n ]\n\n # if few-shot is an integer, then include few_shot examples from csv_file\n # this is implemented by replacing few_shot and fs_cond_feature_names with the appropriate lists\n if isinstance(few_shot, int):\n few_shot = [csv_file for _ in range(few_shot)]\n fs_cond_feature_names = [cond_feature_names for _ in range(len(few_shot))]\n\n # issue a warning if conditional_sampling, but no fs_cond_feature_names\n if conditional_sampling and len(few_shot) > 0 and len(fs_cond_feature_names) == 0:\n print(\n llm.bcolors.WARNING\n + \"WARNING: feature_chat_completion: Conditional sampling, but no conditional feature names for the few-shot examples provided.\"\n + llm.bcolors.ENDC\n )\n\n # prefixes and suffixes for the main dataset\n if conditional_sampling:\n prefixes, samples = utils.load_cond_samples(\n csv_file, cond_feature_names, add_description=add_description\n )\n else:\n prefix, samples = utils.load_samples(csv_file)\n prefixes = [prefix] * len(samples)\n\n # prefixes and suffixes for the few-shot examples\n few_shot_prefixes_suffixes = []\n for fs_idx, fs_csv_file in enumerate(few_shot):\n if conditional_sampling:\n fs_prefixes, fs_samples = utils.load_cond_samples(\n fs_csv_file,\n fs_cond_feature_names[fs_idx],\n add_description=add_description,\n )\n few_shot_prefixes_suffixes.append((fs_prefixes, fs_samples))\n else:\n fs_prefix, fs_samples = utils.load_samples(fs_csv_file)\n few_shot_prefixes_suffixes.append(\n ([fs_prefix] * len(fs_samples), fs_samples)\n )\n\n # execute chat queries\n test_prefixes, test_suffixes, responses = prefix_suffix_chat_completion(\n llm,\n prefixes,\n samples,\n system_prompt,\n few_shot=few_shot_prefixes_suffixes,\n num_queries=num_queries,\n out_file=out_file,\n )\n\n return test_prefixes, test_suffixes, responses" } ]
import os import numpy as np import pandas as pd import tabmemcheck as tabmem import tabmemcheck.analysis as analysis import tabmemcheck.utils as utils from typing import Any, Union from difflib import SequenceMatcher from tabmemcheck.llm import ( LLM_Interface, ChatWrappedLLM, send_chat_completion, send_completion, bcolors, ) from tabmemcheck.row_independence import statistical_feature_prediction_test from tabmemcheck.chat_completion import ( prefix_suffix_chat_completion, row_chat_completion, row_completion, feature_values_chat_completion, )
5,933
DEFAULT_FEW_SHOT_CSV_FILES = [ "iris.csv", "adult-train.csv", "titanic-train.csv", "uci-wine.csv", "california-housing.csv", ] def __difflib_similar(csv_file_1, csv_file_2): sm = SequenceMatcher( None, utils.load_csv_string(csv_file_1), utils.load_csv_string(csv_file_2) ) if sm.quick_ratio() > 0.9: return sm.ratio() > 0.9 return False def __validate_few_shot_files(csv_file, few_shot_csv_files): """check if the csv_file is contained in the few_shot_csv_files.""" dataset_name = utils.get_dataset_name(csv_file) few_shot_names = [utils.get_dataset_name(x) for x in few_shot_csv_files] if dataset_name in few_shot_names: # replace the dataset_name with open-ml diabetes few_shot_csv_files = [ x for x in few_shot_csv_files if utils.get_dataset_name(x) != dataset_name ] few_shot_csv_files.append("openml-diabetes.csv") # now test with difflib if the dataset contents are very similar for fs_file in few_shot_csv_files: if __difflib_similar(csv_file, fs_file): print(
DEFAULT_FEW_SHOT_CSV_FILES = [ "iris.csv", "adult-train.csv", "titanic-train.csv", "uci-wine.csv", "california-housing.csv", ] def __difflib_similar(csv_file_1, csv_file_2): sm = SequenceMatcher( None, utils.load_csv_string(csv_file_1), utils.load_csv_string(csv_file_2) ) if sm.quick_ratio() > 0.9: return sm.ratio() > 0.9 return False def __validate_few_shot_files(csv_file, few_shot_csv_files): """check if the csv_file is contained in the few_shot_csv_files.""" dataset_name = utils.get_dataset_name(csv_file) few_shot_names = [utils.get_dataset_name(x) for x in few_shot_csv_files] if dataset_name in few_shot_names: # replace the dataset_name with open-ml diabetes few_shot_csv_files = [ x for x in few_shot_csv_files if utils.get_dataset_name(x) != dataset_name ] few_shot_csv_files.append("openml-diabetes.csv") # now test with difflib if the dataset contents are very similar for fs_file in few_shot_csv_files: if __difflib_similar(csv_file, fs_file): print(
bcolors.BOLD
4
2023-11-14 18:34:51+00:00
8k
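The tabmemcheck record above centers on `prefix_suffix_chat_completion`, which turns (prefix, suffix) pairs into a few-shot chat transcript, and on `row_chat_completion`, which builds those pairs from consecutive csv rows. The sketch below illustrates only that prompt layout; it is not part of the record, and the `build_messages` helper and the sample rows are invented for the example.

```python
# Minimal sketch of the prompt layout described in prefix_suffix_chat_completion.
# Hypothetical helper and data; assumes nothing beyond the docstrings above.

def build_messages(system_prompt, few_shot_pairs, test_prefix):
    """Assemble the chat transcript: system prompt, few-shot (prefix, suffix)
    pairs as user/assistant turns, then the test prefix as the final user turn."""
    messages = [{"role": "system", "content": system_prompt}]
    for prefix, suffix in few_shot_pairs:
        messages.append({"role": "user", "content": prefix})
        messages.append({"role": "assistant", "content": suffix})
    messages.append({"role": "user", "content": test_prefix})  # model should answer with the suffix
    return messages

# Row completion: the prefix is a block of consecutive csv rows, the suffix is the next row.
rows = ["5.1,3.5,1.4,0.2,setosa", "4.9,3.0,1.4,0.2,setosa", "4.7,3.2,1.3,0.2,setosa"]
msgs = build_messages("Complete the next row of the csv file.",
                      [("\n".join(rows[:2]), rows[2])],
                      "\n".join(rows[1:]))
for m in msgs:
    print(m["role"], "->", m["content"].replace("\n", " | "))
```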
WindowsSov8forUs/bestdori_api
bestdori/events.py
[ { "identifier": "get_list", "path": "bestdori/post.py", "snippet": "@overload\ndef get_list(\n proxy: Optional[str]=None,\n *,\n search: str='',\n category_name: Literal['SELF_POST']='SELF_POST',\n category_id: Literal['chart']='chart',\n tags: list[Tag]=[],\n order: Literal['TIME_DESC', 'TIME_ASC']='TIME_DESC',\n limit: int=20,\n offset: int=0\n) -> dict[str, Any]:\n '''搜索社区谱面\n ```python\n # 以 'Arghena' 为关键词,搜索社区谱面\n Post.search(search='Arghena', caregory_name='SELF_POST', category_id='chart')\n ```\n\n 参数:\n proxy (Optional[str], optional): 代理服务器\n\n search (str, optional): 搜索关键词,默认为空\n \n category_name (Literal[&#39;SELF_POST&#39;], optional): 搜索的帖子类型 `SELF_POST`\n \n category_id (Literal[&#39;chart&#39;, &#39;text&#39;], optional): 搜索的画廊种类 `chart`\n \n tags (list[Tag], optional): 搜索的标签,默认为空\n \n order (Literal[&#39;TIME_DESC&#39;, &#39;TIME_ASC&#39;], optional): 帖子排序,默认时间倒序\n \n limit (int, optional): 展示出的帖子数,默认 20\n \n offset (int, optional): 忽略前面的 `offset` 个帖子,默认 0\n\n 返回:\n dict[str, Any]: 搜索结果\n ```python\n result: bool # 是否有响应\n count: int # 搜索到的谱面总数\n posts: list[dict[str, Any]] # 列举出的谱面\n ```\n '''\n ..." }, { "identifier": "API", "path": "bestdori/utils/utils.py", "snippet": "API = {\n 'user': {\n 'info': 'user',\n 'login': 'user/login',\n 'me': 'user/me'\n },\n 'post': {\n 'basic': 'post/basic',\n 'details': 'post/details',\n 'list': 'post/list',\n 'tag': 'post/tag',\n 'post': 'post',\n 'find': 'post/find',\n 'like': 'post/like'\n },\n 'charts': {\n 'info': 'charts/{id}/{diff}.json'\n },\n 'characters': {\n 'info': 'characters/{id}.json',\n 'all': 'characters/all.{index}.json'\n },\n 'cards': {\n 'info': 'cards/{id}.json',\n 'all': 'cards/all.{index}.json'\n },\n 'costumes': {\n 'info': 'costumes/{id}.json',\n 'all': 'costumes/all.{index}.json'\n },\n 'events': {\n 'info': 'events/{id}.json',\n 'all': 'events/all.{index}.json',\n 'top': 'eventtop/data'\n },\n 'gacha': {\n 'info': 'gacha/{id}.json',\n 'all': 'gacha/all.{index}.json'\n },\n 'songs': {\n 'info': 'songs/{id}.json',\n 'all': 'songs/all.{index}.json'\n },\n 'loginCampaigns': {\n 'info': 'loginCampaigns/{id}.json',\n 'all': 'loginCampaigns/all.{index}.json'\n },\n 'bands': {\n 'all': 'bands/all.{index}.json',\n 'main': 'bands/main.{index}.json'\n },\n 'upload': {\n 'file': 'upload/file/{hash}',\n 'prepare': 'upload/prepare',\n 'upload': 'upload',\n 'status': 'upload/status/{hash}'\n },\n 'misc': {\n 'llsif': 'misc/llsif.{index}.json'\n },\n 'all': {\n 'skills': 'skills/all.{index}.json',\n 'stamps': 'stamps/all.{index}.json',\n 'degrees': 'degrees/all.{index}.json',\n 'meta': 'songs/meta/all.{index}.json',\n 'archives': 'archives/all.{index}.json',\n 'miracleTicketExchanges': 'miracleTicketExchanges/all.{index}.json',\n 'comics': 'comics/all.{index}.json',\n }\n}" }, { "identifier": "ASSETS", "path": "bestdori/utils/utils.py", "snippet": "ASSETS = {\n 'characters': {\n 'character_kv_image': 'ui/character_kv_image/{id:>03d}_rip/image.png',\n 'resourceset': 'characters/resourceset/{resource_set_name}_rip/{name}_{type}.png',\n 'livesd': 'characters/livesd/{sd_resource_name}_rip/sdchara.png'\n },\n 'event': {\n 'banner': 'event/{asset_bundle_name}/images_rip/banner.png',\n 'logo': 'event/{asset_bundle_name}/images_rip/logo.png',\n 'topscreen': 'event/{asset_bundle_name}/topscreen_rip/{type}_eventtop.png',\n 'loginbouns': 'event/loginbonus/{asset_bundle_name}_rip/background.png'\n },\n 'songs': {\n 'musicjacket': 
'musicjacket/musicjacket{index:>03d}_rip/assets-star-forassetbundle-startapp-musicjacket-musicjacket{index:>03d}-{jacket_image}-jacket.png',\n 'sound': 'sound/bgm{id:>03d}_rip/bgm{id:>03d}.mp3',\n 'musicscore': ''\n },\n 'thumb': {\n 'chara': 'thumb/chara/card{id:>05d}_rip/{resource_set_name}_{type}.png',\n 'degree': 'thumb/degree_rip/{degree_name}.png',\n 'costume': 'thumb/costume/group{id}_rip/{asset_bundle_name}.png',\n },\n 'stamp': {\n 'get': 'stamp/01_rip/{image_name}.png'\n },\n 'homebanner': {\n 'get': 'homebanner_rip/{banner_asset_bundle_name}.png'\n },\n 'gacha': {\n 'screen': 'gacha/screen/gacha{id}_rip/{asset_name}.png'\n },\n 'comic': {\n 'comic': 'comic/comic_{type}/{asset_bundle_name}_rip/{asset_bundle_name}.png',\n 'thumbnail': 'comic/comic_{type}_thumbnail/{asset_bundle_name}_rip/{asset_bundle_name}.png'\n },\n 'missions': {\n 'info': 'missions/{id}.json',\n 'all': 'missions/all.{index}.json'\n },\n 'band': {\n 'logo': 'band/logo/{id:>03d}_rip/{type}.png'\n },\n 'live2d': {\n 'buildData': 'live2d/chara/{asset_bundle_name}_rip/buildData.asset'\n }\n}" }, { "identifier": "Api", "path": "bestdori/utils/network.py", "snippet": "class Api:\n '''向 Bestdori 发送 API 请求类\n\n 参数:\n api (str): 请求的 API 地址\n \n proxy (Optional[str]): 代理服务器'''\n api: str\n '''请求的 API 地址'''\n proxy: Optional[str]=None\n '''代理服务器'''\n headers: dict[str, str]\n '''请求头'''\n # 初始化\n def __init__(\n self,\n api: str,\n proxy: Optional[str]=None\n ) -> None:\n '''初始化'''\n self.api = api\n self.proxy = proxy\n self.headers = {'Content-Type': 'application/json;charset=UTF-8'}\n return\n \n # 请求发送\n def request(\n self,\n method: Literal['get', 'post'],\n *,\n cookies: Optional[Cookies]=None,\n params: Optional[dict[str, Any]]=None,\n data: Optional[dict[str, Any]]=None,\n files: Optional[dict[str, tuple[str, BufferedReader]]]=None\n ) -> Response:\n '''请求发送\n\n 参数:\n method (Literal[&#39;get&#39;, &#39;post&#39;]): API 调用方法\n \n cookies (Optional[Cookies], optional): Cookies\n \n params (Optional[dict[str, Any]], optional): 调用参数\n \n data (Optional[dict[str, Any]], optional): 调用参数,将以 `json` 字符串形式发送\n \n files (Optional[dict[str, tuple[str, BufferedReader]]], optional): 发送文件参数\n\n 返回:\n Response: 收到的响应\n '''\n # 处理接收到的 API\n if self.api.startswith('http://') or self.api.startswith('https://'):\n self.api = self.api\n else:\n self.api = 'https://bestdori.com/api/' + self.api\n # 构建一个请求体\n request = Request(\n method,\n self.api,\n cookies=cookies,\n params=params,\n data=cast(dict, dumps(data)) if data is not None else data,\n files=files,\n headers=self.headers if not self.api.endswith('/upload') else None\n )\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n # 处理接收到的响应\n response.raise_for_status()\n # 判断接收到的响应是否为 json 格式\n if 'application/json' not in (content_type := response.headers.get('content-type', None)):\n if content_type is not None:\n return response\n else:\n raise Exception('接收到的响应没有 content-type。')\n \n if isinstance((response_data := response.json()), dict):\n if (result := response_data.get('result', None)) is not None:\n if result is False:\n if (code := response_data.get('code', None)) is not None:\n if code in REQUEST_EXCEPTION.keys(): # 若错误码已被记录\n exception_class = REQUEST_EXCEPTION[code]\n if params is not None:\n raise exception_class(self.api, **params)\n elif data is not None:\n raise 
exception_class(self.api, **data)\n else:\n raise exception_class(self.api)\n else:\n raise RequestException(self.api, code)\n else:\n raise RequestException(self.api)\n return response" }, { "identifier": "Assets", "path": "bestdori/utils/network.py", "snippet": "class Assets:\n '''获取 Bestdori 资源数据\n\n 参数:\n url (str): 请求的资源地址\n \n server (Literal[&#39;jp&#39;, &#39;en&#39;, &#39;tw&#39;, &#39;cn&#39;, &#39;kr&#39;]): 资源所在服务器\n \n proxy (Optional[str]): 代理服务器'''\n url: str\n '''请求的资源地址'''\n server: Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif']\n '''资源所在服务器'''\n proxy: Optional[str]=None\n '''代理服务器'''\n # 初始化\n def __init__(\n self,\n url: str,\n server: Literal['jp', 'en', 'tw', 'cn', 'kr', 'llsif'],\n proxy: Optional[str]=None\n ) -> None:\n '''获取 Bestdori 资源数据\n\n 参数:\n url (str): 请求的资源地址\n \n server (Literal[&#39;jp&#39;, &#39;en&#39;, &#39;tw&#39;, &#39;cn&#39;, &#39;kr&#39;, &#39;llsif&#39;]): 资源所在服务器\n \n proxy (Optional[str]): 代理服务器\n '''\n self.url = url\n self.server = server\n self.proxy = proxy\n return\n \n # 获取资源连接\n def get_url(self) -> str:\n '''获取资源连接\n\n 返回:\n str: 获取的资源连接 `str`\n '''\n # 如果服务器为 llsif 则转接方法\n if self.server == 'llsif':\n return self._get_niconi_url()\n \n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://bestdori.com/assets/{self.server}/' + self.url\n return self.url\n \n # 从 card.niconi.co.ni 获取资源连接\n def _get_niconi_url(self) -> str:\n '''从 card.niconi.co.ni 获取资源连接\n\n 返回:\n str: 获取的资源连接 `str`\n '''\n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://card.niconi.co.ni/asset/' + self.url\n return self.url\n \n # 获取资源\n def get(self) -> bytes:\n '''获取资源\n\n 返回:\n bytes: 获取的资源字节数据 `bytes`\n '''\n # 如果服务器为 llsif 则转接方法\n if self.server == 'llsif':\n return self._get_from_niconi()\n \n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://bestdori.com/assets/{self.server}/' + self.url\n # 构建一个请求体\n request = Request('get', self.url)\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n response.raise_for_status()\n # 检测响应资源是否存在\n content_type = response.headers.get('content-type', None)\n if content_type is None or content_type == 'text/html':\n raise AssetsNotExistError(self.url)\n return response.content\n \n # 从 card.niconi.co.ni 获取资源\n def _get_from_niconi(self) -> bytes:\n '''从 card.niconi.co.ni 获取资源\n\n 返回:\n bytes: 获取的资源字节数据 `bytes`\n '''\n # 处理接收到的 URL\n if self.url.startswith('http://') or self.url.startswith('https://'):\n self.url = self.url\n else:\n self.url = f'https://card.niconi.co.ni/asset/' + self.url\n # 构建一个请求体\n request = Request('get', self.url)\n # 构建代理服务器字典\n if self.proxy is not None:\n proxies = {'http://': self.proxy, 'https://': self.proxy}\n else:\n proxies = None\n \n # 发送请求并获取响应\n with Client(proxies=cast(dict, proxies)) as client:\n response = client.send(request)\n client.close()\n \n response.raise_for_status()\n # 检测响应资源是否存在\n content_type = response.headers.get('content-type', None)\n if content_type is None or content_type == 'text/html':\n raise AssetsNotExistError(self.url)\n return response.content" }, { "identifier": "EventArchive", "path": 
"bestdori/eventarchives.py", "snippet": "class EventArchive():\n '''活动数据类\n\n 参数:\n id_ (int): 活动 ID\n \n proxy (Optional[str], optional): 代理服务器\n '''\n # 初始化\n def __init__(self, id_: int, proxy: Optional[str]=None) -> None:\n '''活动数据类\n\n 参数:\n id_ (int): 活动 ID\n \n proxy (Optional[str], optional): 代理服务器\n '''\n self.id: int = id_\n '''活动 ID'''\n self._info: dict[str, Any] = {}\n '''活动信息'''\n self.proxy: Optional[str] = proxy\n '''代理服务器'''\n # 检测 ID 是否存在\n all_id = get_all(5, proxy=proxy)\n if not str(id_) in all_id.keys():\n raise EventNotExistError(id_)\n return\n \n # 获取排名分数线\n def get_top(\n self,\n server: Literal[0, 1, 2, 3, 4],\n mid: Literal['0']='0',\n latest: Literal['1']='1'\n ) -> dict[str, list[dict[str, Any]]]:\n '''获取排名分数线\n\n 参数:\n server (Literal[0, 1, 2, 3, 4]): 指定服务器\n `0`: 日服\n `1`: 英服\n `2`: 台服\n `3`: 国服\n `4`: 韩服\n \n mid (Literal[&#39;0&#39;], optional): 指定是否为中间分数线,默认为 `0`\n \n latest (Literal[&#39;1&#39;], optional): 指定是否为最终分数线,默认为 `1`\n\n 返回:\n dict[str, list[dict[str, Any]]]: 排名分数线数据\n '''\n return Api(API['events']['top'], proxy=self.proxy).request(\n 'get', params={\n 'server': server,\n 'event': self.id,\n 'mid': mid,\n 'latest': latest\n }\n ).json()\n\n # 获取活动数据评论\n def get_comment(\n self,\n limit: int=20,\n offset: int=0,\n order: Literal['TIME_DESC', 'TIME_ASC']='TIME_ASC'\n ) -> dict[str, Any]:\n '''获取动数据评论\n\n 参数:\n limit (int, optional): 展示出的评论数,默认为 20\n \n offset (int, optional): 忽略前面的 `offset` 条评论,默认为 0\n \n order (Literal[&#39;TIME_DESC&#39;, &#39;TIME_ASC&#39;], optional): 排序顺序,默认时间顺序\n\n 返回:\n dict[str, Any]: 搜索结果\n ```python\n result: bool # 是否有响应\n count: int # 搜索到的评论总数\n posts: list[dict[str, Any]] # 列举出的评论\n ```\n '''\n return get_list(\n proxy=self.proxy,\n category_id=str(self.id),\n category_name='EVENTARCHIVE_COMMENT',\n limit=limit,\n offset=offset,\n order=order\n )" }, { "identifier": "ServerNotAvailableError", "path": "bestdori/exceptions.py", "snippet": "class ServerNotAvailableError(BaseException):\n '''服务器指定错误'''\n # 初始化\n def __init__(self, name: str, server: str) -> None:\n msg = f'{name} 在服务器 {server} 不可用。'\n super().__init__(msg)\n self.message = msg\n '''错误信息'''\n return" }, { "identifier": "EventHasNoStampError", "path": "bestdori/exceptions.py", "snippet": "class EventHasNoStampError(BaseException):\n '''活动没有奖励贴纸错误'''\n # 初始化\n def __init__(self, id_: int) -> None:\n msg = f'活动 ID {id_} 没有奖励贴纸。'\n super().__init__(msg)\n self.message = msg\n '''错误信息'''\n return" }, { "identifier": "AssetsNotExistError", "path": "bestdori/exceptions.py", "snippet": "class AssetsNotExistError(AssetsException):\n '''资源不存在'''\n # 初始化\n def __init__(self, asset_name: str) -> None:\n msg = f'资源 {asset_name} 可能不存在。'\n super().__init__(msg)" }, { "identifier": "EventNotExistError", "path": "bestdori/exceptions.py", "snippet": "class EventNotExistError(BaseException):\n '''活动不存在'''\n # 初始化\n def __init__(self, id_: int) -> None:\n msg = f'活动 ID {id_} 不存在。'\n super().__init__(msg)\n self.message = msg\n '''错误信息'''\n return" } ]
from typing import Optional, Literal, Any from .post import get_list from .utils.utils import API, ASSETS from .utils.network import Api, Assets from .eventarchives import EventArchive from .exceptions import ( ServerNotAvailableError, EventHasNoStampError, AssetsNotExistError, EventNotExistError )
5,543
'''`bestdori.events` BanG Dream! event-related operations''' # Get the overall event information def get_all(index: Literal[0, 5, 6]=5, proxy: Optional[str]=None) -> dict[str, dict[str, Any]]: '''Get the overall event information Parameters: index (Literal[0, 5, 6], optional): which `all.json` to fetch `0`: only all existing event IDs `all.0.json` `5`: brief information on all existing events `all.5.json` `6`: brief information on all existing events `all.6.json` proxy (Optional[str], optional): proxy server Returns: dict[str, dict[str, Any]]: the fetched overall event information ''' return Api(API['events']['all'].format(index=index), proxy=proxy).request('get').json() # Event class class Event: '''Event class Parameters: id_ (int): event ID proxy (Optional[str], optional): proxy server ''' # Initialization def __init__(self, id_: int, proxy: Optional[str]=None) -> None: '''Event class Parameters: id_ (int): event ID proxy (Optional[str], optional): proxy server ''' self.id: int = id_ '''Event ID'''
'''`bestdori.events` BanG Dream! event-related operations''' # Get the overall event information def get_all(index: Literal[0, 5, 6]=5, proxy: Optional[str]=None) -> dict[str, dict[str, Any]]: '''Get the overall event information Parameters: index (Literal[0, 5, 6], optional): which `all.json` to fetch `0`: only all existing event IDs `all.0.json` `5`: brief information on all existing events `all.5.json` `6`: brief information on all existing events `all.6.json` proxy (Optional[str], optional): proxy server Returns: dict[str, dict[str, Any]]: the fetched overall event information ''' return Api(API['events']['all'].format(index=index), proxy=proxy).request('get').json() # Event class class Event: '''Event class Parameters: id_ (int): event ID proxy (Optional[str], optional): proxy server ''' # Initialization def __init__(self, id_: int, proxy: Optional[str]=None) -> None: '''Event class Parameters: id_ (int): event ID proxy (Optional[str], optional): proxy server ''' self.id: int = id_ '''Event ID'''
self.archive: EventArchive = EventArchive(self.id, self.proxy)
5
2023-11-16 13:09:20+00:00
8k
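A small point worth pulling out of the bestdori record: the endpoint tables `API` and `ASSETS` are `str.format` templates with named fields, so they must be expanded with keyword arguments. A minimal sketch (the `build_url` helper is invented for the example):

```python
# Named placeholders such as {index} require keyword arguments;
# 'events/all.{index}.json'.format(5) would raise a KeyError.
API_EVENTS_ALL = 'events/all.{index}.json'  # copied from the API dict above

def build_url(template: str, **fields) -> str:
    # Api.request prepends the API root when given a relative path.
    return 'https://bestdori.com/api/' + template.format(**fields)

print(build_url(API_EVENTS_ALL, index=5))  # https://bestdori.com/api/events/all.5.json
```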
kampta/asic
commons/logger.py
[ { "identifier": "images2grid", "path": "commons/utils.py", "snippet": "def images2grid(images, **grid_kwargs):\n # images should be (N, C, H, W)\n grid = make_grid(images, **grid_kwargs)\n out = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()\n return out" }, { "identifier": "map_minmax", "path": "commons/utils.py", "snippet": "def map_minmax(x, in_min, in_max, out_min, out_max):\n return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min" }, { "identifier": "compute_pck", "path": "commons/utils.py", "snippet": "def compute_pck(pred, target, vis=None, thresholds=None, img_size=256,\n alphas=None):\n if type(target) == list:\n target = torch.cat(target, dim=0).float().cpu()\n else:\n target = target.float().cpu()\n if type(pred) == list:\n pred = torch.cat(pred, dim=0).float().cpu()\n else:\n pred = pred.float().cpu()\n if vis is not None and type(vis) == list:\n vis = torch.cat(vis, dim=0).bool().cpu()\n elif vis is not None:\n vis = vis.bool().cpu()\n else:\n vis = torch.ones(target.size(0)).bool()\n target = target[vis]\n pred = pred[vis]\n\n if alphas is None:\n alphas = torch.arange(0.1, 0.009, -0.01)\n else:\n alphas = torch.tensor(alphas)\n correct = torch.zeros(len(alphas))\n\n err = (pred- target).norm(dim=-1)\n err = err.unsqueeze(0).repeat(len(alphas), 1)\n\n if thresholds is None:\n thresholds = alphas.unsqueeze(-1).repeat(1, err.size(1)) * img_size\n else:\n # Each keypoint within an image pair get same threshold\n # First get threshold (bbox) for all the visible keypoints\n if type(thresholds) == list:\n thresholds = torch.cat(thresholds, dim=0).float().cpu()\n thresholds = thresholds.unsqueeze(-1).repeat(1, vis.size(1))\n thresholds = thresholds[vis]\n # Next compute alpha x threshold for all the keypoints\n thresholds = thresholds.unsqueeze(0).repeat(len(alphas), 1)\n thresholds = thresholds * alphas.unsqueeze(-1)\n\n correct = err < thresholds\n correct = correct.sum(dim=-1) / len(target)\n\n print(\"PCK-Transfer: \", ','.join([f'{pck * 100:.2f}' for pck in correct]))\n return correct" }, { "identifier": "sample_tuples", "path": "commons/utils.py", "snippet": "def sample_tuples(N, k=1, count=None, seed=None):\n\n if seed is not None:\n np.random.seed(seed)\n\n if count is None: # return all possible (k+1) permutations\n # (N!/(N-k)!) 
x k array\n samples = np.array(list(permutations(range(N), k+1)))\n\n elif k == 1:\n p1 = np.random.choice(N, count)\n p2 = np.random.choice(N, count)\n return np.stack([p1, p2], axis=1)\n\n elif count == -1:\n samples = np.array(list(permutations(range(N), k)))\n samples = np.concatenate([samples, samples[:, 0].reshape(-1, 1)], axis=1)\n\n else: # sample count number of permutations\n # count x k array\n samples = np.zeros((count, k+1), dtype=int)\n for i in range(count):\n samples[i, :k] = np.random.choice(N, k, replace=False)\n # Force the last column to be same as the first column\n samples[:, k] = samples[:, 0]\n\n return samples" }, { "identifier": "pck_loop", "path": "commons/utils.py", "snippet": "def pck_loop(tuples, kps_all, transfer_fn, *args, ignore_interim=False, **kwargs):\n chain_length = tuples.shape[1] - 1\n gt_kps_all = []\n pred_kps_all = []\n vis_all = []\n for ch in range(chain_length):\n src_idx = tuples[:, ch]\n trg_idx = tuples[:, ch+1]\n\n if ch == 0:\n src_kps = kps_all[src_idx]\n else:\n src_kps = pred_kps\n\n pred_kps = transfer_fn(src_kps[..., :2], src_idx, trg_idx,\n *args, **kwargs)\n\n gt_kps_all.append(kps_all[trg_idx][..., :2])\n pred_kps_all.append(pred_kps)\n \n if ch == 0:\n vis = kps_all[src_idx][..., 2] * kps_all[trg_idx][..., 2] > 0\n else:\n vis = vis * kps_all[trg_idx][..., 2] > 0\n vis_all.append(vis)\n\n if ignore_interim:\n return gt_kps_all[-1], pred_kps_all[-1], vis_all[-1]\n else:\n vis_all = torch.cat(vis_all)\n gt_kps_all = torch.cat(gt_kps_all)\n pred_kps_all = torch.cat(pred_kps_all)\n return gt_kps_all, pred_kps_all, vis_all" }, { "identifier": "splat_points", "path": "commons/draw.py", "snippet": "@torch.inference_mode()\ndef splat_points(images, points, sigma, opacity, colorscale='turbo',\n colors=None, alpha_channel=None, blend_alg='alpha'):\n \"\"\"\n Highly efficient GPU-based splatting algorithm. This function is a wrapper\n for Splat2D to overlay points on images. For highest performance, use the\n colors argument directly instead of colorscale.\n images: (N, C, H, W) tensor in [-1, +1]\n points: (N, P, 2) tensor with values in [0, resolution - 1]\n (can be sub-pixel/non-integer coordinates)\n Can also be (N, K, P, 2) tensor, in which case points[:, i]\n gets a unique colorscale\n Expects points in (x, y) order.\n sigma: either float or (N,) tensor with values > 0\n controls the size of the splatted points\n opacity: float in [0, 1], controls the opacity of the splatted points\n colorscale: [Optional] str (or length-K list of str if points is size\n (N, K, P, 2)) indicating the Plotly colorscale to visualize\n points with\n colors: [Optional] (N, P, 3) tensor (or (N, K*P, 3)). If specified,\n colorscale will be ignored. Computing the colorscale\n often takes several orders of magnitude longer than the GPU-based\n splatting, so pre-computing the colors and passing them here\n instead of using the colorscale argument can provide a significant\n speed-up.\n alpha_channel: [Optional] (N, P, 1) tensor (or (N, K*P, 1)). If specified,\n colors will be blended into the output image based on the\n opacity values in alpha_channel (between 0 and 1).\n blend_alg: [Optiona] str. Specifies the blending algorithm to use when\n merging points into images. 
Can use alpha compositing ('alpha'),\n Laplacian Pyramid Blending ('laplacian') or a more conservative\n version of Laplacian Blending ('laplacian_light')\n :return (N, C, H, W) tensor in [-1, +1] with points splatted onto images\n \"\"\"\n assert images.dim() == 4 # (N, C, H, W)\n assert points.dim() == 3 or points.dim() == 4 # (N, P, 2) or (N, K, P, 2)\n batch_size = images.size(0)\n # each index in the second dimension gets a unique colorscale\n if points.dim() == 4:\n num_points = points.size(2)\n points = points.reshape(\n points.size(0), points.size(1) * points.size(2), 2) # (N, K*P, 2)\n if colors is None:\n if isinstance(colorscale, str):\n colorscale = [colorscale]\n assert len(colorscale) == points.size(1)\n # (1, K*P, 3)\n colors = torch.cat([\n get_plotly_colors(num_points, c) for c in colorscale], 1)\n colors = colors.repeat(batch_size, 1, 1) # (N, K*P, 3)\n elif colors is None:\n num_points = points.size(1)\n # All batch elements use the same colorscale\n if isinstance(colorscale, str):\n # (N, P, 3)\n colors = get_plotly_colors(\n points.size(1), colorscale).repeat(batch_size, 1, 1)\n else: # Each batch element uses its own colorscale\n assert len(colorscale) == batch_size\n colors = torch.cat([get_plotly_colors(num_points, c)\n for c in colorscale], 0)\n if alpha_channel is None:\n alpha_channel = torch.ones(\n batch_size, points.size(1), 1, device='cuda')\n if isinstance(sigma, (float, int)):\n sigma = torch.tensor(\n sigma, device='cuda', dtype=torch.float).view(1).repeat(batch_size)\n blank_img = torch.zeros(batch_size, images.size(1), images.size(2),\n images.size(3), device='cuda')\n blank_mask = torch.zeros(batch_size, 1, images.size(2), images.size(3),\n device='cuda')\n # (N, C, H, W)\n prop_obj_img = splat2d(blank_img, points, colors, sigma, False)\n # (N, 1, H, W)\n prop_mask_img = splat2d(blank_mask, points, alpha_channel, sigma, True)\n prop_mask_img *= opacity\n if blend_alg == 'alpha':\n # basic alpha-composite\n out = prop_mask_img * prop_obj_img + (1 - prop_mask_img) * images\n elif blend_alg == 'laplacian':\n blender = LaplacianBlender().to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n elif blend_alg == 'laplacian_light':\n blender = LaplacianBlender(levels=3, gaussian_kernel_size=11,\n gaussian_sigma=0.5).to(images.device)\n out = blender(images, prop_obj_img, prop_mask_img)\n return out" }, { "identifier": "load_fg_points", "path": "commons/draw.py", "snippet": "def load_fg_points(img_mask, resolution=None, normalize=False, device='cuda'):\n # returns points in XY format\n if resolution is None:\n resolution = img_mask.size(-1)\n us = vs = torch.arange(resolution)\n us, vs = torch.meshgrid(us, vs, indexing='xy')\n points = torch.stack([us.reshape(-1), vs.reshape(-1)]).permute(1, 0)\n points = points.unsqueeze(0).expand(img_mask.size(0), -1, -1)\n points = points.to(device)\n\n img_mask = img_mask.float()\n if len(img_mask.shape) == 3:\n img_mask = img_mask.unsqueeze(1)\n scale_factor = resolution / img_mask.size(2)\n if resolution != img_mask.size(2): # resize the mask:\n img_mask = F.interpolate(img_mask, scale_factor=scale_factor,\n mode='bilinear')\n\n img_mask = img_mask.squeeze(1)\n points_alpha = img_mask.reshape(img_mask.size(0), -1)\n points = points / (resolution-1)\n if not normalize:\n points *= (img_mask.size(2)/scale_factor-1)\n\n colors = color_wheel_fast_smooth(resolution).to(device)\n colors = colors.reshape(1, -1, 3).expand(img_mask.size(0), -1, -1)\n\n return points, points_alpha, colors" }, { "identifier": 
"concat_v", "path": "commons/draw.py", "snippet": "def concat_v(*argv, pad=0):\n width = 0\n height = 0\n count = len(argv)\n\n for img in argv:\n height += img.height\n width = max(width, img.width)\n\n dst = Image.new('RGB', (width, height + (count-1)*pad))\n start = 0\n for i, img in enumerate(argv):\n dst.paste(img, (0, start))\n start += img.height + pad\n return dst" }, { "identifier": "get_colors", "path": "commons/draw.py", "snippet": "def get_colors(N):\n # colors = torch.tensor(sns.color_palette(n_colors=N))\n if N > 15:\n cmap = plt.get_cmap('tab10')\n else:\n cmap = ListedColormap([\n \"red\", \"yellow\", \"blue\", \"lime\", \"magenta\", \"indigo\", \"orange\",\n \"cyan\", \"darkgreen\", \"maroon\", \"black\", \"white\", \"chocolate\",\n \"gray\", \"blueviolet\"])\n colors = np.array([cmap(x)[:3] for x in range(N)])\n\n return colors" }, { "identifier": "get_dense_colors", "path": "commons/draw.py", "snippet": "def get_dense_colors(points, resolution=256):\n colors = color_wheel_fast_smooth(resolution)\n if len(points.shape) == 2:\n return colors[points[:, 0], points[:, 1]]\n else:\n device = points.device\n N = len(points)\n colors = colors.permute(2, 0, 1).unsqueeze(0).expand(N, -1, -1, -1)\n points = map_minmax(points, 0, resolution-1, -1, 1).unsqueeze(-2)\n colors = F.grid_sample(colors.to(device), points, align_corners=False)\n return colors.squeeze(-1).permute(0, 2, 1)" }, { "identifier": "load_text_points", "path": "commons/draw.py", "snippet": "def load_text_points(text, pos=None, size=20, rot=0, img_size=256, colorscale='turbo'):\n # Measure the text area\n # font = ImageFont.truetype (r'Roboto-Bold.ttf', size)\n font = ImageFont.load_default()\n wi, hi = font.getbbox(text)[2:]\n\n # Create a dummy source image\n into = Image.new('1', (img_size, img_size), 0)\n # Copy the relevant area from the source image\n if pos is None:\n pos = (img_size // 2 - wi // 2, img_size // 2 - hi // 2)\n img = into.crop((pos[0], pos[1], pos[0] + wi, pos[1] + hi))\n\n # Print into the rotated area\n d = ImageDraw.Draw(img)\n d.text((0, 0), text, font=font, fill = (1))\n\n # Rotate it forward again\n img = img.rotate(rot, expand=1)\n\n # Insert it back into the source image\n into.paste(img, pos)\n text_points = np.where(np.array(into)>0)\n text_points = np.stack(text_points).transpose(1, 0)[:, [1, 0]]\n text_points = torch.from_numpy(np.ascontiguousarray(text_points)).float()\n text_colors = get_plotly_colors(len(text_points), colorscale).squeeze()\n return text_points, text_colors" }, { "identifier": "color_wheel_fast_smooth", "path": "thirdparty/colormap/colormap_flow.py", "snippet": "def color_wheel_fast_smooth(resolution=512, subdivision=16):\n lim = sqrt(2)\n colorwheel = expand_color_wheel(subdivision)\n N = colorwheel.shape[0]\n xs = torch.linspace(-1, 1, steps=resolution)\n ys = torch.linspace(-1, 1, steps=resolution)\n x, y = torch.meshgrid(xs, ys, indexing='xy')\n r = torch.sqrt(x*x + y*y) # (0, sqrt(2)]\n # https://math.stackexchange.com/questions/1327253/how-do-we-find-out-angle-from-x-y-coordinates\n theta = 2 * torch.arctan(-y / (-x+r)) + PI # [0, 2*PI]\n\n # Already got interpolated theta\n # Interpolate theta\n theta_ind = theta / (2*PI) * (N-1) # [0, N-1]\n theta_ind = torch.round(theta_ind).long()\n color = colorwheel[theta_ind]\n\n # Interpolate radius\n r = (r / lim).unsqueeze(-1)\n color = color * r + torch.ones(resolution, resolution, 3) * (1-r)\n # color = (color.numpy() * 255).astype(np.uint8)\n return color # HWC" } ]
from torch.utils.tensorboard.writer import SummaryWriter from PIL import Image from commons.utils import images2grid, map_minmax, compute_pck, sample_tuples, \ pck_loop from commons.draw import splat_points, load_fg_points, \ concat_v, get_colors, get_dense_colors, load_text_points from thirdparty.colormap.colormap_flow import color_wheel_fast_smooth import torch import torch.nn.functional as F import wandb import numpy as np
4,335
@torch.inference_mode() def log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2, vis_denseres=32): device = 'cuda' if torch.cuda.is_available() else 'cpu' pseudo_kps = dset.pseudo_kps parts = dset.parts vis_sample = min(vis_sample, len(dset)) res = dset.img_size has_gt_kp = dset.kps is not None has_fixed_pairs = dset.fixed_pairs is not None # SPair # Run full test dataloader (assuming small dataset) all_imgs = dset.imgs all_masks = dset.masks all_kps = dset.kps all_flows, _ = stn(all_imgs) if has_gt_kp:
@torch.inference_mode() def log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2, vis_denseres=32): device = 'cuda' if torch.cuda.is_available() else 'cpu' pseudo_kps = dset.pseudo_kps parts = dset.parts vis_sample = min(vis_sample, len(dset)) res = dset.img_size has_gt_kp = dset.kps is not None has_fixed_pairs = dset.fixed_pairs is not None # SPair # Run full test dataloader (assuming small dataset) all_imgs = dset.imgs all_masks = dset.masks all_kps = dset.kps all_flows, _ = stn(all_imgs) if has_gt_kp:
kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()
8
2023-11-14 16:43:16+00:00
8k
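The asic record's `compute_pck` boils down to thresholding keypoint-transfer error at a fraction `alpha` of the image size. The sketch below restates just that core on random data; it is an illustration, not the repository's code:

```python
import torch

def pck(pred, target, alphas, img_size=256):
    """PCK@alpha: fraction of keypoints whose error is below alpha * img_size."""
    err = (pred - target).norm(dim=-1)                          # (N,) pixel errors
    thresholds = torch.tensor(alphas).unsqueeze(-1) * img_size  # (A, 1) per-alpha cutoffs
    correct = err.unsqueeze(0) < thresholds                     # (A, N) broadcast compare
    return correct.float().mean(dim=-1)                         # PCK for each alpha

pred = torch.rand(100, 2) * 255    # random predicted keypoints
target = torch.rand(100, 2) * 255  # random ground-truth keypoints
print(pck(pred, target, alphas=[0.1, 0.05]))
```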
AnonymGiant/ViLaM
lavis/runners/runner_iter.py
[ { "identifier": "download_cached_file", "path": "lavis/common/dist_utils.py", "snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()" }, { "identifier": "is_main_process", "path": "lavis/common/dist_utils.py", "snippet": "def is_main_process():\n return get_rank() == 0" }, { "identifier": "main_process", "path": "lavis/common/dist_utils.py", "snippet": "def main_process(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rank, _ = get_dist_info()\n if rank == 0:\n return func(*args, **kwargs)\n\n return wrapper" }, { "identifier": "registry", "path": "lavis/common/registry.py", "snippet": "class Registry:\n def register_builder(cls, name):\n def wrap(builder_cls):\n def register_task(cls, name):\n def wrap(task_cls):\n def register_model(cls, name):\n def wrap(model_cls):\n def register_processor(cls, name):\n def wrap(processor_cls):\n def register_lr_scheduler(cls, name):\n def wrap(lr_sched_cls):\n def register_runner(cls, name):\n def wrap(runner_cls):\n def register_path(cls, name, path):\n def register(cls, name, obj):\n def get_builder_class(cls, name):\n def get_model_class(cls, name):\n def get_task_class(cls, name):\n def get_processor_class(cls, name):\n def get_lr_scheduler_class(cls, name):\n def get_runner_class(cls, name):\n def list_runners(cls):\n def list_models(cls):\n def list_tasks(cls):\n def list_processors(cls):\n def list_lr_schedulers(cls):\n def list_datasets(cls):\n def get_path(cls, name):\n def get(cls, name, default=None, no_warning=False):\n def unregister(cls, name):" }, { "identifier": "is_url", "path": "lavis/common/utils.py", "snippet": "def is_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")" }, { "identifier": "concat_datasets", "path": "lavis/datasets/data_utils.py", "snippet": "def concat_datasets(datasets):\n \"\"\"\n Concatenates multiple datasets into a single dataset.\n\n It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support\n generic IterableDataset because it requires creating separate samplers.\n\n Now only supports conctenating training datasets and assuming validation and testing\n have only a single dataset. 
This is because metrics should not be computed on the concatenated\n datasets.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by split.\n\n Returns:\n Dict of concatenated datasets by split, \"train\" is the concatenation of multiple datasets,\n \"val\" and \"test\" remain the same.\n\n If the input training datasets contain both map-style and DataPipeline datasets, returns\n a tuple, where the first element is a concatenated map-style dataset and the second\n element is a chained DataPipeline dataset.\n\n \"\"\"\n # concatenate datasets in the same split\n for split_name in datasets:\n if split_name != \"train\":\n assert (\n len(datasets[split_name]) == 1\n ), \"Do not support multiple {} datasets.\".format(split_name)\n datasets[split_name] = datasets[split_name][0]\n else:\n iterable_datasets, map_datasets = [], []\n for dataset in datasets[split_name]:\n if isinstance(dataset, wds.DataPipeline):\n logging.info(\n \"Dataset {} is IterableDataset, can't be concatenated.\".format(\n dataset\n )\n )\n iterable_datasets.append(dataset)\n elif isinstance(dataset, IterableDataset):\n raise NotImplementedError(\n \"Do not support concatenation of generic IterableDataset.\"\n )\n else:\n map_datasets.append(dataset)\n\n # if len(iterable_datasets) > 0:\n # concatenate map-style datasets and iterable-style datasets separately\n chained_datasets = (\n ChainDataset(iterable_datasets) if len(iterable_datasets) > 0 else None\n )\n concat_datasets = (\n ConcatDataset(map_datasets) if len(map_datasets) > 0 else None\n )\n\n train_datasets = concat_datasets, chained_datasets\n train_datasets = tuple([x for x in train_datasets if x is not None])\n train_datasets = (\n train_datasets[0] if len(train_datasets) == 1 else train_datasets\n )\n\n datasets[split_name] = train_datasets\n\n return datasets" }, { "identifier": "reorg_datasets_by_split", "path": "lavis/datasets/data_utils.py", "snippet": "def reorg_datasets_by_split(datasets):\n \"\"\"\n Organizes datasets by split.\n\n Args:\n datasets: dict of torch.utils.data.Dataset objects by name.\n\n Returns:\n Dict of datasets by split {split_name: List[Datasets]}.\n \"\"\"\n # if len(datasets) == 1:\n # return datasets[list(datasets.keys())[0]]\n # else:\n reorg_datasets = dict()\n\n # reorganize by split\n for _, dataset in datasets.items():\n for split_name, dataset_split in dataset.items():\n if split_name not in reorg_datasets:\n reorg_datasets[split_name] = [dataset_split]\n else:\n reorg_datasets[split_name].append(dataset_split)\n\n return reorg_datasets" }, { "identifier": "RunnerBase", "path": "lavis/runners/runner_base.py", "snippet": "class RunnerBase:\n \"\"\"\n A runner class to train and evaluate a model given a task and datasets.\n\n The runner uses pytorch distributed data parallel by default. 
Future release\n will support other distributed frameworks.\n \"\"\"\n\n def __init__(self, cfg, task, model, datasets, job_id):\n self.config = cfg\n self.job_id = job_id\n\n self.task = task\n self.datasets = datasets\n\n self._model = model\n\n self._wrapped_model = None\n self._device = None\n self._optimizer = None\n self._scaler = None\n self._dataloaders = None\n self._lr_sched = None\n\n self.start_epoch = 0\n\n # self.setup_seeds()\n self.setup_output_dir()\n\n @property\n def device(self):\n if self._device is None:\n self._device = torch.device(self.config.run_cfg.device)\n\n return self._device\n\n @property\n def use_distributed(self):\n return self.config.run_cfg.distributed\n\n @property\n def model(self):\n \"\"\"\n A property to get the DDP-wrapped model on the device.\n \"\"\"\n # move model to device\n if self._model.device != self.device:\n self._model = self._model.to(self.device)\n\n # distributed training wrapper\n if self.use_distributed:\n if self._wrapped_model is None:\n self._wrapped_model = DDP(\n self._model, device_ids=[self.config.run_cfg.gpu]\n )\n else:\n self._wrapped_model = self._model\n\n return self._wrapped_model\n\n @property\n def optimizer(self):\n # TODO make optimizer class and configurations\n if self._optimizer is None:\n lr_scale = self.config.run_cfg.get(\"lr_layer_decay\", 1)\n weight_decay = self.config.run_cfg.get(\"weight_decay\", 0.05)\n optim_params = self._model.get_optimizer_params(weight_decay,lr_scale)\n\n num_parameters = 0\n for p_group in optim_params:\n for p in p_group[\"params\"]:\n num_parameters += p.data.nelement() \n logging.info(\"number of trainable parameters: {}\".format(num_parameters)) \n \n beta2 = self.config.run_cfg.get(\"beta2\", 0.999)\n\n self._optimizer = torch.optim.AdamW(\n optim_params,\n lr=float(self.config.run_cfg.init_lr),\n betas=(0.9, beta2),\n ) \n return self._optimizer\n\n @property\n def scaler(self):\n amp = self.config.run_cfg.get(\"amp\", False)\n\n if amp:\n if self._scaler is None:\n self._scaler = torch.cuda.amp.GradScaler()\n\n return self._scaler\n\n @property\n def lr_scheduler(self):\n \"\"\"\n A property to get and create learning rate scheduler by split just in need.\n \"\"\"\n if self._lr_sched is None:\n lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched)\n\n # max_epoch = self.config.run_cfg.max_epoch\n max_epoch = self.max_epoch\n # min_lr = self.config.run_cfg.min_lr\n min_lr = self.min_lr\n # init_lr = self.config.run_cfg.init_lr\n init_lr = self.init_lr\n\n # optional parameters\n decay_rate = self.config.run_cfg.get(\"lr_decay_rate\", None)\n warmup_start_lr = self.config.run_cfg.get(\"warmup_lr\", -1)\n warmup_steps = self.config.run_cfg.get(\"warmup_steps\", 0)\n\n self._lr_sched = lr_sched_cls(\n optimizer=self.optimizer,\n max_epoch=max_epoch,\n min_lr=min_lr,\n init_lr=init_lr,\n decay_rate=decay_rate,\n warmup_start_lr=warmup_start_lr,\n warmup_steps=warmup_steps,\n )\n\n return self._lr_sched\n\n @property\n def dataloaders(self) -> dict:\n \"\"\"\n A property to get and create dataloaders by split just in need.\n\n If no train_dataset_ratio is provided, concatenate map-style datasets and\n chain wds.DataPipe datasets separately. Training set becomes a tuple\n (ConcatDataset, ChainDataset), both are optional but at least one of them is\n required. 
The resultant ConcatDataset and ChainDataset will be sampled evenly.\n\n If train_dataset_ratio is provided, create a MultiIterLoader to sample\n each dataset by ratios during training.\n\n Currently do not support multiple datasets for validation and test.\n\n Returns:\n dict: {split_name: (tuples of) dataloader}\n \"\"\"\n if self._dataloaders is None:\n # reoganize datasets by split and concatenate/chain if necessary\n dataset_ratios = self.config.run_cfg.get(\"train_dataset_ratios\", None)\n\n # concatenate map-style datasets and chain wds.DataPipe datasets separately\n # training set becomes a tuple (ConcatDataset, ChainDataset), both are\n # optional but at least one of them is required. The resultant ConcatDataset\n # and ChainDataset will be sampled evenly.\n logging.info(\n \"dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline).\"\n )\n\n datasets = reorg_datasets_by_split(self.datasets)\n self.datasets = concat_datasets(datasets)\n\n # print dataset statistics after concatenation/chaining\n for split_name in self.datasets:\n if isinstance(self.datasets[split_name], tuple) or isinstance(\n self.datasets[split_name], list\n ):\n # mixed wds.DataPipeline and torch.utils.data.Dataset\n num_records = sum(\n [\n len(d)\n if not type(d) in [wds.DataPipeline, ChainDataset]\n else 0\n for d in self.datasets[split_name]\n ]\n )\n\n else:\n if hasattr(self.datasets[split_name], \"__len__\"):\n # a single map-style dataset\n num_records = len(self.datasets[split_name])\n else:\n # a single wds.DataPipeline\n num_records = -1\n logging.info(\n \"Only a single wds.DataPipeline dataset, no __len__ attribute.\"\n )\n\n if num_records >= 0:\n logging.info(\n \"Loaded {} records for {} split from the dataset.\".format(\n num_records, split_name\n )\n )\n\n # create dataloaders\n split_names = sorted(self.datasets.keys())\n\n datasets = [self.datasets[split] for split in split_names]\n is_trains = [split in self.train_splits for split in split_names]\n\n batch_sizes = [\n self.config.run_cfg.batch_size_train\n if split == \"train\"\n else self.config.run_cfg.batch_size_eval\n for split in split_names\n ]\n\n collate_fns = []\n for dataset in datasets:\n if isinstance(dataset, tuple) or isinstance(dataset, list):\n collate_fns.append([getattr(d, \"collater\", None) for d in dataset])\n else:\n collate_fns.append(getattr(dataset, \"collater\", None))\n\n dataloaders = self.create_loaders(\n datasets=datasets,\n num_workers=self.config.run_cfg.num_workers,\n batch_sizes=batch_sizes,\n is_trains=is_trains,\n collate_fns=collate_fns,\n dataset_ratios=dataset_ratios,\n )\n\n self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)}\n\n return self._dataloaders\n\n @property\n def cuda_enabled(self):\n return self.device.type == \"cuda\"\n\n @property\n def max_epoch(self):\n return int(self.config.run_cfg.max_epoch)\n\n @property\n def log_freq(self):\n log_freq = self.config.run_cfg.get(\"log_freq\", 50)\n return int(log_freq)\n\n @property\n def init_lr(self):\n return float(self.config.run_cfg.init_lr)\n\n @property\n def min_lr(self):\n return float(self.config.run_cfg.min_lr)\n\n @property\n def accum_grad_iters(self):\n return int(self.config.run_cfg.get(\"accum_grad_iters\", 1))\n\n @property\n def valid_splits(self):\n valid_splits = self.config.run_cfg.get(\"valid_splits\", [])\n\n if len(valid_splits) == 0:\n logging.info(\"No validation splits found.\")\n\n return valid_splits\n\n @property\n def test_splits(self):\n 
test_splits = self.config.run_cfg.get(\"test_splits\", [])\n\n return test_splits\n\n @property\n def train_splits(self):\n train_splits = self.config.run_cfg.get(\"train_splits\", [])\n\n if len(train_splits) == 0:\n logging.info(\"Empty train splits.\")\n\n return train_splits\n\n @property\n def evaluate_only(self):\n \"\"\"\n Set to True to skip training.\n \"\"\"\n return self.config.run_cfg.evaluate\n\n @property\n def use_dist_eval_sampler(self):\n return self.config.run_cfg.get(\"use_dist_eval_sampler\", True)\n\n @property\n def resume_ckpt_path(self):\n return self.config.run_cfg.get(\"resume_ckpt_path\", None)\n\n @property\n def train_loader(self):\n train_dataloader = self.dataloaders[\"train\"]\n\n return train_dataloader\n\n def setup_output_dir(self):\n lib_root = Path(registry.get_path(\"library_root\"))\n\n output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id\n result_dir = output_dir / \"result\"\n\n output_dir.mkdir(parents=True, exist_ok=True)\n result_dir.mkdir(parents=True, exist_ok=True)\n\n registry.register_path(\"result_dir\", str(result_dir))\n registry.register_path(\"output_dir\", str(output_dir))\n\n self.result_dir = result_dir\n self.output_dir = output_dir\n\n def train(self):\n start_time = time.time()\n best_agg_metric = 0\n best_epoch = 0\n\n self.log_config()\n\n # resume from checkpoint if specified\n if not self.evaluate_only and self.resume_ckpt_path is not None:\n self._load_checkpoint(self.resume_ckpt_path)\n\n for cur_epoch in range(self.start_epoch, self.max_epoch):\n # training phase\n if not self.evaluate_only:\n logging.info(\"Start training\")\n train_stats = self.train_epoch(cur_epoch)\n self.log_stats(split_name=\"train\", stats=train_stats)\n \n self._save_checkpoint(cur_epoch, is_best=False)\n\n\n # evaluation phase\n if len(self.valid_splits) > 0:\n for split_name in self.valid_splits:\n logging.info(\"Evaluating on {}.\".format(split_name))\n\n val_log = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch\n )\n # if val_log is not None:\n # if is_main_process():\n # assert (\n # \"agg_metrics\" in val_log\n # ), \"No agg_metrics found in validation log.\"\n\n # agg_metrics = val_log[\"agg_metrics\"]\n # if agg_metrics > best_agg_metric and split_name == \"val\":\n # best_epoch, best_agg_metric = cur_epoch, agg_metrics\n\n # self._save_checkpoint(cur_epoch, is_best=True)\n\n # val_log.update({\"best_epoch\": best_epoch})\n # self.log_stats(val_log, split_name)\n\n else:\n # if no validation split is provided, we just save the checkpoint at the end of each epoch.\n if not self.evaluate_only:\n self._save_checkpoint(cur_epoch, is_best=False)\n\n if self.evaluate_only:\n break\n\n dist.barrier()\n\n # testing phase\n test_epoch = \"best\" if len(self.valid_splits) > 0 else cur_epoch\n self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only)\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n logging.info(\"Training time {}\".format(total_time_str))\n\n def evaluate(self, cur_epoch=\"best\", skip_reload=False):\n test_logs = dict()\n\n if len(self.test_splits) > 0:\n for split_name in self.test_splits:\n test_logs[split_name] = self.eval_epoch(\n split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload\n )\n\n return test_logs\n\n def train_epoch(self, epoch):\n # train\n self.model.train()\n\n return self.task.train_epoch(\n epoch=epoch,\n model=self.model,\n data_loader=self.train_loader,\n optimizer=self.optimizer,\n 
scaler=self.scaler,\n lr_scheduler=self.lr_scheduler,\n cuda_enabled=self.cuda_enabled,\n log_freq=self.log_freq,\n accum_grad_iters=self.accum_grad_iters,\n )\n\n @torch.no_grad()\n def eval_epoch(self, split_name, cur_epoch, skip_reload=False):\n \"\"\"\n Evaluate the model on a given split.\n\n Args:\n split_name (str): name of the split to evaluate on.\n cur_epoch (int): current epoch.\n skip_reload_best (bool): whether to skip reloading the best checkpoint.\n During training, we will reload the best checkpoint for validation.\n During testing, we will use provided weights and skip reloading the best checkpoint .\n \"\"\"\n data_loader = self.dataloaders.get(split_name, None)\n assert data_loader, \"data_loader for split {} is None.\".format(split_name)\n\n # TODO In validation, you need to compute loss as well as metrics\n # TODO consider moving to model.before_evaluation()\n model = self.unwrap_dist_model(self.model)\n if not skip_reload and cur_epoch == \"best\":\n model = self._reload_best_model(model)\n model.eval()\n\n self.task.before_evaluation(\n model=model,\n dataset=self.datasets[split_name],\n )\n results = self.task.evaluation(model, data_loader)\n\n if results is not None:\n return self.task.after_evaluation(\n val_result=results,\n split_name=split_name,\n epoch=cur_epoch,\n )\n\n def unwrap_dist_model(self, model):\n if self.use_distributed:\n return model.module\n else:\n return model\n\n def create_loaders(\n self,\n datasets,\n num_workers,\n batch_sizes,\n is_trains,\n collate_fns,\n dataset_ratios=None,\n ):\n \"\"\"\n Create dataloaders for training and validation.\n \"\"\"\n\n def _create_loader(dataset, num_workers, bsz, is_train, collate_fn):\n # create a single dataloader for each split\n if isinstance(dataset, ChainDataset) or isinstance(\n dataset, wds.DataPipeline\n ):\n # wds.WebdDataset instance are chained together\n # webdataset.DataPipeline has its own sampler and collate_fn\n loader = iter(\n DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n )\n )\n else:\n # map-style dataset are concatenated together\n # setup distributed sampler\n if self.use_distributed:\n sampler = DistributedSampler(\n dataset,\n shuffle=is_train,\n num_replicas=get_world_size(),\n rank=get_rank(),\n )\n if not self.use_dist_eval_sampler:\n # e.g. 
retrieval evaluation\n sampler = sampler if is_train else None\n else:\n sampler = None\n\n loader = DataLoader(\n dataset,\n batch_size=bsz,\n num_workers=num_workers,\n pin_memory=True,\n sampler=sampler,\n shuffle=sampler is None and is_train,\n collate_fn=collate_fn,\n drop_last=True if is_train else False,\n )\n loader = PrefetchLoader(loader)\n\n if is_train:\n loader = IterLoader(loader, use_distributed=self.use_distributed)\n\n return loader\n\n loaders = []\n\n for dataset, bsz, is_train, collate_fn in zip(\n datasets, batch_sizes, is_trains, collate_fns\n ):\n if isinstance(dataset, list) or isinstance(dataset, tuple):\n loader = MultiIterLoader(\n loaders=[\n _create_loader(d, num_workers, bsz, is_train, collate_fn[i])\n for i, d in enumerate(dataset)\n ],\n ratios=dataset_ratios,\n )\n else:\n loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn)\n\n loaders.append(loader)\n\n return loaders\n\n @main_process\n def _save_checkpoint(self, cur_epoch, is_best=False):\n \"\"\"\n Save the checkpoint at the current epoch.\n \"\"\"\n model_no_ddp = self.unwrap_dist_model(self.model)\n param_grad_dic = {\n k: v.requires_grad for (k, v) in model_no_ddp.named_parameters()\n }\n state_dict = model_no_ddp.state_dict()\n for k in list(state_dict.keys()):\n if k in param_grad_dic.keys() and not param_grad_dic[k]:\n # delete parameters that do not require gradient\n del state_dict[k]\n save_obj = {\n \"model\": state_dict,\n \"optimizer\": self.optimizer.state_dict(),\n \"config\": self.config.to_dict(),\n \"scaler\": self.scaler.state_dict() if self.scaler else None,\n \"epoch\": cur_epoch,\n }\n save_to = os.path.join(\n self.output_dir,\n \"checkpoint_{}.pth\".format(\"best\" if is_best else cur_epoch),\n )\n logging.info(\"Saving checkpoint at epoch {} to {}.\".format(cur_epoch, save_to))\n torch.save(save_obj, save_to)\n\n def _reload_best_model(self, model):\n \"\"\"\n Load the best checkpoint for evaluation.\n \"\"\"\n checkpoint_path = os.path.join(self.output_dir, \"checkpoint_best.pth\")\n\n logging.info(\"Loading checkpoint from {}.\".format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n try:\n model.load_state_dict(checkpoint[\"model\"])\n except RuntimeError as e:\n logging.warning(\n \"\"\"\n Key mismatch when loading checkpoint. 
This is expected if only part of the model is saved.\n Trying to load the model with strict=False.\n \"\"\"\n )\n model.load_state_dict(checkpoint[\"model\"], strict=False)\n return model\n\n def _load_checkpoint(self, url_or_filename):\n \"\"\"\n Resume from a checkpoint.\n \"\"\"\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=self.device)\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=self.device)\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n\n state_dict = checkpoint[\"model\"]\n self.unwrap_dist_model(self.model).load_state_dict(state_dict)\n\n self.optimizer.load_state_dict(checkpoint[\"optimizer\"])\n if self.scaler and \"scaler\" in checkpoint:\n self.scaler.load_state_dict(checkpoint[\"scaler\"])\n\n self.start_epoch = checkpoint[\"epoch\"] + 1\n logging.info(\"Resume checkpoint from {}\".format(url_or_filename))\n\n @main_process\n def log_stats(self, stats, split_name):\n if isinstance(stats, dict):\n log_stats = {**{f\"{split_name}_{k}\": v for k, v in stats.items()}}\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n elif isinstance(stats, list):\n pass\n\n @main_process\n def log_config(self):\n with open(os.path.join(self.output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(self.config.to_dict(), indent=4) + \"\\n\")" } ]
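A point worth noting in the RunnerBase snippet above: `_save_checkpoint` drops parameters that do not require gradients, so a reload must tolerate missing keys. A minimal self-contained sketch of that round-trip (the toy model and file name here are illustrative, not from the source):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))  # toy model, not from the source
for p in model[0].parameters():
    p.requires_grad = False  # freeze the first layer

param_grad_dic = {k: v.requires_grad for k, v in model.named_parameters()}
state_dict = model.state_dict()
for k in list(state_dict.keys()):
    if k in param_grad_dic and not param_grad_dic[k]:
        del state_dict[k]  # drop frozen parameters, as _save_checkpoint does

torch.save({"model": state_dict, "epoch": 0}, "checkpoint_demo.pth")

# the frozen keys are absent, so reloading needs strict=False,
# matching the fallback in _reload_best_model
checkpoint = torch.load("checkpoint_demo.pth", map_location="cpu")
model.load_state_dict(checkpoint["model"], strict=False)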
import datetime
import logging
import os
import time

import torch
import torch.distributed as dist
import webdataset as wds
from lavis.common.dist_utils import download_cached_file, is_main_process, main_process
from lavis.common.registry import registry
from lavis.common.utils import is_url
from lavis.datasets.data_utils import concat_datasets, reorg_datasets_by_split
from lavis.runners.runner_base import RunnerBase
from torch.utils.data.dataset import ChainDataset
7,109
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ @registry.register_runner("runner_iter") class RunnerIter(RunnerBase): """ Run training based on the number of iterations. This is common when the training dataset size is large. Underhood logic is similar to epoch-based training by considering every #iters_per_inner_epoch as an inner epoch. In iter-based runner, after every #iters_per_inner_epoch steps, we 1) do a validation epoch; 2) schedule the learning rate; 3) save the checkpoint. We refer every #iters_per_inner_epoch steps as an inner epoch. """ def __init__(self, cfg, task, model, datasets, job_id): super().__init__(cfg, task, model, datasets, job_id) self.start_iters = 0 self.max_iters = int(self.config.run_cfg.get("max_iters", -1)) assert self.max_iters > 0, "max_iters must be greater than 0." self.iters_per_inner_epoch = int( self.config.run_cfg.get("iters_per_inner_epoch", -1) ) assert ( self.iters_per_inner_epoch > 0 ), "iters_per_inner_epoch must be greater than 0." @property def max_epoch(self): return int(self.max_iters / self.iters_per_inner_epoch) @property def cur_epoch(self): try: return self.train_loader.epoch except AttributeError: # pipeline data (e.g. LAION) is streaming, have no concept of epoch return 0 def _progress(self, cur_iters): return "{}_iters={}".format(self.cur_epoch, cur_iters) def train(self): start_time = time.time() best_agg_metric = 0 best_iters = 0 self.log_config() # resume from checkpoint if specified if not self.evaluate_only and self.resume_ckpt_path is not None: self._load_checkpoint(self.resume_ckpt_path) for start_iters in range( self.start_iters, self.max_iters, self.iters_per_inner_epoch ): end_iters = start_iters + self.iters_per_inner_epoch # training phase if not self.evaluate_only: logging.info( "Start training, max_iters={}, in total {} inner epochs.".format( self.max_iters, int(self.max_iters / self.iters_per_inner_epoch) ) ) train_stats = self.train_iters(self.cur_epoch, start_iters) self.log_stats(split_name="train", stats=train_stats) self._save_checkpoint(end_iters, is_best=False) # evaluation phase if len(self.valid_splits) > 0: for split_name in self.valid_splits: logging.info("Evaluating on {}.".format(split_name)) val_log = self.eval_epoch( split_name=split_name, cur_epoch=self._progress(end_iters) ) if val_log is not None:
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ @registry.register_runner("runner_iter") class RunnerIter(RunnerBase): """ Run training based on the number of iterations. This is common when the training dataset size is large. Underhood logic is similar to epoch-based training by considering every #iters_per_inner_epoch as an inner epoch. In iter-based runner, after every #iters_per_inner_epoch steps, we 1) do a validation epoch; 2) schedule the learning rate; 3) save the checkpoint. We refer every #iters_per_inner_epoch steps as an inner epoch. """ def __init__(self, cfg, task, model, datasets, job_id): super().__init__(cfg, task, model, datasets, job_id) self.start_iters = 0 self.max_iters = int(self.config.run_cfg.get("max_iters", -1)) assert self.max_iters > 0, "max_iters must be greater than 0." self.iters_per_inner_epoch = int( self.config.run_cfg.get("iters_per_inner_epoch", -1) ) assert ( self.iters_per_inner_epoch > 0 ), "iters_per_inner_epoch must be greater than 0." @property def max_epoch(self): return int(self.max_iters / self.iters_per_inner_epoch) @property def cur_epoch(self): try: return self.train_loader.epoch except AttributeError: # pipeline data (e.g. LAION) is streaming, have no concept of epoch return 0 def _progress(self, cur_iters): return "{}_iters={}".format(self.cur_epoch, cur_iters) def train(self): start_time = time.time() best_agg_metric = 0 best_iters = 0 self.log_config() # resume from checkpoint if specified if not self.evaluate_only and self.resume_ckpt_path is not None: self._load_checkpoint(self.resume_ckpt_path) for start_iters in range( self.start_iters, self.max_iters, self.iters_per_inner_epoch ): end_iters = start_iters + self.iters_per_inner_epoch # training phase if not self.evaluate_only: logging.info( "Start training, max_iters={}, in total {} inner epochs.".format( self.max_iters, int(self.max_iters / self.iters_per_inner_epoch) ) ) train_stats = self.train_iters(self.cur_epoch, start_iters) self.log_stats(split_name="train", stats=train_stats) self._save_checkpoint(end_iters, is_best=False) # evaluation phase if len(self.valid_splits) > 0: for split_name in self.valid_splits: logging.info("Evaluating on {}.".format(split_name)) val_log = self.eval_epoch( split_name=split_name, cur_epoch=self._progress(end_iters) ) if val_log is not None:
if is_main_process():
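The `train` loop above slices `max_iters` into inner epochs of `iters_per_inner_epoch` steps each. A small sketch of the window arithmetic (values are made up for illustration):

max_iters = 10000
iters_per_inner_epoch = 2500

# mirrors `for start_iters in range(self.start_iters, self.max_iters, self.iters_per_inner_epoch)`
for start_iters in range(0, max_iters, iters_per_inner_epoch):
    end_iters = start_iters + iters_per_inner_epoch
    print("inner epoch over iters [{}, {})".format(start_iters, end_iters))
# four inner epochs, matching max_epoch == max_iters // iters_per_inner_epoch == 4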
1
2023-11-14 08:57:59+00:00
8k
MorrisNein/pecapiku
tests/unit/interface/test_cache_classes.py
[ { "identifier": "CacheDict", "path": "pecapiku/cache_dict.py", "snippet": "class CacheDict(BaseCache, Generic[DecoratedCallable]):\n \"\"\" Decorator/context manager for caching of evaluation results.\n Creates a \"pickle\" file at disk space on a specified path.\n\n If used as a context, provides a dictionary to put/read values in.\n To do so, use the syntax \"with *instance*: ...\".\n\n If used as a decorator, wraps a function and stores its execution results in s dictionary.\n To do so, use the method ``CacheDict.decorate()``.\n\n Args:\n\n file_path - a path to an existing or non-existent pickle file.\n If a relative path or a filename is given, puts it into the framework cache directory.\n\n access - cache access indicators. The string may include the following indicators:\n - ``r`` - read - grants access to read the cache file content\n - ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present)\n - ``w`` - write - grants access to modify the cache file content\n\n Examples\n --------\n Example 1:\n\n >>> with CacheDict('example_cache_dict.pkl') as cache_dict:\n ... x = np.array([[1, 2], [3, 4]])\n ... x_T = cache_dict['x_T'] # Read the cache first\n ... if isinstance(x_T, NoCache): # If cache not found,\n ... x_T = x.T # then execute the value\n ... cache_dict['x_T'] = x_T # Put the value in cache\n ... print(cache_dict)\n ...\n {'x_T': array([[1, 3],\n [2, 4]])}\n\n Example 2:\n\n >>> import numpy as np\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6], [7, 8]])\n >>> cached_mult = CacheDict.decorate(np.multiply,file_path='np_multiplication.pkl') # Retrieve hashable representation of args.\n ...\n >>> cached_mult(a, b)\n array([[ 5, 12],\n [21, 32]])\n\n \"\"\"\n\n @classmethod\n def _get_default_file_path(cls):\n return COMP_CACHE_FILE_NAME\n\n def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):\n super().__init__(file_path, access)\n self.cache_dict = None\n\n def __call__(self,\n func: DecoratedCallable | None = None,\n outer_key: Hashable | None = None,\n inner_key: str | Callable[[Any], Hashable] | None = None) -> DecoratedCallable | Decorator:\n return self.decorate(func=func, outer_key=outer_key, inner_key=inner_key)\n\n def _get_cache_val(self, key: Hashable) -> Any:\n initialize_cache_dict(self.file_path)\n return self.cache_dict[key]\n\n def _put_cache_val(self, key: Hashable, value: Any) -> None:\n self.cache_dict[key] = value\n\n def _key_func(self, func, func_agrs, func_kwargs, inner_key, outer_key) -> Hashable:\n if outer_key is not None:\n key = outer_key\n elif inner_key is not None:\n key = parse_key(inner_key, func, *func_agrs, **func_kwargs)\n else:\n hash_objects = [func.__name__, func_agrs, func_kwargs]\n\n if ismethod(func):\n hash_objects.insert(0, func.__self__)\n\n key = get_hash(hash_objects)\n return key\n\n @classmethod\n def _decorate(cls,\n func: DecoratedCallable | None = None,\n file_path: os.PathLike | str | None = None,\n access: CacheAccess = 'rew',\n outer_key: Hashable | None = None,\n inner_key: str | Callable[[Any], Hashable] | None = None) -> DecoratedCallable | Decorator:\n if outer_key is not None and inner_key is not None:\n raise ValueError('At most one of (outer key, inner key) can be specified.')\n\n file_path = _resolve_filepath(file_path)\n\n @wraps(func)\n def decorated(*args, **kwargs):\n instance = cls(file_path, access)\n with instance:\n val = instance._read_execute_write(func, func_args=args, func_kwargs=kwargs, access=access,\n 
key_kwargs=dict(outer_key=outer_key, inner_key=inner_key))\n return val\n if func is None:\n decorator_return = partial(\n cls._decorate,\n file_path=file_path,\n access=access,\n outer_key=outer_key,\n inner_key=inner_key)\n else:\n decorator_return = decorated\n return decorator_return\n\n @omnimethod\n def decorate(self, func: DecoratedCallable | None = None,\n file_path: os.PathLike | str | None = None,\n access: CacheAccess | None = None,\n outer_key: Hashable | None = None,\n inner_key: str | Callable[[Any], Hashable] | None = None) -> DecoratedCallable | Decorator:\n \"\"\" Wraps a function and stores its execution results into a pickled cache dictionary.\n\n Examples:\n\n >>> import numpy as np\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6], [7, 8]])\n >>> cached_mult = CacheDict.decorate(\n ... np.multiply,\n ... file_path='np_multiplication.pkl',\n ... inner_key='tuple(map(lambda a: a.data.tobytes(), args))')\n ...\n >>> cached_mult(a, b)\n array([[ 5, 12],\n [21, 32]])\n\n >>> import time\n >>> def do_some_heavy_computing(how_heavy):\n ... time.sleep(how_heavy)\n ... return how_heavy ** 2\n ...\n >>> c_do_some_heavy_computing = CacheDict.decorate(\n ... do_some_heavy_computing,\n ... file_path='sheer_chaos.pkl',\n ... inner_key='how_heavy')\n ...\n >>> for i in range(10):\n ... c_do_some_heavy_computing(i)\n ...\n >>> with CacheDict('sheer_chaos.pkl') as cache:\n ... print(cache)\n ...\n {0: 0, 1: 1, 2: 4, 3: 9, 4: 16, 5: 25, 6: 36, 7: 49, 8: 64, 9: 81}\n\n Params:\n\n func - a function to decorate.\n\n file_path - a path to an existing or non-existent pickle file.\n If a relative path or a filename is given, puts it into the framework cache directory.\n\n access - cache access indicators. The string may include the following indicators:\n - ``r`` - read - grants access to read the cache file content\n - ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present)\n - ``w`` - write - grants access to modify the cache file content\n\n outer_key - a constant hashable key to store the function call's result.\n\n inner_key - a callable or a code expression that evaluates a hashable key to store\n the function call's result. 
To do so, use argument names that are used inside the function.\n Some functions do not support signatures and will throw an error.\n You may use \"args\" and \"kwargs\" in your expression instead.\n \"\"\"\n if outer_key is not None and inner_key is not None:\n raise ValueError('At most one of (outer key, inner key) can be specified.')\n\n return super().decorate(\n func=func,\n file_path=file_path,\n access=access,\n outer_key=outer_key,\n inner_key=inner_key,\n )\n\n def __enter__(self) -> MyDefaultDict:\n if 'r' in self.access:\n self.file_path = _resolve_filepath(self.file_path)\n self.cache_dict = initialize_cache_dict(self.file_path)\n else:\n self.cache_dict = MyDefaultDict(NoCache)\n return self.cache_dict\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if 'w' in self.access:\n update_cache(self.cache_dict, self.file_path)\n self.cache_dict.clear()\n self.cache_dict = None\n\n def get(self, key: None | Hashable) -> NoCache | MyDefaultDict | Any:\n file_path = _resolve_filepath(self.file_path)\n cache_dict = _initialize_cache(file_path)\n if key is None:\n return cache_dict\n return cache_dict[key]" }, { "identifier": "SingleValueCache", "path": "pecapiku/single_value_cache.py", "snippet": "class SingleValueCache(BaseCache, Generic[DecoratedCallable]):\n \"\"\" Decorator for caching of evaluation results.\n Creates a \"pickle\" file at disk space on a specified path.\n Wraps a function and stores its execution result in the file.\n To apply, use the method ``SingleValueCache.decorate()`` or ``SingleValueCache(...)()``.\n\n Args:\n\n file_path - a path to an existing or non-existent pickle file.\n If a relative path or a filename is given, puts it into the framework cache directory.\n\n access - cache access indicators. The string may include the following indicators:\n - ``r`` - read - grants access to read the cache file content\n - ``e`` - execute/evaluate - grants access to evaluate the decorated function (if such is present)\n - ``w`` - write - grants access to modify the cache file content\n\n Example\n -------\n >>> import time\n >>> from timeit import timeit\n >>> def a_heavy_function():\n ... time.sleep(1)\n ...\n ... @SingleValueCache('a_heavy_function.pkl') # or @SingleValueCache.decorate(file_path='a_heavy_function.pkl')\n >>> def a_heavy_function_cached():\n ... 
time.sleep(1)\n >>> print(timeit(a_heavy_function, number=10)) # 10.070\n >>> print(timeit(a_heavy_function_cached, number=10)) # 1.015\n \"\"\"\n\n @classmethod\n def _get_default_file_path(cls) -> None:\n return None\n\n def __init__(self, file_path: os.PathLike | str | None = None, access: CacheAccess = 'rew'):\n super().__init__(file_path, access)\n self.cache_dict = None\n\n def __call__(self,\n func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None,\n access: CacheAccess | None = None) -> DecoratedCallable | Decorator:\n return self.decorate(func=func, file_path=file_path, access=access)\n\n def _get_cache_val(self, key: Hashable) -> Any:\n return _initialize_cache(self.file_path)\n\n def _put_cache_val(self, key: Hashable, value: Any):\n return update_cache(value, self.file_path)\n\n def _key_func(self, *args, **kwargs) -> Hashable:\n return 0\n\n @classmethod\n def _decorate(cls, func: DecoratedCallable | None = None,\n file_path: os.PathLike | str | None = None,\n access: CacheAccess = 'rew') -> DecoratedCallable | Decorator:\n \"\"\" Wraps a function and stores its execution results into a pickle cache file.\n\n Example\n -------\n >>> import time\n >>> from timeit import timeit\n >>> def a_heavy_function():\n ... time.sleep(1)\n ... return 42\n ...\n >>> cached_func = SingleValueCache.decorate(a_heavy_function,'a_heavy_function.pkl')\n >>> print(timeit(a_heavy_function, number=10)) # 10.070\n >>> print(timeit(cached_func, number=10)) # 1.015\n\n Params:\n\n func - a function to decorate.\n\n file_path - a path to an existing or non-existent pickle file.\n If a relative path or a filename is given, puts it into the framework cache directory.\n\n access - cache access indicators. The string may include the following indicators:\n - ``r`` - read - grants access to read the cache file content\n - ``e`` - execute/evaluate - grants access to evaluate the decorated function\n - ``w`` - write - grants access to modify the cache file content\n \"\"\"\n if file_path is None:\n raise ValueError(f'A \"file_path\" should be specified for \"{cls.__name__}\", got \"None\".')\n file_path = _resolve_filepath(file_path)\n\n @wraps(func)\n def decorated(*args, **kwargs):\n instance = cls(file_path, access)\n val = instance._read_execute_write(func, func_args=args, func_kwargs=kwargs, access=access)\n return val\n\n if func is None:\n decorator_return = partial(cls._decorate, file_path=file_path, access=access)\n else:\n decorator_return = decorated\n return decorator_return\n\n @staticmethod\n def get(file_path: os.PathLike | str) -> NoCache | Any:\n file_path = _resolve_filepath(file_path)\n return _initialize_cache(file_path)\n\n @omnimethod\n def decorate(self, func: DecoratedCallable | None = None, *, file_path: os.PathLike | str | None = None,\n access: CacheAccess | None = None, **kwargs) -> DecoratedCallable | Decorator:\n \"\"\" Wraps a function and stores its execution results into a pickle cache file.\n\n Example\n --------\n >>> import time\n >>> from timeit import timeit\n >>> def a_heavy_function():\n ... time.sleep(1)\n ... 
return 42\n ...\n >>> cached_func = SingleValueCache.decorate(a_heavy_function,'a_heavy_function.pkl')\n >>> print(timeit(a_heavy_function, number=10)) # 10.070\n >>> print(timeit(cached_func, number=10)) # 1.015\n\n Params:\n\n func - a function to decorate.\n\n file_path - a path to an existing or non-existent pickle file.\n If a relative path or a filename is given, puts it into the framework cache directory.\n\n access - cache access indicators. The string may include the following indicators:\n - ``r`` - read - grants access to read the cache file content\n - ``e`` - execute/evaluate - grants access to evaluate the decorated function\n - ``w`` - write - grants access to modify the cache file content\n \"\"\"\n return super().decorate(\n func=func,\n file_path=file_path,\n access=access\n )" }, { "identifier": "get_cache_dir", "path": "tests/conftest.py", "snippet": "@pytest.fixture(scope='function')\ndef get_cache_dir(set_cache_dir):\n cache_dir = config.get_cache_dir()\n return cache_dir" }, { "identifier": "set_cache_dir", "path": "tests/conftest.py", "snippet": "@pytest.fixture(scope='function', autouse=True)\ndef set_cache_dir():\n cache_dir = get_project_root() / 'tests' / '.proj_cache'\n config.set_cache_dir(cache_dir)\n yield\n [f.unlink() for f in cache_dir.glob(\"*\") if f.is_file()]" } ]
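Based on the `CacheDict` docstrings above, a typical read-miss-compute-store cycle looks like the following sketch. It assumes `pecapiku` is installed; the import path for `NoCache` is an assumption, since the context does not show where it lives:

import numpy as np
from pecapiku import CacheDict
from pecapiku.no_cache import NoCache  # assumed module path; the context uses NoCache without showing its import

with CacheDict('example_cache_dict.pkl') as cache_dict:
    x = np.array([[1, 2], [3, 4]])
    x_T = cache_dict['x_T']        # read the cache first
    if isinstance(x_T, NoCache):   # cache miss
        x_T = x.T                  # compute the value
        cache_dict['x_T'] = x_T    # store it for the next run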
from functools import wraps
from itertools import product
from time import sleep, time
from typing import Any

from pecapiku import CacheDict, SingleValueCache
from tests.conftest import get_cache_dir, set_cache_dir  # noqa

import pytest
3,998
class TestObject:
    def __init__(self, foo: Any):
        self.foo = foo

    def sleep(self, time_: float) -> float:
        sleep(time_)
        return time_


class TestObjectWithCounter:
    def __init__(self, foo: Any):
        self.foo = foo
        self.counter = 0

    def sleep(self, time_: float) -> float:
        self.counter += 1
        sleep(time_)
        return time_


def sleep_(time_: float):
    sleep(time_)
    return time_


def timed(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        t1 = time()
        res = func(*args, **kwargs)
        t2 = time()
        t = t2 - t1
        return res, t

    return wrapper


@pytest.mark.parametrize('sleep_func', [sleep_, TestObject(1).sleep])
@pytest.mark.parametrize('cache_decorator, cache_kwargs', [
class TestObject:
    def __init__(self, foo: Any):
        self.foo = foo

    def sleep(self, time_: float) -> float:
        sleep(time_)
        return time_


class TestObjectWithCounter:
    def __init__(self, foo: Any):
        self.foo = foo
        self.counter = 0

    def sleep(self, time_: float) -> float:
        self.counter += 1
        sleep(time_)
        return time_


def sleep_(time_: float):
    sleep(time_)
    return time_


def timed(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        t1 = time()
        res = func(*args, **kwargs)
        t2 = time()
        t = t2 - t1
        return res, t

    return wrapper


@pytest.mark.parametrize('sleep_func', [sleep_, TestObject(1).sleep])
@pytest.mark.parametrize('cache_decorator, cache_kwargs', [
*product([SingleValueCache(), SingleValueCache.decorate], [dict(file_path='some.pkl')]),
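The `next_line` above unpacks `itertools.product` into the parametrize list. A small illustration of what that expansion produces (plain strings stand in for the real decorator objects):

from itertools import product

decorators = ['SingleValueCache()', 'SingleValueCache.decorate']  # strings stand in for the real objects
kwargs_variants = [dict(file_path='some.pkl')]

cases = list(product(decorators, kwargs_variants))
print(cases)
# [('SingleValueCache()', {'file_path': 'some.pkl'}),
#  ('SingleValueCache.decorate', {'file_path': 'some.pkl'})]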
1
2023-11-17 12:10:01+00:00
8k
mmjing/BalancedOSDA
utils2.py
[ { "identifier": "VonMisesFisher", "path": "hyperspherical_vae/distributions/von_mises_fisher.py", "snippet": "class VonMisesFisher(torch.distributions.Distribution):\n\n arg_constraints = {'loc': torch.distributions.constraints.real,\n 'scale': torch.distributions.constraints.positive}\n support = torch.distributions.constraints.real\n has_rsample = True\n _mean_carrier_measure = 0\n\n @property\n def mean(self):\n return self.loc * (ive(self.__m / 2, self.scale) / ive(self.__m / 2 - 1, self.scale))\n\n @property\n def stddev(self):\n return self.scale\n\n def __init__(self, loc, scale, validate_args=None):\n self.dtype = loc.dtype\n self.loc = loc\n self.scale = scale\n self.device = loc.device\n self.__m = loc.shape[-1]\n self.__e1 = (torch.Tensor([1.] + [0] * (loc.shape[-1] - 1))).to(self.device)\n\n super(VonMisesFisher, self).__init__(self.loc.size(), validate_args=validate_args)\n\n def sample(self, shape=torch.Size()):\n with torch.no_grad():\n return self.rsample(shape)\n\n def rsample(self, shape=torch.Size()):\n shape = shape if isinstance(shape, torch.Size) else torch.Size([shape])\n\n w = self.__sample_w3(shape=shape) if self.__m == 3 else self.__sample_w_rej(shape=shape)\n\n v = (torch.distributions.Normal(0, 1).sample(\n shape + torch.Size(self.loc.shape)).to(self.device).transpose(0, -1)[1:]).transpose(0, -1)\n v = v / v.norm(dim=-1, keepdim=True)\n\n w_ = torch.sqrt(torch.clamp(1 - (w ** 2), 1e-10))\n x = torch.cat((w, w_ * v), -1)\n z = self.__householder_rotation(x)\n\n return z.type(self.dtype)\n\n def __sample_w3(self, shape):\n shape = shape + torch.Size(self.scale.shape)\n u = torch.distributions.Uniform(0, 1).sample(shape).to(self.device)\n self.__w = 1 + torch.stack([torch.log(u), torch.log(1 - u) - 2 * self.scale], dim=0).logsumexp(0) / self.scale\n return self.__w\n\n def __sample_w_rej(self, shape):\n c = torch.sqrt((4 * (self.scale ** 2)) + (self.__m - 1) ** 2)\n b_true = (-2 * self.scale + c) / (self.__m - 1)\n\n # using Taylor approximation with a smooth swift from 10 < scale < 11\n # to avoid numerical errors for large scale\n b_app = (self.__m - 1) / (4 * self.scale)\n s = torch.min(torch.max(torch.tensor([0.], device=self.device),\n self.scale - 10), torch.tensor([1.], device=self.device))\n b = b_app * s + b_true * (1 - s)\n\n a = (self.__m - 1 + 2 * self.scale + c) / 4\n d = (4 * a * b) / (1 + b) - (self.__m - 1) * math.log(self.__m - 1)\n\n self.__b, (self.__e, self.__w) = b, self.__while_loop(b, a, d, shape)\n return self.__w\n\n def __while_loop(self, b, a, d, shape):\n\n b, a, d = [e.repeat(*shape, *([1] * len(self.scale.shape))) for e in (b, a, d)]\n w, e, bool_mask = torch.zeros_like(b).to(self.device), torch.zeros_like(\n b).to(self.device), (torch.ones_like(b) == 1).to(self.device)\n\n shape = shape + torch.Size(self.scale.shape)\n\n while bool_mask.sum() != 0:\n e_ = torch.distributions.Beta((self.__m - 1) / 2, (self.__m - 1) /\n 2).sample(shape[:-1]).reshape(shape).to(self.device)\n u = torch.distributions.Uniform(0, 1).sample(shape).to(self.device)\n\n w_ = (1 - (1 + b) * e_) / (1 - (1 - b) * e_)\n t = (2 * a * b) / (1 - (1 - b) * e_)\n\n accept = ((self.__m - 1) * t.log() - t + d) > torch.log(u)\n reject = 1 - accept\n\n w[bool_mask * accept] = w_[bool_mask * accept]\n e[bool_mask * accept] = e_[bool_mask * accept]\n\n bool_mask[bool_mask * accept] = reject[bool_mask * accept]\n\n return e, w\n\n def __householder_rotation(self, x):\n u = (self.__e1 - self.loc)\n u = u / (u.norm(dim=-1, keepdim=True) + 1e-5)\n z = x - 2 * (x * u).sum(-1, 
keepdim=True) * u\n return z\n\n def entropy(self):\n output = - self.scale * ive(self.__m / 2, self.scale) / ive((self.__m / 2) - 1, self.scale)\n\n return output.view(*(output.shape[:-1])) + self._log_normalization()\n\n def log_prob(self, x):\n return self._log_unnormalized_prob(x) - self._log_normalization()\n\n def _log_unnormalized_prob(self, x):\n output = self.scale * (self.loc * x).sum(-1, keepdim=True)\n\n return output.view(*(output.shape[:-1]))\n\n def _log_normalization(self):\n output = - ((self.__m / 2 - 1) * torch.log(self.scale) - (self.__m / 2) * math.log(2 * math.pi) - (\n self.scale + torch.log(ive(self.__m / 2 - 1, self.scale))))\n\n return output.view(*(output.shape[:-1]))" }, { "identifier": "HypersphericalUniform", "path": "hyperspherical_vae/distributions/hyperspherical_uniform.py", "snippet": "class HypersphericalUniform(torch.distributions.Distribution):\n\n support = torch.distributions.constraints.real\n has_rsample = False\n _mean_carrier_measure = 0\n\n @property\n def dim(self):\n return self._dim\n\n @property\n def device(self):\n return self._device\n\n @device.setter\n def device(self, val):\n self._device = val if isinstance(val, torch.device) else torch.device(val)\n\n def __init__(self, dim, validate_args=None, device=\"cpu\"):\n super(HypersphericalUniform, self).__init__(torch.Size([dim]), validate_args=validate_args)\n self._dim = dim\n self.device = device\n\n def sample(self, shape=torch.Size()):\n output = torch.distributions.Normal(0, 1).sample(\n (shape if isinstance(shape, torch.Size) else torch.Size([shape])) + torch.Size([self._dim + 1])).to(self.device)\n\n return output / output.norm(dim=-1, keepdim=True)\n\n def entropy(self):\n return self.__log_surface_area().cuda()\n\n def log_prob(self, x):\n return - torch.ones(x.shape[:-1], device=self.device) * self.__log_surface_area()\n\n def __log_surface_area(self):\n return math.log(2) + ((self._dim + 1) / 2) * math.log(math.pi) - torch.lgamma(\n torch.Tensor([(self._dim + 1) / 2], device=self.device))" }, { "identifier": "angular_dist", "path": "utils.py", "snippet": "def angular_dist(inputs, centers, eps=1e-6): \n # num_class = len(inputs)\n inputs = F.normalize(inputs)\n centers = F.normalize(centers)\n dist = torch.acos(torch.clamp(torch.matmul(inputs,centers.transpose(0,1)), -1.+eps, 1-eps))\n return dist" } ]
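The `angular_dist` snippet above computes the arccos of clamped cosine similarity. A self-contained sketch of the same computation on random tensors:

import torch
import torch.nn.functional as F

def angular_dist(inputs, centers, eps=1e-6):
    # same computation as the context snippet: arccos of clamped cosine similarity
    inputs = F.normalize(inputs)
    centers = F.normalize(centers)
    return torch.acos(torch.clamp(inputs @ centers.transpose(0, 1), -1. + eps, 1 - eps))

z = torch.randn(5, 16)   # 5 latent vectors
mu = torch.randn(3, 16)  # 3 class means
d = angular_dist(z, mu)  # distances in radians, shape (5, 3), values in (0, pi)
print(d.shape, d.min().item() > 0)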
import torch.optim as opt
import torch
import gc
import os
import libmr
from basenet import *
from copy import copy, deepcopy
from hyperspherical_vae.distributions import VonMisesFisher
from hyperspherical_vae.distributions import HypersphericalUniform
from utils import angular_dist
5,037
        data_outlier_probs (list): List of outlier probabilities for an entire dataset, categorized by class.
        num_classes (int): Number of classes.
        num_outlier_threshs (int): Number of outlier rejection priors (evenly spread over the interval (0,1)).

    Returns:
        dict: Dictionary containing outlier percentages and corresponding rejection prior values.
    """
    dataset_outliers = []
    threshs = []

    # loop through each rejection prior value and evaluate the percentage of the dataset being considered as
    # statistical outliers, i.e. each data point's outlier probability > rejection prior.
    for i in range(num_outlier_threshs - 1):
        outlier_threshold = (i + 1) * (1.0 / num_outlier_threshs)
        threshs.append(outlier_threshold)
        dataset_outliers.append(0)
        total_dataset = 0

        for j in range(num_classes):
            total_dataset += len(data_outlier_probs[j])
            for k in range(len(data_outlier_probs[j])):
                if data_outlier_probs[j][k] > outlier_threshold:
                    dataset_outliers[i] += 1

        dataset_outliers[i] = dataset_outliers[i] / float(total_dataset)

    return {"thresholds": threshs, "outlier_percentage": dataset_outliers}


def calc_mean_class_acc(data_outlier_probs, label_pred_class_list_t, num_class):
    threshs = [0.98]
    num_outlier_threshs = len(threshs)
    label_pred_class_list_t = np.array(label_pred_class_list_t, dtype=object)
    best_OS_star_acc = 0
    best_unk = 0
    best_H = 0
    for i in range(num_outlier_threshs):
        total_dataset = 0
        label_pred_class_list_t_copy = deepcopy(label_pred_class_list_t)
        for j in range(num_class - 1):
            total_dataset += len(data_outlier_probs[j])
            for k in range(len(data_outlier_probs[j])):
                if data_outlier_probs[j][k] > threshs[i]:
                    label_pred_class_list_t_copy[1][j][k] = num_class - 1
        all_pred = np.concatenate(np.array(label_pred_class_list_t_copy[1]), axis=0)
        all_label = np.concatenate(np.array(label_pred_class_list_t_copy[0]), axis=0)
        per_class_num = np.zeros((num_class))
        per_class_correct1 = np.zeros((num_class)).astype(np.float32)
        for t in range(num_class):
            ind = np.where(all_label == t)[0]
            if len(ind) == 0:
                continue
            correct_ind = np.where(all_pred[ind] == t)[0]
            per_class_correct1[t] += float(len(correct_ind))
            per_class_num[t] += float(len(ind))
        per_class_acc1 = per_class_correct1 / per_class_num
        OS_acc1 = float(per_class_acc1.mean())
        OS_star_acc1 = float(per_class_acc1[:-1].mean())
        unk_acc1 = float(per_class_acc1[-1])
        H_acc = 0
        if OS_star_acc1 > 0 and unk_acc1 > 0:
            H_acc = 2 * OS_star_acc1 * unk_acc1 / (OS_star_acc1 + unk_acc1)
        if H_acc > best_H:
            best_H = H_acc
            best_OS_star_acc = OS_star_acc1
            best_unk = unk_acc1
    return best_OS_star_acc, best_unk, best_H


def correct_dist(distances_to_z_means_threshset, centroid_distance):
    num_class = centroid_distance.shape[0]
    for i in range(num_class - 1):
        len_class = len(distances_to_z_means_threshset[i])
        if len_class > 0:
            distances_to_z_means_threshset[i] = torch.clamp(
                distances_to_z_means_threshset[i] - 1.0 * centroid_distance[i].expand(len_class), min=0.0)
    return distances_to_z_means_threshset


def inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=1000):
    return initial_lr * ((1 + gamma * min(1.0, step / float(max_iter))) ** (- power))


class OptimWithSheduler:
    def __init__(self, optimizer, scheduler_func):
        self.optimizer = optimizer
        self.scheduler_func = scheduler_func
        self.global_step = 0.0
        for g in self.optimizer.param_groups:
            g['initial_lr'] = g['lr']

    def zero_grad(self):
        self.optimizer.zero_grad()

    def step(self):
        for g in self.optimizer.param_groups:
            g['lr'] = self.scheduler_func(step=self.global_step, initial_lr=g['initial_lr'])
        self.optimizer.step()
        self.global_step += 1


def reparameterize(z_mean, z_var, distribution='vmf'):
    if distribution == 'normal':
        q_z = torch.distributions.normal.Normal(z_mean, z_var)
    elif distribution == 'vmf':
        q_z = VonMisesFisher(z_mean, z_var)
    else:
        raise NotImplementedError
    return q_z


def reparameterize2(z_mean, z_var, z_dim, distribution='vmf'):
    if distribution == 'normal':
        q_z = torch.distributions.normal.Normal(z_mean, z_var)
        p_z = torch.distributions.normal.Normal(torch.zeros_like(z_mean), torch.ones_like(z_var))
    elif distribution == 'vmf':
        q_z = VonMisesFisher(z_mean, z_var)
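`inverseDecaySheduler` above is a pure function of the step count, so its schedule is easy to inspect in isolation. A sketch (the function is restated under a hypothetical name to keep this runnable on its own):

def inverse_decay(step, initial_lr, gamma=10, power=0.75, max_iter=1000):
    # same formula as inverseDecaySheduler above; renamed only for this standalone sketch
    return initial_lr * ((1 + gamma * min(1.0, step / float(max_iter))) ** (-power))

for step in [0, 100, 1000]:
    print(step, round(inverse_decay(step, initial_lr=0.01), 5))
# 0 -> 0.01 (no decay yet), 100 -> ~0.00595, 1000 -> ~0.00166; the decay saturates once step >= max_iter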
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

# os.system('file ./libmr.so')


def get_model(net, num_class=13, feat_size=100, d_hid_size=2048):
    if net == 'vgg1':
        model_g = VGGBase(feat_size=feat_size)
        model_c1 = Classifier(num_classes=num_class, feat_size=feat_size)
        model_d = AdversarialNetwork(in_feature=feat_size, hidden_size=d_hid_size)
    if net == 'vgg2':
        model_g = VGGFc(vgg_name='VGG19BN', bottleneck_dim=feat_size)
        model_c1 = Classifier(num_classes=num_class, feat_size=feat_size)
        model_d = AdversarialNetwork(in_feature=feat_size, hidden_size=d_hid_size)
    if net == 'vgg3':
        model_g = VGGFc(vgg_name='VGG19', bottleneck_dim=feat_size)
        model_c1 = Classifier(num_classes=num_class, feat_size=feat_size)
        model_d = AdversarialNetwork(in_feature=feat_size, hidden_size=d_hid_size)
    if net == 'resnet1':
        model_g = ResBase(option='resnet50', pret=True, feat_size=feat_size)
        model_c1 = ResClassifier(num_classes=num_class, feat_size=feat_size)
        model_d = AdversarialNetwork(in_feature=feat_size, hidden_size=d_hid_size)
    if net == 'resnet2':
        model_g = ResNetFc(resnet_name='ResNet50', bottleneck_dim=feat_size)
        model_c1 = ResClassifier(num_classes=num_class, feat_size=feat_size)
        model_d = AdversarialNetwork(in_feature=feat_size, hidden_size=d_hid_size)
    return model_g, model_c1, model_d


def get_optimizer_visda(args, G, C1, C2, D):
    update_lower = args.update_lower
    if not update_lower:
        print('NOT update lower!')
        params = list(list(G.linear1.parameters()) + list(G.linear2.parameters()) + list(
            G.bn1.parameters()) + list(G.bn2.parameters()))
        # + list(G.bn4.parameters()) + list(
        # G.bn3.parameters()) + list(G.linear3.parameters()) + list(G.linear4.parameters()))
    else:
        print('update lower!')
        params = G.parameters()
    optimizer_g = opt.SGD(params, lr=args.lr_g, momentum=0.9, weight_decay=0.0005, nesterov=True)
    optimizer_c1 = opt.SGD(list(C1.parameters()), momentum=0.9, lr=args.lr_c1, weight_decay=0.0005, nesterov=True)
    optimizer_c2 = opt.SGD(list(C2.parameters()), momentum=0.9, lr=args.lr_c2, weight_decay=0.0005, nesterov=True)
    optimizerD = opt.Adam(D.parameters(), lr=args.lr_d)
    return optimizer_g, optimizer_c1, optimizer_c2, optimizerD


def bce_loss(output, target):
    output_neg = 1 - output
    target_neg = 1 - target
    result = torch.mean(target * torch.log(output + 1e-6))
    result += torch.mean(target_neg * torch.log(output_neg + 1e-6))
    return -torch.mean(result)


def Entropy(input_):
    bs = input_.size(0)
    epsilon = 1e-5
    entropy = -input_ * torch.log(input_ + epsilon)
    entropy = torch.sum(entropy, dim=1)
    return entropy


def DiscrepancyLoss(input_1, input_2, m=2.0):
    soft_1 = nn.functional.softmax(input_1, dim=1)
    soft_2 = nn.functional.softmax(input_2, dim=1)
    entropy_1 = - soft_1 * nn.functional.log_softmax(input_1, dim=1)
    entropy_2 = - soft_2 * nn.functional.log_softmax(input_2, dim=1)
    entropy_1 = torch.sum(entropy_1, dim=1)
    entropy_2 = torch.sum(entropy_2, dim=1)
    loss = torch.nn.ReLU()(m - torch.mean(entropy_1 - entropy_2))
    return loss


def EntropyLoss(input_1):
    soft_1 = nn.functional.softmax(input_1, dim=1)
    entropy_1 = - soft_1 * nn.functional.log_softmax(input_1, dim=1)
    entropy_1 = torch.sum(entropy_1, dim=1)
    # loss = torch.nn.ReLU()(m - torch.mean(entropy_1))
    loss = -torch.mean(entropy_1)
    return loss


def calc_entropy(input_1):
    soft_1 = nn.functional.softmax(input_1, dim=1)
    entropy_1 = - soft_1 * nn.functional.log_softmax(input_1, dim=1)
    entropy_1 = torch.sum(entropy_1, dim=1)
    return entropy_1


def save_model(encoder, classifier, centroid_distance, save_path):
    save_dic = {
        'encoder': encoder.state_dict(),
        'classifier': classifier.state_dict(),
        'centroid_distance': centroid_distance
    }
    torch.save(save_dic, save_path)


def load_model(encoder, classifier, load_path):
    checkpoint = torch.load(load_path)
    encoder.load_state_dict(checkpoint['encoder'])
    classifier.load_state_dict(checkpoint['classifier'])
    centroid_distance = checkpoint['centroid_distance']
    return encoder, classifier, centroid_distance


def adjust_learning_rate(optimizer, lr, batch_id, max_id, epoch, max_epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    beta = 0.75
    alpha = 10
    p = min(1, (batch_id + max_id * epoch) / float(max_id * max_epoch))
    lr = lr / (1 + alpha * p) ** (beta)  # min(1, 2 - epoch/float(20))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr


def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.01)
        m.bias.data.normal_(0.0, 0.01)
    elif classname.find('BatchNorm') != -1:
        m.weight.data.normal_(1.0, 0.01)
        m.bias.data.fill_(0)


def segment_sum(data, segment_ids):
    """
    Analogous to tf.segment_sum (https://www.tensorflow.org/api_docs/python/tf/math/segment_sum).

    :param data: A pytorch tensor of the data for segmented summation.
    :param segment_ids: A 1-D tensor containing the indices for the segmentation.
    :return: a tensor of the same type as data containing the results of the segmented summation.
    """
    if not all(segment_ids[i] <= segment_ids[i + 1] for i in range(len(segment_ids) - 1)):
        raise AssertionError("elements of segment_ids must be sorted")

    if len(segment_ids.shape) != 1:
        raise AssertionError("segment_ids has to be a 1-D tensor")

    if data.shape[0] != segment_ids.shape[0]:
        raise AssertionError("segment_ids should be the same size as dimension 0 of input.")

    num_segments = len(torch.unique(segment_ids))
    return unsorted_segment_sum(data, segment_ids, num_segments)


def unsorted_segment_sum(data, segment_ids, num_segments):
    """
    Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum.

    :param data: A tensor whose segments are to be summed.
    :param segment_ids: The segment indices tensor.
    :param num_segments: The number of segments.
    :return: A tensor of same data type as the data argument.
    """
    assert all([i in data.shape for i in segment_ids.shape]), "segment_ids.shape should be a prefix of data.shape"

    # segment_ids is a 1-D tensor repeat it to have the same shape as data
    if len(segment_ids.shape) == 1:
        s = torch.prod(torch.tensor(data.shape[1:])).long().cuda()
        segment_ids = segment_ids.repeat_interleave(s).view(segment_ids.shape[0], *data.shape[1:])

    assert data.shape == segment_ids.shape, "data.shape and segment_ids.shape should be equal"

    shape = [num_segments] + list(data.shape[1:])
    centroid = torch.zeros(*shape).cuda().scatter_add(0, segment_ids, data.float())
    centroid = centroid.type(data.dtype)
    return centroid


def unsorted_segment_sum_cpu(data, segment_ids, num_segments):
    """
    Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum.

    :param data: A tensor whose segments are to be summed.
    :param segment_ids: The segment indices tensor.
    :param num_segments: The number of segments.
    :return: A tensor of same data type as the data argument.
    """
    assert all([i in data.shape for i in segment_ids.shape]), "segment_ids.shape should be a prefix of data.shape"

    # segment_ids is a 1-D tensor repeat it to have the same shape as data
    if len(segment_ids.shape) == 1:
        s = torch.prod(torch.tensor(data.shape[1:])).long()
        segment_ids = segment_ids.repeat_interleave(s).view(segment_ids.shape[0], *data.shape[1:])

    assert data.shape == segment_ids.shape, "data.shape and segment_ids.shape should be equal"

    shape = [num_segments] + list(data.shape[1:])
    centroid = torch.zeros(*shape).scatter_add(0, segment_ids, data.float())
    centroid = centroid.type(data.dtype)
    return centroid


def get_means(tensors_list):
    """
    Calculate the mean of a list of tensors for each tensor in the list. In our case the list typically contains
    a tensor for each class, such as the per class z values.

    Parameters:
        tensors_list (list): List of Tensors

    Returns:
        list: List of Tensors containing mean vectors
    """
    means = []
    for i in range(len(tensors_list)):
        if isinstance(tensors_list[i], torch.Tensor):
            means.append(torch.mean(tensors_list[i], dim=0))
        else:
            means.append([])

    return means


def calc_distances_to_means(means, tensors, distance_function='angular'):
    """
    Function to calculate distances between tensors, in our case the mean zs per class and z for each input.
    Wrapper around torch.nn.functional distances with specification of which distance function to choose.

    Parameters:
        means (list): List of length corresponding to number of classes containing torch tensors (typically mean zs).
        tensors (list): List of length corresponding to number of classes containing tensors (typically zs).
        distance_function (str): Specification of distance function. Choice of cosine|euclidean|mix.

    Returns:
        list: List of length corresponding to number of classes containing tensors with distance values
    """
    def distance_func(a, b, distance_function):
        if distance_function == 'euclidean':
            d = torch.nn.functional.pairwise_distance(a.view(1, -1), b, p=2)
        elif distance_function == 'cosine':
            d = (1 - torch.nn.functional.cosine_similarity(a.view(1, -1), b))
        elif distance_function == 'angular':
            eps = 1e-6
            d = angular_dist(b, a.unsqueeze(0)).squeeze()
            # a = F.normalize(a.unsqueeze(0))
            # b = F.normalize(a.unsqueeze(0))
            # d = torch.acos(torch.clamp(torch.matmul(a,b.transpose(0,1)), -1.+eps, 1-eps))
        return d

    distances = []

    # loop through each class in means and calculate the distances with the respective tensor.
    for i in range(len(means)):
        # check for tensor type, e.g. list could be empty
        if isinstance(tensors[i], torch.Tensor) and isinstance(means[i], torch.Tensor):
            dist_tensor = distance_func(means[i], tensors[i], distance_function)
            if torch.numel(dist_tensor) == 1:
                dist_tensor = dist_tensor.unsqueeze(0)
            distances.append(dist_tensor)
        else:
            distances.append([])

    return distances


def fit_weibull_models(distribution_values, tailsizes, num_max_fits=5):
    """
    Function to fit weibull models on distribution values per class. The distribution values in our case are the
    distances of an input's approximate posterior value to the per class mean latent z. The Weibull model fits
    regions of high density and gives credible intervals.
    The tailsize specifies how many outliers are expected in the dataset for which the model has been trained.
    We use libmr https://github.com/Vastlab/libMR (installable through e.g. pip) for the Weibull model fitting.

    Parameters:
        distribution_values (list): Values on which the fit is conducted. In our case latent space distances.
        tailsizes (list): List of integers, specifying tailsizes per class. For a balanced dataset typically the same.
        num_max_fits (int): Number of attempts to fit the Weibull models before timing out and returning unsuccessfully.

    Returns:
        list: List of Weibull models with their respective parameters (stored in libmr class instances).
    """
    weibull_models = []

    # loop through the list containing distance values per class
    for i in range(len(distribution_values)):
        # for each class set the initial success to False and number of attempts to 0
        is_valid = False
        count = 0

        # If the list contains distance values conduct a fit. If it is empty, e.g. because there is not a single
        # prediction for the corresponding class, continue with the next class. Note that the latter isn't expected for
        # a model that has been trained for even just a short while.
        if isinstance(distribution_values[i], torch.Tensor):
            distribution_values[i] = distribution_values[i].cpu().numpy().astype(np.double)
            # weibull model per class
            weibull_models.append(libmr.MR(verbose=False, alpha=10.0))
            # attempt num_max_fits many fits before aborting
            while is_valid is False and count < num_max_fits:
                # conduct the fit with libmr
                weibull_models[i].fit_high(distribution_values[i], tailsizes[i])
                is_valid = weibull_models[i].is_valid
                count += 1
            if not is_valid:
                # print("Weibull fit for class " + str(i) + " not successful after " + str(num_max_fits) + " attempts")
                weibull_models[i] = []
        else:
            weibull_models.append([])

    return weibull_models, True


def calc_outlier_probs(weibull_models, distances):
    """
    Calculates statistical outlier probability using the weibull models' CDF.

    Note that we have coded this function to loop over each class because we have previously categorized the distances
    into their respective classes already.

    Parameters:
        weibull_models (list): List of libmr class instances containing the Weibull model parameters and functions.
        distances (list): List of per class torch tensors or numpy arrays with latent space distance values.

    Returns:
        list: List of length corresponding to number of classes with outlier probabilities for each respective input.
    """
    outlier_probs = []

    # loop through all classes, i.e. all available weibull models as there is one weibull model per class.
    for i in range(len(weibull_models)):
        # optionally convert the type of the distance vectors
        if isinstance(weibull_models[i], list):
            outlier_probs.append([])
            continue
        if isinstance(distances[i], torch.Tensor):
            distances[i] = distances[i].cpu().numpy().astype(np.double)
        elif isinstance(distances[i], list):
            # empty list
            outlier_probs.append([])
            continue
        else:
            distances[i] = distances[i].astype(np.double)

        # use the Weibull models' CDF to evaluate statistical outlier rejection probabilities.
        outlier_probs.append(weibull_models[i].w_score_vector(distances[i]))

    return outlier_probs


def calc_openset_classification(data_outlier_probs, num_classes, num_outlier_threshs=50):
    """
    Calculates the percentage of dataset outliers given a set of outlier probabilities over a range of rejection priors.

    Parameters:
        data_outlier_probs (list): List of outlier probabilities for an entire dataset, categorized by class.
        num_classes (int): Number of classes.
        num_outlier_threshs (int): Number of outlier rejection priors (evenly spread over the interval (0,1)).

    Returns:
        dict: Dictionary containing outlier percentages and corresponding rejection prior values.
    """
    dataset_outliers = []
    threshs = []

    # loop through each rejection prior value and evaluate the percentage of the dataset being considered as
    # statistical outliers, i.e. each data point's outlier probability > rejection prior.
    for i in range(num_outlier_threshs - 1):
        outlier_threshold = (i + 1) * (1.0 / num_outlier_threshs)
        threshs.append(outlier_threshold)
        dataset_outliers.append(0)
        total_dataset = 0

        for j in range(num_classes):
            total_dataset += len(data_outlier_probs[j])
            for k in range(len(data_outlier_probs[j])):
                if data_outlier_probs[j][k] > outlier_threshold:
                    dataset_outliers[i] += 1

        dataset_outliers[i] = dataset_outliers[i] / float(total_dataset)

    return {"thresholds": threshs, "outlier_percentage": dataset_outliers}


def calc_mean_class_acc(data_outlier_probs, label_pred_class_list_t, num_class):
    threshs = [0.98]
    num_outlier_threshs = len(threshs)
    label_pred_class_list_t = np.array(label_pred_class_list_t, dtype=object)
    best_OS_star_acc = 0
    best_unk = 0
    best_H = 0
    for i in range(num_outlier_threshs):
        total_dataset = 0
        label_pred_class_list_t_copy = deepcopy(label_pred_class_list_t)
        for j in range(num_class - 1):
            total_dataset += len(data_outlier_probs[j])
            for k in range(len(data_outlier_probs[j])):
                if data_outlier_probs[j][k] > threshs[i]:
                    label_pred_class_list_t_copy[1][j][k] = num_class - 1
        all_pred = np.concatenate(np.array(label_pred_class_list_t_copy[1]), axis=0)
        all_label = np.concatenate(np.array(label_pred_class_list_t_copy[0]), axis=0)
        per_class_num = np.zeros((num_class))
        per_class_correct1 = np.zeros((num_class)).astype(np.float32)
        for t in range(num_class):
            ind = np.where(all_label == t)[0]
            if len(ind) == 0:
                continue
            correct_ind = np.where(all_pred[ind] == t)[0]
            per_class_correct1[t] += float(len(correct_ind))
            per_class_num[t] += float(len(ind))
        per_class_acc1 = per_class_correct1 / per_class_num
        OS_acc1 = float(per_class_acc1.mean())
        OS_star_acc1 = float(per_class_acc1[:-1].mean())
        unk_acc1 = float(per_class_acc1[-1])
        H_acc = 0
        if OS_star_acc1 > 0 and unk_acc1 > 0:
            H_acc = 2 * OS_star_acc1 * unk_acc1 / (OS_star_acc1 + unk_acc1)
        if H_acc > best_H:
            best_H = H_acc
            best_OS_star_acc = OS_star_acc1
            best_unk = unk_acc1
    return best_OS_star_acc, best_unk, best_H


def correct_dist(distances_to_z_means_threshset, centroid_distance):
    num_class = centroid_distance.shape[0]
    for i in range(num_class - 1):
        len_class = len(distances_to_z_means_threshset[i])
        if len_class > 0:
            distances_to_z_means_threshset[i] = torch.clamp(
                distances_to_z_means_threshset[i] - 1.0 * centroid_distance[i].expand(len_class), min=0.0)
    return distances_to_z_means_threshset


def inverseDecaySheduler(step, initial_lr, gamma=10, power=0.75, max_iter=1000):
    return initial_lr * ((1 + gamma * min(1.0, step / float(max_iter))) ** (- power))


class OptimWithSheduler:
    def __init__(self, optimizer, scheduler_func):
        self.optimizer = optimizer
        self.scheduler_func = scheduler_func
        self.global_step = 0.0
        for g in self.optimizer.param_groups:
            g['initial_lr'] = g['lr']

    def zero_grad(self):
        self.optimizer.zero_grad()

    def step(self):
        for g in self.optimizer.param_groups:
            g['lr'] = self.scheduler_func(step=self.global_step, initial_lr=g['initial_lr'])
        self.optimizer.step()
        self.global_step += 1


def reparameterize(z_mean, z_var, distribution='vmf'):
    if distribution == 'normal':
        q_z = torch.distributions.normal.Normal(z_mean, z_var)
    elif distribution == 'vmf':
        q_z = VonMisesFisher(z_mean, z_var)
    else:
        raise NotImplementedError
    return q_z


def reparameterize2(z_mean, z_var, z_dim, distribution='vmf'):
    if distribution == 'normal':
        q_z = torch.distributions.normal.Normal(z_mean, z_var)
        p_z = torch.distributions.normal.Normal(torch.zeros_like(z_mean), torch.ones_like(z_var))
    elif distribution == 'vmf':
        q_z = VonMisesFisher(z_mean, z_var)
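`calc_openset_classification` above sweeps rejection priors over (0, 1) and reports the rejected fraction at each. A tiny worked example with made-up probabilities, calling the function defined above:

data_outlier_probs = [[0.05, 0.40, 0.95], [0.10, 0.99]]  # per-class outlier probabilities, 5 samples total

result = calc_openset_classification(data_outlier_probs, num_classes=2, num_outlier_threshs=4)
print(result["thresholds"])          # [0.25, 0.5, 0.75]
print(result["outlier_percentage"])  # [0.6, 0.4, 0.4]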
p_z = HypersphericalUniform(z_dim - 1)
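`reparameterize2` pairs the posterior with a prior so a KL term can be computed. The sketch below exercises only the 'normal' branch, which needs nothing beyond PyTorch; the 'vmf' branch requires the hyperspherical_vae package shown in the imports:

import torch

z_mean = torch.zeros(8, 16)
z_var = torch.ones(8, 16) * 0.5  # interpreted as the scale of the Gaussian here

q_z = torch.distributions.normal.Normal(z_mean, z_var)
p_z = torch.distributions.normal.Normal(torch.zeros_like(z_mean), torch.ones_like(z_var))

z = q_z.rsample()                                            # differentiable sample
kl = torch.distributions.kl.kl_divergence(q_z, p_z).sum(-1)  # per-example KL against the prior
print(z.shape, kl.shape)  # torch.Size([8, 16]) torch.Size([8])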
1
2023-11-13 09:00:25+00:00
8k
SitaoLuan/When-Do-GNNs-Help
homophily_tests.py
[ { "identifier": "random_disassortative_splits", "path": "utils/homophily_metrics.py", "snippet": "def remove_self_loops(edge_index, edge_attr=None):\ndef edge_homophily(A, labels, ignore_negative=False):\ndef node_homophily(A, labels):\ndef node_homophily_edge_idx(edge_idx, labels, num_nodes):\ndef compact_matrix_edge_idx(edge_idx, labels):\ndef our_measure(edge_index, label):\ndef class_distribution(A, labels):\ndef adjusted_homo(A, label):\ndef label_informativeness(A, label):\ndef generalized_edge_homophily(adj, features, label, sample_max=75000, iteration=10):\ndef similarity(features, adj, label, hard=None, LP=1, ifsum=1, idx_train=None):\ndef gntk_homophily_(features, adj, sample, n_layers):\ndef classifier_based_performance_metric(features, adj, labels, sample_max, base_classifier='kernel_reg1', epochs=100):\n H = torch.zeros((c, c)).to(edge_index.to(device))\n H = H / torch.sum(H, axis=1, keepdims=True)\n H = compact_matrix_edge_idx(edge_index.to(device), label.to(device))\n LI = 2 - torch.sum(pc * torch.log(pc)) / torch.sum(p_bar * torch.log(p_bar))\n K_G = 1 / pi * (G_gram * (pi - arccos) + sqrt)\n K_G = G_gram\n K_X = 1 / pi * (gram * (pi - arccos) + sqrt)\n K_X = gram\n X = features[sample].cpu()\n X = features[sample].cpu()" }, { "identifier": "row_normalized_adjacency", "path": "utils/util_funcs.py", "snippet": "def row_normalized_adjacency(adj):\n # adj = sp.coo_matrix(adj)\n adj = adj + sp.eye(adj.shape[0])\n adj_normalized = sk_normalize(adj, norm='l1', axis=1)\n # row_sum = np.array(adj.sum(1))\n # row_sum = (row_sum == 0)*1+row_sum\n # adj_normalized = adj/row_sum\n return sp.coo_matrix(adj_normalized)" }, { "identifier": "sys_normalized_adjacency", "path": "utils/util_funcs.py", "snippet": "def sys_normalized_adjacency(adj):\n adj = sp.coo_matrix(adj)\n adj = adj + sp.eye(adj.shape[0])\n row_sum = np.array(adj.sum(1))\n row_sum = (row_sum == 0) * 1 + row_sum\n d_inv_sqrt = np.power(row_sum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return d_mat_inv_sqrt.dot(adj).dot(d_mat_inv_sqrt).tocoo()" }, { "identifier": "full_load_data_large", "path": "utils/util_funcs.py", "snippet": "def full_load_data_large(dataset_name, sage_data=False):\n if dataset_name in {'cora', 'citeseer', 'pubmed'}:\n adj, features, labels = load_data(dataset_name)\n labels = np.argmax(labels, axis=-1)\n features = features.todense()\n elif dataset_name in {'CitationFull_dblp', 'Coauthor_CS', 'Coauthor_Physics', 'Amazon_Computers', 'Amazon_Photo'}:\n dataset, name = dataset_name.split(\"_\")\n adj, features, labels = load_torch_geometric_data(dataset, name)\n\n elif dataset_name in {'Flickr', 'WikiCS'}:\n adj, features, labels = load_torch_geometric_data(dataset_name, None)\n elif dataset_name in {'Crocodile-5'}:\n adj, features, labels = read_WGCN_crocodile()\n\n elif dataset_name in {'Crocodile-6'}:\n adj, features, labels = read_SuperGAT_crocodile()\n elif dataset_name == 'deezer-europe':\n dataset = load_deezer_dataset()\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n elif dataset_name == 'yelp-chi':\n dataset = load_yelpchi_dataset()\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = 
sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label.unsqueeze(1)\n elif dataset_name == 'Penn94':\n dataset = load_fb100_dataset('Penn94')\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n elif dataset_name == 'arxiv-year':\n dataset = load_arxiv_year_dataset()\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n elif dataset_name == 'pokec':\n dataset = load_pokec_mat()\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n elif dataset_name == 'snap-patents':\n dataset = load_snap_patents_mat()\n print('Done Loading...')\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n print('Done To Undirected...')\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n elif dataset_name == \"genius\":\n dataset = load_genius()\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n elif dataset_name == \"twitch-gamer\":\n dataset = load_twitch_gamer_dataset()\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n elif dataset_name == \"wiki\":\n dataset = load_wiki()\n dataset.graph['edge_index'] = to_undirected(dataset.graph['edge_index'])\n row, col = dataset.graph['edge_index']\n N = dataset.graph['num_nodes']\n adj = sp.coo_matrix((np.ones(row.shape[0]), (row, col)), shape=(N, N))\n features, labels = dataset.graph['node_feat'], dataset.label\n else:\n graph_adjacency_list_file_path = os.path.join(path.dirname(path.abspath(__file__)), '../new_data', dataset_name,\n 'out1_graph_edges.txt')\n graph_node_features_and_labels_file_path = os.path.join(path.dirname(path.abspath(__file__)), '../new_data',\n dataset_name,\n 'out1_node_feature_label.txt')\n\n G = nx.DiGraph().to_undirected()\n graph_node_features_dict = {}\n graph_labels_dict = {}\n\n if dataset_name == 'film':\n with open(graph_node_features_and_labels_file_path) as graph_node_features_and_labels_file:\n graph_node_features_and_labels_file.readline()\n for line in graph_node_features_and_labels_file:\n line = line.rstrip().split('\\t')\n assert (len(line) == 3)\n assert (int(line[0]) not in graph_node_features_dict and int(line[0]) not in graph_labels_dict)\n feature_blank = np.zeros(932, dtype=np.uint8)\n 
feature_blank[np.array(line[1].split(','), dtype=np.uint16)] = 1\n graph_node_features_dict[int(line[0])] = feature_blank\n graph_labels_dict[int(line[0])] = int(line[2])\n else:\n with open(graph_node_features_and_labels_file_path) as graph_node_features_and_labels_file:\n graph_node_features_and_labels_file.readline()\n for line in graph_node_features_and_labels_file:\n line = line.rstrip().split('\\t')\n assert (len(line) == 3)\n assert (int(line[0]) not in graph_node_features_dict and int(line[0]) not in graph_labels_dict)\n graph_node_features_dict[int(line[0])] = np.array(line[1].split(','), dtype=np.uint8)\n graph_labels_dict[int(line[0])] = int(line[2])\n\n with open(graph_adjacency_list_file_path) as graph_adjacency_list_file:\n graph_adjacency_list_file.readline()\n for line in graph_adjacency_list_file:\n line = line.rstrip().split('\\t')\n assert (len(line) == 2)\n if int(line[0]) not in G:\n G.add_node(int(line[0]), features=graph_node_features_dict[int(line[0])],\n label=graph_labels_dict[int(line[0])])\n if int(line[1]) not in G:\n G.add_node(int(line[1]), features=graph_node_features_dict[int(line[1])],\n label=graph_labels_dict[int(line[1])])\n G.add_edge(int(line[0]), int(line[1]))\n\n adj = nx.adjacency_matrix(G, sorted(G.nodes()))\n features = np.array(\n [features for _, features in sorted(G.nodes(data='features'), key=lambda x: x[0])])\n labels = np.array(\n [label for _, label in sorted(G.nodes(data='label'), key=lambda x: x[0])])\n\n features = th.FloatTensor(features).to(device)\n labels = th.LongTensor(labels).to(device)\n\n if sage_data == True:\n if dataset_name in {'yelp-chi', 'deezer-europe'}:\n g = dgl.DGLGraph(adj + sp.eye(N)).to(device)\n else:\n g = dgl.DGLGraph(adj + sp.eye(adj.shape[0])).to(device)\n # Adapted from https://docs.dgl.ai/tutorials/models/1_gnn/1_gcn.html\n g.ndata['features'] = features\n g.ndata['labels'] = labels\n degs = g.in_degrees().float()\n norm = th.pow(degs, -1).to(device)\n norm[th.isinf(norm)] = 0\n g.ndata['norm'] = norm.unsqueeze(1)\n return g, features, labels\n\n if dataset_name in {'Crocodile-5', 'Crocodile-6'}:\n adj = torch.tensor(adj).to(torch.float32).to_sparse()\n else:\n adj = sparse_mx_to_torch_sparse_tensor(adj) # .to(device)\n print('Done Processing...')\n\n return adj, features, labels" }, { "identifier": "normalize_tensor", "path": "utils/util_funcs.py", "snippet": "def normalize_tensor(mx, symmetric=0):\n \"\"\"Row-normalize sparse matrix\"\"\"\n rowsum = torch.sum(mx, 1)\n if symmetric == 0:\n r_inv = torch.pow(rowsum, -1).flatten()\n r_inv[torch.isinf(r_inv)] = 0.\n r_mat_inv = torch.diag(r_inv)\n mx = torch.mm(r_mat_inv, mx)\n return mx\n\n else:\n r_inv = torch.pow(rowsum, -0.5).flatten()\n r_inv[torch.isinf(r_inv)] = 0.\n r_mat_inv = torch.diag(r_inv)\n mx = torch.mm(torch.mm(r_mat_inv, mx), r_mat_inv)\n return mx" }, { "identifier": "sparse_mx_to_torch_sparse_tensor", "path": "utils/util_funcs.py", "snippet": "def sparse_mx_to_torch_sparse_tensor(sparse_mx):\n \"\"\"Convert a scipy sparse matrix to a torch sparse tensor.\"\"\"\n sparse_mx = sparse_mx.tocoo().astype(np.float32)\n indices = torch.from_numpy(\n np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))\n values = torch.from_numpy(sparse_mx.data)\n shape = torch.Size(sparse_mx.shape)\n return torch.sparse.FloatTensor(indices, values, shape)" } ]
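The context above closes with sparse_mx_to_torch_sparse_tensor. As a quick illustration of what that conversion produces, here is a minimal sketch on a hypothetical 2x2 COO matrix (the toy matrix and variable names are illustrative assumptions, not part of the record; the steps mirror the snippet above):

import numpy as np
import scipy.sparse as sp
import torch

# Toy symmetric adjacency with two nonzeros, as a scipy COO matrix (assumed example).
mx = sp.coo_matrix((np.ones(2), ((0, 1), (1, 0))), shape=(2, 2)).tocoo().astype(np.float32)
# Same steps as the helper: stack (row, col) indices, carry over values and shape.
indices = torch.from_numpy(np.vstack((mx.row, mx.col)).astype(np.int64))
values = torch.from_numpy(mx.data)
t = torch.sparse.FloatTensor(indices, values, torch.Size(mx.shape))
# t.to_dense() -> tensor([[0., 1.], [1., 0.]])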
import argparse import os import numpy as np import torch import torch.nn.functional as f from pathlib import Path from torch_geometric.utils.convert import to_scipy_sparse_matrix from utils.homophily_metrics import random_disassortative_splits, classifier_based_performance_metric, similarity, \ adjusted_homo, \ label_informativeness, node_homophily, our_measure, edge_homophily, generalized_edge_homophily from utils.util_funcs import row_normalized_adjacency, sys_normalized_adjacency, full_load_data_large, normalize_tensor, \ sparse_mx_to_torch_sparse_tensor
4,306
if torch.cuda.is_available(): device = 'cuda:0' else: device = 'cpu' device = torch.device(device) ifsum = 1 num_exp = 10 ACMGCN_FEATURES_PATH = os.path.dirname(os.path.abspath(__file__)) + '/data/acmgcn_features/' Path(ACMGCN_FEATURES_PATH).mkdir(parents=True, exist_ok=True) BASE_CLASSIFIERS = ['kernel_reg0', 'kernel_reg1', 'gnb'] SMALL_DATASETS = ['cornell', 'wisconsin', 'texas', 'film', 'chameleon', 'squirrel', 'cora', 'citeseer', 'pubmed'] LARGE_DATASETS = ['deezer-europe', 'Penn94', 'arxiv-year', "genius", "twitch-gamer", 'pokec', 'snap-patents'] DATASETS = SMALL_DATASETS + LARGE_DATASETS METRIC_LIST = { "node_homo": lambda adj, labels: node_homophily(adj, labels), "edge_homo": lambda adj, labels: edge_homophily(adj, labels), "class_homo": lambda adj, labels: our_measure(adj, labels), "node_hom_generalized": lambda adj, features, labels: generalized_edge_homophily(adj, features, labels), "agg_homo_soft": lambda x: np.mean(x), "agg_homo_hard": lambda x: np.mean(x), "adj_homo": lambda adj, labels: adjusted_homo(adj, labels), "label_info": lambda adj, labels: label_informativeness(adj, labels), "kernel_reg0_based_homo": lambda *args, **kwargs: classifier_based_performance_metric(*args, **kwargs), "kernel_reg1_based_homo": lambda *args, **kwargs: classifier_based_performance_metric(*args, **kwargs), "gnb_based_homo": lambda *args, **kwargs: classifier_based_performance_metric(*args, **kwargs) } parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.') parser.add_argument('--dataset_name', type=str, required=True, choices=DATASETS, help=f"The data set name, please select from the following list: \n" f"{DATASETS}") parser.add_argument('--symmetric', type=float, default=0, help='1 for symmetric renormalized adj, 0 for random walk renormalized adj') parser.add_argument('--sample_max', type=float, default=500, help='maximum number of samples used in gntk') parser.add_argument('--base_classifier', type=str, default='kernel_reg1', choices=BASE_CLASSIFIERS, help='The classifier used for performance metric (kernel_reg1, kernel_reg0, svm_linear, svm_rbf, svm_poly, gnb)') parser.add_argument('--homophily_metric', required=True, choices=list(METRIC_LIST.keys()), help="The metric to measure homophily, please select from the following list: \n" "[ \n" " node_homo (node homophily), \n" " edge_homo (edge homophily), \n" " class_homo (class homophily), \n" " node_hom_generalized (generalized node homophily), \n" " agg_homo_soft (aggregation homophily with soft LAS), \n" " agg_homo_hard (aggregation homophily with hard LAS), \n" " adj_homo (adjusted homophily), \n" " label_info (label informativeness), \n" " kernel_reg0_based_homo (kernel based homophily with reg0), \n" " kernel_reg1_based_homo (kernel based homophily with reg1), \n" " gnb_based_homo (gnb-based homophily) \n" "]") args = parser.parse_args() dataset_name = args.dataset_name homophily_metric = args.homophily_metric homophily_lvl = -1 if dataset_name in SMALL_DATASETS: adj_low_unnormalized, features, labels = full_load_data_large(dataset_name) features = normalize_tensor(features).to(device) nnodes = (labels.shape[0]) adj = normalize_tensor(torch.eye(nnodes) + adj_low_unnormalized.to_dense(), symmetric=args.symmetric).to(device) adj = adj.to_sparse().to(device) labels = labels.to(device) else: adj_low_unnormalized, features, labels = full_load_data_large(dataset_name) nnodes = (labels.shape[0]) adj_low_pt =
ACMGCN_FEATURES_PATH + dataset_name + '_adj_low.pt' adj_high_pt = ACMGCN_FEATURES_PATH + dataset_name + '_adj_high.pt' features = f.normalize(features, p=1, dim=1) if os.path.exists(adj_low_pt) and os.path.exists(adj_high_pt): adj = torch.load(adj_low_pt) else: adj = to_scipy_sparse_matrix(adj_low_unnormalized.coalesce().indices()) if args.symmetric == 1:
if torch.cuda.is_available(): device = 'cuda:0' else: device = 'cpu' device = torch.device(device) ifsum = 1 num_exp = 10 ACMGCN_FEATURES_PATH = os.path.dirname(os.path.abspath(__file__)) + '/data/acmgcn_features/' Path(ACMGCN_FEATURES_PATH).mkdir(parents=True, exist_ok=True) BASE_CLASSIFIERS = ['kernel_reg0', 'kernel_reg1', 'gnb'] SMALL_DATASETS = ['cornell', 'wisconsin', 'texas', 'film', 'chameleon', 'squirrel', 'cora', 'citeseer', 'pubmed'] LARGE_DATASETS = ['deezer-europe', 'Penn94', 'arxiv-year', "genius", "twitch-gamer", 'pokec', 'snap-patents'] DATASETS = SMALL_DATASETS + LARGE_DATASETS METRIC_LIST = { "node_homo": lambda adj, labels: node_homophily(adj, labels), "edge_homo": lambda adj, labels: edge_homophily(adj, labels), "class_homo": lambda adj, labels: our_measure(adj, labels), "node_hom_generalized": lambda adj, features, labels: generalized_edge_homophily(adj, features, labels), "agg_homo_soft": lambda x: np.mean(x), "agg_homo_hard": lambda x: np.mean(x), "adj_homo": lambda adj, labels: adjusted_homo(adj, labels), "label_info": lambda adj, labels: label_informativeness(adj, labels), "kernel_reg0_based_homo": lambda *args, **kwargs: classifier_based_performance_metric(*args, **kwargs), "kernel_reg1_based_homo": lambda *args, **kwargs: classifier_based_performance_metric(*args, **kwargs), "gnb_based_homo": lambda *args, **kwargs: classifier_based_performance_metric(*args, **kwargs) } parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.') parser.add_argument('--dataset_name', type=str, required=True, choices=DATASETS, help=f"The data set name, please select from the following list: \n" f"{DATASETS}") parser.add_argument('--symmetric', type=float, default=0, help='1 for symmetric renormalized adj, 0 for random walk renormalized adj') parser.add_argument('--sample_max', type=float, default=500, help='maximum number of samples used in gntk') parser.add_argument('--base_classifier', type=str, default='kernel_reg1', choices=BASE_CLASSIFIERS, help='The classifier used for performance metric (kernel_reg1, kernel_reg0, svm_linear, svm_rbf, svm_poly, gnb)') parser.add_argument('--homophily_metric', required=True, choices=list(METRIC_LIST.keys()), help="The metric to measure homophily, please select from the following list: \n" "[ \n" " node_homo (node homophily), \n" " edge_homo (edge homophily), \n" " class_homo (class homophily), \n" " node_hom_generalized (generalized node homophily), \n" " agg_homo_soft (aggregation homophily with soft LAS), \n" " agg_homo_hard (aggregation homophily with hard LAS), \n" " adj_homo (adjusted homophily), \n" " label_info (label informativeness), \n" " kernel_reg0_based_homo (kernel based homophily with reg0), \n" " kernel_reg1_based_homo (kernel based homophily with reg1), \n" " gnb_based_homo (gnb-based homophily) \n" "]") args = parser.parse_args() dataset_name = args.dataset_name homophily_metric = args.homophily_metric homophily_lvl = -1 if dataset_name in SMALL_DATASETS: adj_low_unnormalized, features, labels = full_load_data_large(dataset_name) features = normalize_tensor(features).to(device) nnodes = (labels.shape[0]) adj = normalize_tensor(torch.eye(nnodes) + adj_low_unnormalized.to_dense(), symmetric=args.symmetric).to(device) adj = adj.to_sparse().to(device) labels = labels.to(device) else: adj_low_unnormalized, features, labels = full_load_data_large(dataset_name) nnodes = (labels.shape[0]) adj_low_pt =
ACMGCN_FEATURES_PATH + dataset_name + '_adj_low.pt' adj_high_pt = ACMGCN_FEATURES_PATH + dataset_name + '_adj_high.pt' features = f.normalize(features, p=1, dim=1) if os.path.exists(adj_low_pt) and os.path.exists(adj_high_pt): adj = torch.load(adj_low_pt) else: adj = to_scipy_sparse_matrix(adj_low_unnormalized.coalesce().indices()) if args.symmetric == 1:
adj = sys_normalized_adjacency(adj)
2
2023-11-12 22:52:06+00:00
8k
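The record above ends with the next_line `adj = sys_normalized_adjacency(adj)`, and its context also defines the random-walk variant row_normalized_adjacency. A minimal dense-numpy sketch of the two renormalizations on a toy 3-node path graph (the dense toy matrix is an assumption for illustration; the originals operate on scipy sparse matrices):

import numpy as np

A = np.array([[0., 1., 0.],
              [1., 0., 1.],
              [0., 1., 0.]])
A_hat = A + np.eye(A.shape[0])  # both helpers first add self-loops

# Symmetric renormalization (--symmetric 1): D^-1/2 (A + I) D^-1/2
row_sum = A_hat.sum(axis=1)
d_inv_sqrt = np.power(row_sum, -0.5)
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.  # guard against isolated nodes, as in the snippet
sym_norm = np.diag(d_inv_sqrt) @ A_hat @ np.diag(d_inv_sqrt)

# Random-walk renormalization (--symmetric 0): D^-1 (A + I), i.e. L1-normalize each row
rw_norm = A_hat / A_hat.sum(axis=1, keepdims=True)
assert np.allclose(rw_norm.sum(axis=1), 1.0)  # each row now sums to 1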
fkostadinov/pygape
test/test_pygape.py
[ { "identifier": "openai_completion", "path": "pygape/completion.py", "snippet": "def openai_completion(prompt: str) -> any:\n response = (None, None, None)\n try:\n client = OpenAI()\n completion = client.chat.completions.create(\n model='gpt-3.5-turbo',\n messages = [\n {\"role\": \"user\", \"content\": prompt},\n ],\n temperature=0.00000001)\n\n # Async\n #from openai import AsyncOpenAI\n #client = AsyncOpenAI()\n #completion = await client.chat.completions.create(model=\"gpt-3.5-turbo\", messages=[{\"role\": \"user\", \"content\": \"Hello world\"}])\n\n # Extract the json string response from the OpenAIObject\n json_str = completion.choices[0].message.content\n\n # Create a json object of type dict. According to prompt conventions, each json object\n # should at least have a key \"result\" and \"reason\" plus optionally other keys\n response = json.loads(json_str)\n\n except Exception as e:\n logging.error(e)\n\n return response" }, { "identifier": "sort", "path": "pygape/pygape.py", "snippet": "def sort(prompt: str, completion: Callable[[str], any]) -> dict:\n \"\"\"Sorts a list of concepts (strings) according to a given criterion\n\n Parameters\n ----------\n prompt : str\n A prompt string assembled from SortPrompt. Defines:\n 1. a list of concepts to be sorted,\n 2. a sort order (either ascending or descending)\n 3. a sort criterion.\n\n completion : Callable[[str], any]\n A completion function that accepts a prompt as a string and returns a json object\n\n Returns\n -------\n dict\n A json object as the output of the language model.\n The dict must contain at least two key-value pairs:\n 1. \"result\": [...the list of sorted concepts...]\n 2. \"reason\": \"...the reasoning provided for the sorting decisions...\"\n 3. (Optional) Further key-value pairs such as the sort order or the sort criterion\n \"\"\"\n logging.debug(\"pygape.sort: Sending prompt to completion API\")\n response = _run_prompt(prompt, completion) # return type: json object as a dict\n return response" }, { "identifier": "SortPrompt", "path": "pygape/pygape.py", "snippet": "class SortPrompt:\n system_role: str\n items: List[str]\n order: SortOrder\n criterion: str\n\n def to_str(self) -> str:\n items = \", \".join(self.items)\n prompt = \"\"\"\n ### Instructions\n You are {system_role}.\n Your task is to sort below list of items in {order} order according to {criterion}.\n Once you have sorted them, provide a reasoning for the sort order you have selected.\n Return the output as a JSON object in the format:\n {{\n \"result\": [\"item 1\", \"item 2\", ..., \"item n\"],\n \"sort_order\": \"EITHER ascending OR descending\",\n \"sort_criterion\": \"the criterion applied to sort\",\n \"reason\": \"description why you sorted all items accordingly\"\n }}\n ### Example\n List of items to sort: [\"house\", \"racing horse\", \"bicycle\", \"pizza\"]\n Sort order: ascending\n Sort criterion: purchasing price\n Expected output:\n {{\n \"result\": [\"pizza\", \"bicycle\", \"racing horse\", \"house\"],\n \"reason\": \"A house is more expensive than a racing horse. A racing horse is more expensive than a bicycle. 
A bicycle is more expensive than a pizza.\",\n \"sort_order\": \"ascending\",\n \"sort_criterion\": \"purchasing price\"\n }}\n ### Input\n {items}\"\"\".format(system_role=self.system_role, order=self.order.name, criterion=self.criterion, items=items) \n return textwrap.dedent(prompt).strip()" }, { "identifier": "SortOrder", "path": "pygape/pygape.py", "snippet": "class SortOrder(Enum):\n ascending = 1\n descending = -1" }, { "identifier": "filter", "path": "pygape/pygape.py", "snippet": "def filter(prompt: str, completion: Callable[[str], any]) -> dict:\n \"\"\"Filters a list of concepts (strings) according to a given criterion\n\n Parameters\n ----------\n prompt : str\n A prompt string assembled from FilterPrompt. Defines:\n 1. a list of concepts to be filtered,\n 2. a filter criterion.\n\n completion : Callable[[str], any]\n A completion function that accepts a prompt as a string and returns a json object\n\n Returns\n -------\n dict\n A json object as the output of the language model.\n The dict must contain at least two key-value pairs:\n 1. \"result\": [...the list of filtered concepts...]\n 2. \"reason\": \"[...a list of reasons for filter decisions...]\"\n 3. (Optional) Further key-value pairs such as the filter criterion applied\n \"\"\"\n logging.debug(\"pygape.filter: Sending prompt to completion API\")\n response = _run_prompt(prompt, completion) # return type: json object as a dict\n return response" }, { "identifier": "FilterPrompt", "path": "pygape/pygape.py", "snippet": "class FilterPrompt:\n system_role: str\n items: List[str]\n criterion: str\n\n def to_str(self) -> str:\n items = \", \".join(self.items)\n prompt = \"\"\"\n ### Instructions\n You are {system_role}.\n Your task is to filter below list of items according to {criterion}.\n Also provide a reason for each item that you kept, explaining why you did not filter it out.\n Return the output as a JSON object in the format:\n {{\n \"result\": [\"item 1\", \"item 2\", ..., \"item n\"],\n \"reason\": [\"reason to keep item 1\", \"reason to keep item 2\", ..., \"reason to keep item n\"],\n \"filter_criterion\": \"the criterion applied to filter\"\n }}\n ### Example\n List of items to filter: [\"house\", \"racing horse\", \"bicycle\", \"pizza\"]\n Filter criterion: a person can ride this object\n Expected output:\n {{\n \"result\": [\"bicycle\", \"racing horse\"],\n \"reason\": [\"a person can ride a bicycle\", \"a person can ride a racing horse\"],\n \"filter_criterion\": \"a person can ride this object\" \n }}\n ### Input\n {items}\"\"\".format(system_role=self.system_role, criterion=self.criterion, items=items)\n return textwrap.dedent(prompt).strip()" }, { "identifier": "find", "path": "pygape/pygape.py", "snippet": "def find(prompt: str, completion: Callable[[str], any]) -> dict:\n \"\"\"Finds the first concept (list item) in a list of concepts that matches a certain criterion \n\n Parameters\n ----------\n prompt : str\n A prompt string assembled from FindPrompt. Defines:\n 1. a list of concepts to be searched,\n 2. a matching criterion.\n\n completion : Callable[[str], any]\n A completion function that accepts a prompt as a string and returns a json object\n\n Returns\n -------\n dict\n A json object as the output of the language model.\n The dict must contain at least two key-value pairs:\n 1. \"result\": \"the first concept (item) found matching in the list\"\n 2. \"reason\": \"the reasoning provided why the item matches\"\n 3. 
(Optional) Further key-value pairs such as the matching criterion applied\n \"\"\"\n logging.debug(\"pygape.find: Sending prompt to completion API\")\n response = _run_prompt(prompt, completion) # return type: json object as a dict\n return response" }, { "identifier": "FindPrompt", "path": "pygape/pygape.py", "snippet": "class FindPrompt:\n system_role: str\n items: List[str]\n criterion: str\n \n def to_str(self) -> str:\n items = \", \".join(self.items)\n prompt = \"\"\"\n ### Instructions\n You are {system_role}.\n Your task is to find the first item in the list that matches the criterion: {criterion}.\n Also provide a reason why you picked this item but not any of the other items in the list.\n Return the output as a JSON object in the format:\n {{\n \"result\": \"the first item found in the list that matches the given criterion\",\n \"reason\": \"reason why you picked this item and not any others prior to it\",\n \"matching_criterion\": \"the matching criterion applied\"\n }}\n ### Example\n List of items [\"Paris\", \"Rome\", \"Canberra\", \"Singapore\", \"Albuquerque\", \"Berlin\", \"London\", \"Krakow\", \"Dar Es Salaam\"]\n Criterion: This city is not a capital of its country.\n Expected output:\n {{\n \"result\": \"Albuquerque\",\n \"reason\": \"Albuquerque is the first item in the list that is not the capital of its country (USA). Also Krakow is not the capital of Poland, and Dar Es Salaam is not the capital of Tanzania, but they occur later in the list.\",\n \"matching_criterion\": \"is not the capital city of the country it belongs to\" \n }}\n ### Input\n {items}\"\"\".format(system_role=self.system_role, criterion=self.criterion, items=items)\n return textwrap.dedent(prompt).strip() " }, { "identifier": "truthy", "path": "pygape/pygape.py", "snippet": "def truthy(prompt: str, completion: Callable[[str], any]) -> dict:\n \"\"\"Returns either True or False given a certain statement\n\n Parameters\n ----------\n prompt : str\n A prompt string assembled from TruthyPrompt. Defines:\n 1. a statement that can either be true or false\n\n completion : Callable[[str], any]\n A completion function that accepts a prompt as a string and returns a json object\n\n Returns\n -------\n dict\n A json object as the output of the language model.\n The dict must contain at least two key-value pairs:\n 1. \"result\": \"True\" OR \"False\"\n 2. \"reason\": \"the reasoning provided why the statement is true or false\"\n 3. 
(Optional) Further key-value pairs\n \"\"\"\n logging.debug(\"pygape.truthy: Sending prompt to completion API\")\n response = _run_prompt(prompt, completion) # return type: json object as a dict\n return response" }, { "identifier": "TruthyPrompt", "path": "pygape/pygape.py", "snippet": "class TruthyPrompt:\n system_role: str\n statement: str\n\n def to_str(self) -> str:\n prompt = \"\"\"\n ### Instructions\n You are {system_role}.\n Your task is to decide if the statement given below is true or false.\n Return the output as a JSON object in the format:\n {{\n \"result\": \"true OR false\",\n \"reason\": \"The reason for your decision\"\n }}\n ### Example\n Statement: Water freezes at 100 degrees Celsius.\n Expected output:\n {{\n \"result\": \"false\",\n \"reason\": \"Water freezes at 0 degrees Celsius, but it boils at 100 degrees Celsius.\"\n }}\n ### Input\n {statement}\"\"\".format(system_role=self.system_role, statement=self.statement)\n return textwrap.dedent(prompt).strip()" }, { "identifier": "condition", "path": "pygape/pygape.py", "snippet": "def condition(prompt: str, completion: Callable[[str], any]) -> dict:\n \"\"\"Returns True if a given statement fulfills a given criterion, and False otherwise\n\n Parameters\n ----------\n prompt : str\n A prompt string assembled from ConditionPrompt. Defines:\n 1. a statement that is evaluated according to a given criterion\n 2. a criterion to apply to the given statement\n\n completion : Callable[[str], any]\n A completion function that accepts a prompt as a string and returns a json object\n\n Returns\n -------\n dict\n A json object as the output of the language model.\n The dict must contain at least two key-value pairs:\n 1. \"result\": \"True\" OR \"False\"\n 2. \"reason\": \"the reasoning provided why the statement is true or false\"\n 3. (Optional) Further key-value pairs such as the criterion applied\n \"\"\"\n logging.debug(\"pygape.condition: Sending prompt to completion API\")\n response = _run_prompt(prompt, completion) # return type: json object as a dict\n return response" }, { "identifier": "ConditionPrompt", "path": "pygape/pygape.py", "snippet": "class ConditionPrompt:\n system_role: str\n statement: str\n criterion: str\n\n def to_str(self) -> str:\n prompt = \"\"\"\n ### Instructions\n You are {system_role}.\n Your task is to decide if the statement given below fulfills the following criterion or not: {criterion}.\n Also provide a reason why the statement fulfills the criterion or not.\n Return the output as a JSON object in the format:\n {{\n \"result\": \"true OR false\",\n \"reason\": \"The reason for your decision\",\n \"criterion\": \"The criterion applied\"\n }}\n ### Example\n Statement: Lilies are delicate flowers - give them too much water and they will die!\n Criterion: The statement contains reference to at least one type of plant, but there may also be references to other concepts.\n Expected output:\n {{\n \"result\": \"true\",\n \"reason\": \"The statement refers to lilies which are a type of plant.\",\n \"criterion\": \"Contains a reference to at least one type of plant\"\n }}\n ### Input\n {statement}\"\"\".format(system_role=self.system_role, statement=self.statement, criterion=self.criterion)\n return textwrap.dedent(prompt).strip()" } ]
import unittest import logging import os import openai import json from dotenv import load_dotenv from pygape.completion import openai_completion from pygape.pygape import \ sort, SortPrompt, SortOrder, \ filter, FilterPrompt, \ find, FindPrompt, \ truthy, TruthyPrompt, \ condition, ConditionPrompt
4,078
### # How to start: python -m unittest test.test_pygape.PyGapeTestCase -v ### class PyGapeTestCase(unittest.TestCase): @classmethod def setUpClass(cls): # TODO: Put this in a config file logging.basicConfig(filename='test_out.log', encoding='utf-8', level=logging.DEBUG) # Add parent path and .env file in root directory to the test case paths dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env') load_dotenv(dotenv_path=dotenv_path) openai.api_key = os.getenv("OPENAI_API_KEY") super().setUpClass() def test_sort(self): logging.info("################################ test_sort ################################ ") sort_params = SortPrompt( system_role = "a helpful assistant", items = ["cat", "rat", "mouse", "elephant", "fly", "tiger", "bacteria", "goldfish"], order = SortOrder.descending, criterion = "their physical weight" ) expected = ["elephant", "tiger", "cat", "rat", "mouse", "goldfish", "fly", "bacteria"] prompt = sort_params.to_str() json_response = sort(prompt, openai_completion) retrieved = json_response["result"] # type: list self.assertEqual(len(retrieved), len(expected)) for i in range(len(expected)): self.assertEqual(retrieved[i], expected[i]) def test_filter(self): logging.info("################################ test_filter ################################ ") filter_params = FilterPrompt( system_role = "a helpful assistant", items = ["cat", "rock", "house", "elephant", "airplane", "tiger", "bottle", "gold"], criterion = "whether they are inanimate. Only keep the animate ones" ) expected = ["cat", "elephant", "tiger"] prompt = filter_params.to_str() json_response = filter(prompt, openai_completion) filtered_items = [item.lower() for item in json_response["result"]] # Ensure all strings are lower case; type: list reasons_to_keep = json_response["reason"] # type: list self.assertEqual(len(filtered_items), len(expected)) for i in range(len(expected)): self.assertTrue(expected[i].lower() in filtered_items) # Make sure all strings are lower case def test_find(self): logging.info("################################ test_find ################################ ") find_params = FindPrompt( system_role = "a helpful assistant", items = ["Lise Meitner", "Marie Curie", "Chien-Shiung Wu", "Alice Augusta Ball", "Marilyn Monroe", "Katherine Johnson"], criterion = "this person is or was not a female scientist. In case there exist multiple people with the same name, count them as a female scientist" ) expected = "Marilyn Monroe" prompt = find_params.to_str() json_response = find(prompt, openai_completion) found_item = json_response["result"] # type: str reason = json_response["reason"] # type: str self.assertEqual(found_item, expected) def test_truthy(self): logging.info("################################ test_truthy ################################ ")
### # How to start: python -m unittest test.test_pygape.PyGapeTestCase -v ### class PyGapeTestCase(unittest.TestCase): @classmethod def setUpClass(cls): # TODO: Put this in a config file logging.basicConfig(filename='test_out.log', encoding='utf-8', level=logging.DEBUG) # Add parent path and .env file in root directory to the test case paths dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env') load_dotenv(dotenv_path=dotenv_path) openai.api_key = os.getenv("OPENAI_API_KEY") super().setUpClass() def test_sort(self): logging.info("################################ test_sort ################################ ") sort_params = SortPrompt( system_role = "a helpful assistant", items = ["cat", "rat", "mouse", "elephant", "fly", "tiger", "bacteria", "goldfish"], order = SortOrder.descending, criterion = "their physical weight" ) expected = ["elephant", "tiger", "cat", "rat", "mouse", "goldfish", "fly", "bacteria"] prompt = sort_params.to_str() json_response = sort(prompt, openai_completion) retrieved = json_response["result"] # type: list self.assertEqual(len(retrieved), len(expected)) for i in range(len(expected)): self.assertEqual(retrieved[i], expected[i]) def test_filter(self): logging.info("################################ test_filter ################################ ") filter_params = FilterPrompt( system_role = "a helpful assistant", items = ["cat", "rock", "house", "elephant", "airplane", "tiger", "bottle", "gold"], criterion = "whether they are inanimate. Only keep the animate ones" ) expected = ["cat", "elephant", "tiger"] prompt = filter_params.to_str() json_response = filter(prompt, openai_completion) filtered_items = [item.lower() for item in json_response["result"]] # Ensure all strings are lower case; type: list reasons_to_keep = json_response["reason"] # type: list self.assertEqual(len(filtered_items), len(expected)) for i in range(len(expected)): self.assertTrue(expected[i].lower() in filtered_items) # Make sure all strings are lower case def test_find(self): logging.info("################################ test_find ################################ ") find_params = FindPrompt( system_role = "a helpful assistant", items = ["Lise Meitner", "Marie Curie", "Chien-Shiung Wu", "Alice Augusta Ball", "Marilyn Monroe", "Katherine Johnson"], criterion = "this person is or was not a female scientist. In case there exist multiple people with the same name, count them as a female scientist" ) expected = "Marilyn Monroe" prompt = find_params.to_str() json_response = find(prompt, openai_completion) found_item = json_response["result"] # type: str reason = json_response["reason"] # type: str self.assertEqual(found_item, expected) def test_truthy(self): logging.info("################################ test_truthy ################################ ")
truthy_params = TruthyPrompt(
9
2023-11-13 21:47:18+00:00
8k
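Every test in the record above goes through the same contract: render a *Prompt object to a string, hand it to a completion callable, and read "result"/"reason" from the returned dict. A minimal offline sketch of that flow, assuming SortPrompt accepts keyword arguments (its snippet shows dataclass-style annotated fields) and with the OpenAI call replaced by a hypothetical stub:

from pygape.pygape import sort, SortPrompt, SortOrder

def stub_completion(prompt: str) -> dict:
    # Hypothetical stand-in for openai_completion: any callable that takes the
    # rendered prompt and returns a dict with "result" and "reason" satisfies sort().
    return {"result": ["elephant", "cat", "fly"], "reason": "sorted by physical weight"}

prompt = SortPrompt(
    system_role="a helpful assistant",
    items=["cat", "elephant", "fly"],
    order=SortOrder.descending,
    criterion="their physical weight",
).to_str()

response = sort(prompt, stub_completion)
assert response["result"][0] == "elephant"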
doodledood/chat-flock
examples/three_way_ai_conductor.py
[ { "identifier": "InMemoryChatDataBackingStore", "path": "chatflock/backing_stores/in_memory.py", "snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __init__(\n self, messages: Optional[List[ChatMessage]] = None, participants: Optional[List[ChatParticipant]] = None\n ):\n self.messages = messages or []\n self.participants = {participant.name: participant for participant in (participants or [])}\n self.last_message_id = None if len(self.messages) == 0 else self.messages[-1].id\n\n def get_messages(self) -> List[ChatMessage]:\n return self.messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n self.last_message_id = self.last_message_id + 1 if self.last_message_id is not None else 1\n\n message = ChatMessage(\n id=self.last_message_id,\n sender_name=sender_name,\n content=content,\n timestamp=timestamp or datetime.datetime.now(),\n )\n\n self.messages.append(message)\n\n return message\n\n def clear_messages(self):\n self.messages = []\n self.last_message_id = None\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n participants = list(self.participants.values())\n active_participants = [\n participant for participant in participants if isinstance(participant, ActiveChatParticipant)\n ]\n\n return active_participants\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n participants = list(self.participants.values())\n participants = [\n participant for participant in participants if not isinstance(participant, ActiveChatParticipant)\n ]\n\n return participants\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if not isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if participant.name in self.participants:\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.participants[participant.name] = participant\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n if participant.name not in self.participants:\n raise ChatParticipantNotJoinedToChatError(participant.name)\n\n self.participants.pop(participant.name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return isinstance(participant, ActiveChatParticipant)\n\n return False\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return not isinstance(participant, ActiveChatParticipant)\n\n return False" }, { "identifier": "Chat", "path": "chatflock/base.py", "snippet": "class Chat:\n backing_store: ChatDataBackingStore\n renderer: ChatRenderer\n name: Optional[str] = None\n max_total_messages: Optional[int] = None\n hide_messages: bool = False\n\n def __init__(\n self,\n backing_store: 
ChatDataBackingStore,\n renderer: ChatRenderer,\n initial_participants: Optional[Sequence[ChatParticipant]] = None,\n name: Optional[str] = None,\n max_total_messages: Optional[int] = None,\n hide_messages: bool = False,\n ):\n if max_total_messages is not None and max_total_messages <= 0:\n raise ValueError(\"Max total messages must be None or greater than 0.\")\n\n self.backing_store = backing_store\n self.renderer = renderer\n self.name = name\n self.hide_messages = hide_messages\n self.max_total_messages = max_total_messages\n\n for i, participant in enumerate(initial_participants or []):\n self.add_participant(participant)\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(\n participant.name\n ):\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.backing_store.add_participant(participant)\n\n all_participants = (\n self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()\n )\n for participant in all_participants:\n participant.on_participant_joined_chat(chat=self, participant=participant)\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n self.backing_store.remove_participant(participant)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_participant_left_chat(chat=self, participant=participant)\n\n def add_message(self, sender_name: str, content: str) -> None:\n sender = self.backing_store.get_active_participant_by_name(sender_name)\n if sender is None:\n raise ChatParticipantNotJoinedToChatError(sender_name)\n\n message = self.backing_store.add_message(sender_name=sender_name, content=content)\n\n self.renderer.render_new_chat_message(chat=self, message=message)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_new_chat_message(chat=self, message=message)\n\n def get_messages(self) -> List[ChatMessage]:\n return self.backing_store.get_messages()\n\n def clear_messages(self):\n self.backing_store.clear_messages()\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n return self.backing_store.get_active_participants()\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n return self.backing_store.get_non_active_participants()\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n return self.backing_store.get_active_participant_by_name(name=name)\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n return self.backing_store.get_non_active_participant_by_name(name=name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_active_participant_with_name(participant_name=participant_name)\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)\n\n @property\n def active_participants_str(self):\n return \"\\n\\n\".join([participant.detailed_str() for 
participant in self.get_active_participants()])" }, { "identifier": "LangChainBasedAIChatConductor", "path": "chatflock/conductors/langchain.py", "snippet": "class LangChainBasedAIChatConductor(ChatConductor):\n def __init__(\n self,\n chat_model: BaseChatModel,\n goal: str = \"No explicit goal provided.\",\n composition_generator: Optional[ChatCompositionGenerator] = None,\n interaction_schema: Optional[str] = None,\n retriever: Optional[BaseRetriever] = None,\n spinner: Optional[Halo] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n ):\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.goal = goal\n self.tools = tools\n self.retriever = retriever\n self.composition_generator = composition_generator\n self.interaction_schema = interaction_schema\n self.spinner = spinner\n\n self.composition_initialized = False\n\n def create_next_speaker_system_prompt(self, chat: \"Chat\") -> str:\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = StructuredString(\n sections=[\n Section(\n name=\"Mission\",\n text=\"Select the next speaker in the conversation based on the previous messages in the \"\n \"conversation and an optional INTERACTION SCHEMA. If it seems to you that the chat \"\n \"should end instead of selecting a next speaker, terminate it.\",\n ),\n Section(name=\"Rules\", list=[\"You can only select one of the participants in the group chat.\"]),\n Section(\n name=\"Process\",\n list=[\n \"Look at the last message in the conversation and determine who should speak next based on the \"\n \"INTERACTION SCHEMA, if provided.\",\n \"If you determine that the chat should end, you should return the \"\n \"string TERMINATE instead of a participant name. For example, when the goal has been achieved, \"\n \", it is impossible to reach, or if the user asks to terminate the chat.\",\n ],\n ),\n Section(\n name=\"Input\",\n list=[\n \"Chat goal\",\n \"Currently active participants in the conversation\",\n \"Speaker interaction schema\",\n \"Previous messages from the conversation\",\n ],\n ),\n Section(\n name=\"Output\",\n text=\"The name of the next speaker in the conversation. Or, TERMINATE if the chat should end, \"\n \"instead.\",\n ),\n Section(name=\"Example Outputs\", list=['\"John\"', '\"TERMINATE\"']),\n Section(\n name=\"Additional Context for Selection\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your selection of the \"\n \"next speaker, only use them for context for a better response, \"\n \"if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n ]\n )\n\n return str(system_message)\n\n def create_next_speaker_first_human_prompt(self, chat: \"Chat\", goal: str) -> str:\n messages = chat.get_messages()\n messages_list = [f\"- {message.sender_name}: {message.content}\" for message in messages]\n\n participants = chat.get_active_participants()\n\n prompt = StructuredString(\n sections=[\n Section(name=\"Goal\", text=goal or \"No explicit goal provided.\"),\n Section(\n name=\"Currently Active Participants\", list=[f\"{str(participant)}\" for participant in participants]\n ),\n Section(\n name=\"Interaction Schema\",\n text=self.interaction_schema or \"Not provided. 
Use your best judgement.\",\n ),\n Section(\n name=\"Chat Messages\",\n text=\"No messages yet.\" if len(messages_list) == 0 else None,\n list=messages_list if len(messages_list) > 0 else [],\n ),\n ]\n )\n\n return str(prompt)\n\n def prepare_chat(self, chat: \"Chat\", **kwargs: Any) -> None:\n # If a composition generator is provided, generate a new composition for the chat before starting.\n if self.composition_generator is not None and not self.composition_initialized:\n composition_suggestion = kwargs.get(\"composition_suggestion\", None)\n new_composition = self.composition_generator.generate_composition_for_chat(\n chat=chat,\n goal=self.goal,\n composition_suggestion=composition_suggestion,\n interaction_schema=self.interaction_schema,\n )\n\n # Sync participants with the new composition.\n current_active_participants = chat.get_active_participants()\n new_participants_names = {p.name for p in new_composition.participants}\n\n for participant in new_composition.participants:\n # Add missing participants.\n if not chat.has_active_participant_with_name(participant.name):\n chat.add_participant(participant)\n continue\n\n # Remove other participants not mentioned in the new composition.\n if participant.name not in new_participants_names:\n chat.remove_participant(participant)\n\n self.interaction_schema = new_composition.participants_interaction_schema\n\n self.composition_initialized = True\n\n super().prepare_chat(chat=chat, **kwargs)\n\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n participants = chat.get_active_participants()\n if len(participants) == 0:\n return None\n\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.start(text=\"The Chat Conductor is selecting the next speaker...\")\n else:\n self.spinner.start(text=f\"The Chat Conductor ({chat.name}) is selecting the next speaker...\")\n\n # Ask the AI to select the next speaker.\n messages = [\n SystemMessage(content=self.create_next_speaker_system_prompt(chat=chat)),\n HumanMessage(content=self.create_next_speaker_first_human_prompt(chat=chat, goal=self.goal)),\n ]\n\n result = self.execute_messages(messages=messages)\n next_speaker_name = result.strip()\n\n while not chat.has_active_participant_with_name(next_speaker_name) and next_speaker_name != \"TERMINATE\":\n messages.append(AIMessage(content=next_speaker_name))\n messages.append(\n HumanMessage(\n content=f'Speaker \"{next_speaker_name}\" is not a participant in the chat. 
Choose another one.'\n )\n )\n\n result = self.execute_messages(messages=messages)\n next_speaker_name = result.strip()\n\n if next_speaker_name == \"TERMINATE\":\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.stop_and_persist(\n symbol=\"👥\", text=\"The Chat Conductor has decided to terminate the chat.\"\n )\n else:\n self.spinner.stop_and_persist(\n symbol=\"👥\", text=f\"The Chat Conductor ({chat.name}) has decided to terminate the \" f\"chat.\"\n )\n\n return None\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None:\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n if self.spinner is not None:\n if chat.name is None:\n self.spinner.succeed(\n text=f'The Chat Conductor has selected \"{str(next_speaker)}\" ' f\"as the next speaker.\"\n )\n else:\n self.spinner.succeed(\n text=f'The Chat Conductor ({chat.name}) has selected \"{str(next_speaker)}\" ' f\"as the next speaker.\"\n )\n\n return next_speaker\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)" }, { "identifier": "LangChainBasedAIChatParticipant", "path": "chatflock/participants/langchain.py", "snippet": "class LangChainBasedAIChatParticipant(ActiveChatParticipant):\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self,\n name: str,\n chat_model: BaseChatModel,\n symbol: str = \"🤖\",\n role: str = \"AI Assistant\",\n personal_mission: str = \"Be a helpful AI assistant.\",\n other_prompt_sections: Optional[List[Section]] = None,\n retriever: Optional[BaseRetriever] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n spinner: Optional[Halo] = None,\n ignore_group_chat_environment: bool = False,\n include_timestamp_in_messages: bool = False,\n **kwargs: Any,\n ):\n super().__init__(name=name, symbol=symbol, **kwargs)\n\n self.role = role\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.other_prompt_sections = other_prompt_sections or []\n self.ignore_group_chat_environment = ignore_group_chat_environment\n self.include_timestamp_in_messages = include_timestamp_in_messages\n self.retriever = retriever\n self.tools = tools\n self.spinner = spinner\n self.personal_mission = personal_mission\n\n def create_system_message(self, chat: \"Chat\", relevant_docs: Sequence[Document]) -> str:\n now = datetime.now()\n pretty_datetime = now.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n base_sections = [\n Section(name=\"Current Time\", text=pretty_datetime),\n Section(name=\"Name\", text=self.name),\n Section(name=\"Role\", text=self.role),\n Section(name=\"Personal Mission\", text=self.personal_mission),\n Section(\n name=\"Additional Context for Response\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your response, only use \"\n \"them for context for a better response, if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n Section(\n name=\"Response Message Format\",\n list=[\n \"Your response should be the 
message you want to send to the group chat as your own name, \"\n \"role, and personal mission.\",\n \"Must not include any prefix (e.g., timestamp, sender name, etc.).\",\n \"Response must be a message as will be shown in the chat (timestamp and sender name are \"\n \"system-generated for you).\",\n ],\n sub_sections=[\n Section(name=\"Well-Formatted Chat Response Examples\", list=['\"Hello, how are you?\"']),\n Section(\n name=\"Badly-Formatted Chat Response Examples\",\n list=[\n (\n '\"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else '\"John: Hello, how are you?\"'\n ),\n ],\n ),\n ],\n ),\n ]\n\n active_participants = chat.get_active_participants()\n if self.ignore_group_chat_environment:\n system_message = StructuredString(sections=[*base_sections, *self.other_prompt_sections])\n else:\n system_message = StructuredString(\n sections=[\n *base_sections,\n Section(\n name=\"Chat\",\n sub_sections=[\n Section(name=\"Name\", text=chat.name or \"No name provided. Just a general chat.\"),\n Section(\n name=\"Participants\",\n text=\"\\n\".join(\n [\n f'- {str(p)}{\" -> This is you.\" if p.name == self.name else \"\"}'\n for p in active_participants\n ]\n ),\n ),\n Section(\n name=\"Guidelines\",\n list=[\n \"Your personal mission is the most important thing to you. You should always \"\n \"prioritize it.\",\n \"If a chat goal is provided, you should still follow your personal mission but \"\n \"in a way that helps the group achieve the chat goal.\",\n \"If you are the only participant in the chat, you should act as if the chat is now \"\n \"a scratch pad for you to write down your thoughts, ideas, and work on your \"\n \"mission by yourself. \"\n \"In the messages do not refer to another entity, but rather to yourself \"\n \"(I instead of You); the messages should read and sound like \"\n \"your internal thoughts and should be succinct, unless they are concrete work \"\n \"(for example, implementing something, calculating things, etc.). \"\n \"You have all the time in the world to build your thoughts, ideas, and do the \"\n \"work needed. The chat is now your place to think and iterate on your mission and \"\n \" achieve it.\",\n ],\n ),\n Section(\n name=\"Rules\",\n list=[\n \"You do not have to respond directly to the one who sent you a message. You can respond \"\n \"to anyone in the group chat.\",\n \"You cannot have private conversations with other participants. Everyone can see all \"\n \"messages sent by all other participants.\",\n ],\n ),\n Section(\n name=\"Previous Chat Messages\",\n list=[\n \"Messages are prefixed by a timestamp and the sender's name (could also be everyone). \",\n \"The prefix is for context only; it's not actually part of the message they sent. \",\n (\n 'Example: \"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else 'Example: \"John: Hello, how are you?\"'\n ),\n \"Some messages could have been sent by participants who are no longer a part of this \"\n \"conversation. 
Use their contents for context only; do not talk to them.\",\n \"In your response only include the message without the prefix.\",\n \"If you are the only participant in the chat, the previous chat messages are your \"\n \" memories or internal thoughts instead.\",\n ],\n ),\n ],\n ),\n *self.other_prompt_sections,\n ]\n )\n\n return str(system_message)\n\n def chat_messages_to_chat_model_messages(\n self, chat_messages: Sequence[ChatMessage], active_participants: Sequence[ActiveChatParticipant]\n ) -> List[BaseMessage]:\n messages: List[BaseMessage] = []\n for i, message in enumerate(chat_messages):\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n content = f\"[{pretty_datetime}] \"\n else:\n content = \"\"\n\n if self.ignore_group_chat_environment:\n content += f\"{message.sender_name}: {message.content}\"\n else:\n content += message.content\n\n if message.sender_name == self.name:\n if len(active_participants) > 1 or i == len(active_participants) - 1:\n messages.append(AIMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n\n if len(messages) == 0:\n messages.append(HumanMessage(content=f\"SYSTEM: The chat has started.\"))\n\n return messages\n\n def respond_to_chat(self, chat: Chat) -> str:\n if self.spinner is not None:\n self.spinner.start(text=f\"{str(self)} is thinking...\")\n\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = self.create_system_message(chat=chat, relevant_docs=relevant_docs)\n\n active_participants = chat.get_active_participants()\n all_messages = self.chat_messages_to_chat_model_messages(chat_messages, active_participants)\n all_messages = [SystemMessage(content=system_message), *all_messages]\n\n message_content = self.execute_messages(messages=all_messages)\n\n if self.spinner is not None:\n self.spinner.stop()\n\n potential_prefix = f\"{self.name}:\"\n if message_content.startswith(potential_prefix):\n message_content = message_content[len(potential_prefix) :].strip()\n\n return message_content\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n\n tool_names = \", \".join([tool.name for tool in self.tools or []])\n if tool_names == \"\":\n tool_names = \"None\"\n\n return (\n f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\\n\"\n f'{prefix} Personal Mission: \"{self.personal_mission}\"\\n{prefix} Tools: {tool_names}'\n )" }, { "identifier": "UserChatParticipant", "path": "chatflock/participants/user.py", "snippet": "class UserChatParticipant(ActiveChatParticipant):\n def __init__(self, name: str = \"User\", role: str = \"User\", symbol: str = \"👤\", **kwargs: Any):\n super().__init__(name, messages_hidden=True, **kwargs)\n\n self.role = role\n self.symbol 
= symbol\n\n def respond_to_chat(self, chat: Chat) -> str:\n return input(f\"{self.symbol} ({self.name}): \")\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n return f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\"" }, { "identifier": "TerminalChatRenderer", "path": "chatflock/renderers/terminal.py", "snippet": "class TerminalChatRenderer(ChatRenderer):\n def __init__(self, print_timestamps: bool = False):\n self.print_timestamps = print_timestamps\n\n def render_new_chat_message(self, chat: Chat, message: ChatMessage) -> None:\n if chat.hide_messages:\n return\n\n pretty_timestamp_with_date = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n sender = chat.get_active_participant_by_name(message.sender_name)\n if sender is None:\n symbol = \"❓\"\n\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {symbol} {message.sender_name}: {message.content}\")\n else:\n print(f\"{symbol} {message.sender_name}: {message.content}\")\n else:\n if sender.messages_hidden:\n return\n\n if chat.name is None:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {str(sender)}: {message.content}\")\n else:\n print(f\"{str(sender)}: {message.content}\")\n else:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {chat.name} > {str(sender)}: {message.content}\")\n else:\n print(f\"{chat.name} > {str(sender)}: {message.content}\")" }, { "identifier": "create_chat_model", "path": "examples/common.py", "snippet": "def create_chat_model(\n model: str = \"gpt-4-1106-preview\",\n temperature: float = 0.0,\n cache_db_file_path: Optional[str] = \"output/llm_cache.db\",\n) -> BaseChatModel:\n if cache_db_file_path is not None:\n Path(cache_db_file_path).parent.mkdir(parents=True, exist_ok=True)\n\n set_llm_cache(SQLiteCache(database_path=cache_db_file_path))\n\n chat_model = ChatOpenAI(temperature=temperature, model=model)\n\n return chat_model" } ]
import typer from dotenv import load_dotenv from halo import Halo from chatflock.backing_stores.in_memory import InMemoryChatDataBackingStore from chatflock.base import Chat from chatflock.conductors.langchain import LangChainBasedAIChatConductor from chatflock.participants.langchain import LangChainBasedAIChatParticipant from chatflock.participants.user import UserChatParticipant from chatflock.renderers.terminal import TerminalChatRenderer from examples.common import create_chat_model
7092
def three_way_ai_conductor(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None: chat_model = create_chat_model(model=model, temperature=temperature) spinner = Halo(spinner="dots") bartender = LangChainBasedAIChatParticipant( name="Johnny", role="Bartender", personal_mission="You are a bartender at a Cafe called 'Coffee Time'. You are a friendly guy who likes to " "chat with customers. You should collaborate with the Cook when the customer asks for food. " "You are the one in front, greeting the customer.", chat_model=chat_model, spinner=spinner, ) cook = LangChainBasedAIChatParticipant( name="Greg", role="Cook", personal_mission="You are a cook at a Cafe called 'Coffee Time'. You are an impatient and serious guy who " "doesn't like to chat with customers. You should collaborate with the Bartender when the " "customer asks for food. You are the one in the back, preparing the food.", chat_model=chat_model, spinner=spinner, ) user = UserChatParticipant(name="User") participants = [user, bartender, cook] chat = Chat( backing_store=InMemoryChatDataBackingStore(),
def three_way_ai_conductor(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None: chat_model = create_chat_model(model=model, temperature=temperature) spinner = Halo(spinner="dots") bartender = LangChainBasedAIChatParticipant( name="Johnny", role="Bartender", personal_mission="You are a bartender at a Cafe called 'Coffee Time'. You are a friendly guy who likes to " "chat with customers. You should collaborate with the Cook when the customer asks for food. " "You are the one in front, greeting the customer.", chat_model=chat_model, spinner=spinner, ) cook = LangChainBasedAIChatParticipant( name="Greg", role="Cook", personal_mission="You are a cook at a Cafe called 'Coffee Time'. You are an impatient and serious guy who " "doesn't like to chat with customers. You should collaborate with the Bartender when the " "customer asks for food. You are the one in the back, preparing the food.", chat_model=chat_model, spinner=spinner, ) user = UserChatParticipant(name="User") participants = [user, bartender, cook] chat = Chat( backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
5
2023-11-12 11:10:58+00:00
8k
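The `cropped_code` above stops inside the `Chat(...)` constructor, and the row's `next_line` is `renderer=TerminalChatRenderer(),`. A minimal sketch of how that completion plugs in follows; only the imports, the `backing_store` argument, and the gold next line are confirmed by the row — the `initial_participants` keyword, the conductor's constructor arguments, and `initiate_dialog` are assumptions made for illustration, not confirmed chatflock API.

```python
# Sketch: completing the cropped three_way_ai_conductor() from this row.
# Confirmed by the row: the imports, `backing_store=InMemoryChatDataBackingStore(),`
# and the gold next_line `renderer=TerminalChatRenderer(),`.
# Assumed (hypothetical): `initial_participants`, the conductor's signature,
# and the `initiate_dialog` entry point.
from chatflock.backing_stores.in_memory import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors.langchain import LangChainBasedAIChatConductor
from chatflock.renderers.terminal import TerminalChatRenderer


def finish_chat_setup(participants, chat_model, spinner):
    chat = Chat(
        backing_store=InMemoryChatDataBackingStore(),
        renderer=TerminalChatRenderer(),     # the row's gold next_line
        initial_participants=participants,   # assumed keyword argument
    )
    # Assumed signature: the row only shows this class being imported.
    conductor = LangChainBasedAIChatConductor(chat_model=chat_model, spinner=spinner)
    conductor.initiate_dialog(chat=chat)     # assumed entry point
    return chat
```

Because the backing store and renderer are separate constructor arguments, the same participants could in principle be persisted or rendered differently without touching the conductor logic.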
yfqiu-nlp/temporal-llms
denoising_event_lm/data/dataset_readers/event_lm/event_seq2seq_transformer_reader.py
[ { "identifier": "Seq2SeqTransformerReader", "path": "denoising_event_lm/data/dataset_readers/seq2seq/seq2seq_transformer_reader.py", "snippet": "class Seq2SeqTransformerReader(DatasetReader):\n \"\"\"\n Reads a Pickle QA file and returns a ``Dataset`` where the ``Instances`` have four\n fields:\n * ``question_with_context``, a ``TextField`` that contains the concatenation of question and context,\n * ``answer_span``, a ``SpanField`` into the ``question`` ``TextField`` denoting the answer.\n * ``context_span`` a ``SpanField`` into the ``question`` ``TextField`` denoting the context, i.e., the part of\n the text that potential answers can come from.\n * A ``MetadataField`` that stores the instance's ID, the original question, the original passage text, both of\n these in tokenized form, and the gold answer strings, accessible as ``metadata['id']``,\n ``metadata['question']``, ``metadata['context']``, ``metadata['question_tokens']``,\n ``metadata['context_tokens']``, and ``metadata['answers']. This is so that we can more easily use the\n official SQuAD evaluation script to get metrics.\n Parameters\n ----------\n transformer_model_name : ``str``, optional (default=``bert-base-cased``)\n This reader chooses tokenizer and token indexer according to this setting.\n length_limit : ``int``, optional (default=self._tokenizer.model_max_length)\n We will make sure that the length of all input text never exceeds this many word pieces.\n stride : ``int``, optional (default=-1)\n If this is -1, we truncate the context as specified when calling ``self.tokenizer.encode``.\n Otherwise, when context are too long for the length limit, we emit multiple instances for one question,\n where the context is shifted. This parameter specifies the overlap between the shifted context window.\n truncation_strategy : `str`, optional (default=`'longest_first'`)\n String selected in the following options:\n - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length\n starting from the longest one at each token (when there is a pair of input sequences)\n - 'only_first': Only truncate the first sequence\n - 'only_second': Only truncate the second sequence\n - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length)\n test_mode : ``bool``, optional (default=True)\n whether we are in the test mode.\n context_prefix : ``str``, optional (default=\"\")\n the string to prepend on context. Mainly for T5 models.\n question_prefix : ``str``, optional (default=\"\")\n the string to prepend on question. Mainly for T5 models.\n target_suffix : ``str``, optional (default=\"\")\n the string to append on target. 
Mainly for T5 models.\n \"\"\"\n\n def __init__(\n self,\n tokenizer_model_name: str,\n tokenizer_kwargs: Optional[Dict[str, Any]] = None,\n lowercase: bool = False,\n length_limit: Optional[int] = None,\n truncation_strategy: str = \"longest_first\",\n test_mode: bool = False,\n source_prefix: str = \"\",\n target_prefix: str = \"\",\n target_suffix: str = \"\",\n task_specific_args: Optional[Dict[str, Any]] = None,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self._tokenizer = get_huggingface_tokenizer(tokenizer_model_name, **(tokenizer_kwargs or {}))\n self._lowercase = lowercase\n self._length_limit = length_limit or self._tokenizer.model_max_length # since truncation in tokenizer will consider added special tokens\n self._truncation_strategy = truncation_strategy\n self._test_mode = test_mode\n\n self._source_prefix = source_prefix if len(source_prefix) == 0 else (source_prefix + ('' if source_prefix[-1] == ' ' else ' '))\n self._target_prefix = target_prefix if len(target_prefix) == 0 else (target_prefix + ('' if target_prefix[-1] == ' ' else ' '))\n self._target_suffix = target_suffix if len(target_suffix) == 0 else (('' if target_suffix[0] == ' ' else ' ') + target_suffix)\n #self._source_prefix = source_prefix\n #self._target_prefix = target_prefix\n #self._target_suffix = target_suffix\n\n self._return_token_type_ids = \"token_type_ids\" in self._tokenizer.model_input_names\n self._return_attention_mask = \"attention_mask\" in self._tokenizer.model_input_names\n\n if len(self._source_prefix) > 0:\n self._source_prefix_tok_ids = self.tokenize_text(self._source_prefix, add_special_tokens=False, add_prefix_space=False)[\"input_ids\"]\n else:\n self._source_prefix_tok_ids = []\n\n # get default task-specific arguments for multi-task training\n self._default_task_specific_args = self.get_default_task_specific_args()\n self._task_specific_args = task_specific_args or {}\n self._task_specific_args[''] = self._default_task_specific_args\n\n @staticmethod\n def get_answer_strings(tokens, answer_tok_idxs):\n if answer_tok_idxs is not None:\n answers_str = [\" \".join(tokens[p] for p in idxs) for idxs in answer_tok_idxs]\n else:\n answers_str = None\n return answers_str\n\n def get_default_task_specific_args(self):\n # remember to deepcopy if needed\n default_args = {\n \"_length_limit\": self._length_limit\n }\n return default_args\n\n def set_task_specific_args(self, kwargs):\n for k, v in kwargs.items():\n assert hasattr(self, k)\n setattr(self, k, v)\n\n def preprocess_example(self, example, data_src):\n example['data_src'] = data_src\n # metadata\n if not \"doc_id\" in example:\n example[\"doc_id\"] = None\n # process source if needed\n if \"source\" in example:\n pass\n elif \"context\" in example:\n example[\"source\"] = example[\"context\"]\n # process answers if needed\n if \"answers_str\" in example:\n pass\n elif \"answers\" in example:\n example[\"answers_str\"] = example[\"answers\"]\n elif \"answer_tok_idxs\" in example:\n # get answer strings\n context_toks = example[\"context_tokens\"]\n answer_tok_idxs = example[\"answer_tok_idxs\"]\n answers_str = self.get_answer_strings(context_toks, answer_tok_idxs)\n example[\"answers_str\"] = answers_str\n\n\n @overrides\n def _read(self, file_path: str):\n if file_path[0] == '{' and file_path[-1] == '}':\n file_path_dict = json.loads(file_path)\n dataset = []\n for data_src, file_path in file_path_dict.items():\n file_params = self._task_specific_args[data_src]\n data_src_weight = file_params.pop('weight') if 'weight' in 
file_params else 1\n assert type(data_src_weight) == int\n # if `file_path` is a URL, redirect to the cache\n file_path = cached_path(file_path)\n\n self.set_task_specific_args(file_params)\n logger.info(\"Reading file at %s\", file_path)\n with open(file_path, 'rb') as dataset_file:\n cur_dataset = pickle.load(dataset_file)\n for example in cur_dataset:\n self.preprocess_example(example, data_src)\n logger.info(f\"Up-sample {data_src} dataset by {data_src_weight} ({len(cur_dataset)} -> {len(cur_dataset)*data_src_weight})\")\n dataset += cur_dataset * data_src_weight\n self.set_task_specific_args(self._default_task_specific_args)\n else:\n # if `file_path` is a URL, redirect to the cache\n file_path = cached_path(file_path)\n\n logger.info(\"Reading file at %s\", file_path)\n with open(file_path, 'rb') as dataset_file:\n dataset = pickle.load(dataset_file)\n for example in dataset:\n self.preprocess_example(example, '')\n # now allennlp's lazy dataset only works with unshuffle, so manually shuffle here.\n np.random.shuffle(dataset)\n logger.info(\"Reading the dataset\")\n\n # yield instances\n num_instances = 0\n num_valid_examples = 0\n examples_with_more_than_one_instance = 0\n self._instances_exceed_length_limit = 0\n for example in dataset:\n self.set_task_specific_args(self._task_specific_args[example['data_src']])\n instances = self.make_instances(example)\n instances_yielded = 0\n for instance in instances:\n yield instance\n num_instances += 1\n instances_yielded += 1\n num_valid_examples += 1\n if instances_yielded > 1:\n examples_with_more_than_one_instance += 1\n self.set_task_specific_args(self._default_task_specific_args)\n\n\n logger.info(\"Total instances yielded: %d\", num_instances)\n logger.info(\"%d (%.2f%%) examples have more than one instance\",\n examples_with_more_than_one_instance,\n 100 * examples_with_more_than_one_instance / num_valid_examples)\n logger.info(\"%d (%.2f%%) instances exceed the length limit\",\n self._instances_exceed_length_limit,\n 100 * self._instances_exceed_length_limit / num_instances)\n\n def tokenize_text(self, text, text_pair=None, add_special_tokens=True, add_prefix_space=False):\n # note: default set ``add_prefix_space`` to True.\n # This makes roberta-style encoders produce correct results when special tokens are added.\n encodes = self._tokenizer.encode_plus(\n text=text,\n text_pair=text_pair,\n add_special_tokens=add_special_tokens,\n max_length=self._length_limit,\n truncation=self._truncation_strategy,\n return_tensors=None,\n return_token_type_ids=self._return_token_type_ids,\n return_attention_mask=self._return_attention_mask,\n return_overflowing_tokens=False,\n return_special_tokens_mask=False,\n return_offsets_mapping=False,\n add_prefix_space=add_prefix_space\n )\n return encodes\n\n def make_single_instance(\n self,\n example: Dict[str, Any]\n ) -> Iterable[Instance]:\n\n source_str = example[\"source\"]\n answers_str = None if self._test_mode else example[\"answers_str\"]\n if self._lowercase:\n source_str = source_str.lower()\n answers_str = None if self._test_mode else [ans.lower() for ans in answers_str]\n _id = example[\"_id\"]\n doc_id = example[\"doc_id\"]\n data_src = example['data_src']\n\n # preprocess target strings. 
Mainly for T5 models.\n target_idx = 0\n target_str = None if answers_str is None else self._target_prefix + answers_str[target_idx] + self._target_suffix # use first one as the target\n # tokenize the target.\n target_encodes = None if answers_str is None else self.tokenize_text(target_str)\n\n additional_metadata = {\n \"raw_source_str\": source_str,\n \"source_str\": None,\n \"target_str\": self._tokenizer.decode(target_encodes[\"input_ids\"]),\n \"answers_str\": answers_str,\n \"_id\": _id,\n \"doc_id\": doc_id,\n \"data_src\": data_src,\n }\n\n source_str = self._source_prefix + source_str\n source_encodes = self.tokenize_text(source_str)\n additional_metadata[\"source_str\"] = self._tokenizer.decode(source_encodes[\"input_ids\"])\n instance = self.text_to_instance(\n source_encodes,\n target_encodes,\n deepcopy(additional_metadata),\n )\n return instance\n\n def make_instances(\n self,\n example: Dict[str, Any]\n ) -> Iterable[Instance]:\n\n yield self.make_single_instance(example)\n\n @overrides\n def text_to_instance(\n self, # type: ignore\n source_encodes: Dict[str, List[int]],\n target_encodes: Dict[str, List[int]] = None,\n additional_metadata: Dict[str, Any] = None,\n ) -> Instance:\n\n fields = {}\n\n # make the token_ids fields (array fields)\n pad_id = self._tokenizer.pad_token_id\n fields[\"source_tok_ids\"] = ArrayField(np.array(source_encodes[\"input_ids\"]), padding_value=pad_id, dtype=np.int64)\n if target_encodes is not None:\n fields[\"target_tok_ids\"] = ArrayField(np.array(target_encodes[\"input_ids\"]), padding_value=pad_id, dtype=np.int64)\n # make the token_type_ids fields (array fields)\n if self._return_token_type_ids:\n pad_id = self._tokenizer.pad_token_type_id\n fields[\"source_tok_type_ids\"] = ArrayField(np.array(source_encodes[\"token_type_ids\"]), padding_value=pad_id, dtype=np.int64)\n if target_encodes is not None:\n fields[\"target_tok_type_ids\"] = ArrayField(np.array(target_encodes[\"token_type_ids\"]), padding_value=pad_id, dtype=np.int64)\n if self._return_attention_mask:\n pad_id = 0\n fields[\"source_attention_mask\"] = ArrayField(np.array(source_encodes[\"attention_mask\"]), padding_value=pad_id, dtype=np.int64)\n if target_encodes is not None:\n fields[\"target_attention_mask\"] = ArrayField(np.array(target_encodes[\"attention_mask\"]), padding_value=pad_id, dtype=np.int64)\n\n '''\n print(\"source:\")\n print(source_encodes)\n print(self._tokenizer.decode(source_encodes[\"input_ids\"], skip_special_tokens=False, clean_up_tokenization_spaces=True))\n print(\"target:\")\n print(target_encodes)\n print(self._tokenizer.decode(target_encodes[\"input_ids\"], skip_special_tokens=False, clean_up_tokenization_spaces=True))\n print(\"meta\")\n print(json.dumps(additional_metadata, indent=2))\n print(\"---\"*20, '\\n')\n input()\n '''\n if len(source_encodes[\"input_ids\"]) >= self._length_limit:\n self._instances_exceed_length_limit += 1\n\n # make the metadata\n metadata = {}\n if additional_metadata is not None:\n metadata.update(additional_metadata)\n fields[\"metadata\"] = MetadataField(metadata)\n\n return Instance(fields) " }, { "identifier": "print_stat_with_posneg", "path": "denoising_event_lm/data/data_utils/event_lm/utils.py", "snippet": "def print_stat_with_posneg(data, print_func=print):\n print_func(\"Num Seq: {:d}\".format(len(data)))\n print_func(\"Avg Seq Length: {:5.3f}\".format(sum(len(d['varg_seq']) for d in data) / len(data)))\n print_func(\"Num POS Seq: {:d}\".format(len([d for d in data if d['label'] == 'POS'])))\n if len([d for 
d in data if d['label'] == 'POS']) > 0:\n print_func(\"Avg POS Seq Length: {:5.3f}\".format(sum(len(d['varg_seq']) for d in data if d['label'] == 'POS') / len([d for d in data if d['label'] == 'POS'])))\n else:\n print_func(\"Avg POS Seq Length: 0.\")\n print_func(\"Num NEG Seq: {:d}\".format(len([d for d in data if d['label'] == 'NEG'])))\n if len([d for d in data if d['label'] == 'NEG']) > 0:\n print_func(\"Avg NEG Seq Length: {:5.3f}\".format(sum(len(d['varg_seq']) for d in data if d['label'] == 'NEG') / len([d for d in data if d['label'] == 'NEG'])))\n else:\n print_func(\"Avg NEG Seq Length: 0.\")" }, { "identifier": "print_stat_chainlen", "path": "denoising_event_lm/data/data_utils/event_lm/utils.py", "snippet": "def print_stat_chainlen(data, print_func=print):\n ls = [len(d['varg_seq']) for d in data]\n for i in range(max(ls)+1):\n print_func(\"length {:d}: {:5.3f}%\".format(i, (sum([l == i for l in ls]) / len(ls)) * 100))" }, { "identifier": "EVENT_TAG", "path": "denoising_event_lm/utils/constants.py", "snippet": "EVENT_TAG = '<EVENT>'" }, { "identifier": "ARGS_TAG", "path": "denoising_event_lm/utils/constants.py", "snippet": "ARGS_TAG = '<ARGS>'" }, { "identifier": "POINTER_EVENT_TAGS", "path": "denoising_event_lm/utils/constants.py", "snippet": "POINTER_EVENT_TAGS = ['<EVENT{:d}>'.format(i) for i in range(30)]" } ]
import json, pickle import logging import numpy as np import random import pandas as pd from typing import Any, Dict, List, Tuple, Optional, Iterable from copy import deepcopy from allennlp.data.fields import MetadataField, ArrayField from overrides import overrides from allennlp.common.file_utils import cached_path from allennlp.data.dataset_readers.dataset_reader import DatasetReader from allennlp.data.instance import Instance from transformers import PreTrainedTokenizer from transformers.tokenization_auto import AutoTokenizer from denoising_event_lm.data.dataset_readers.seq2seq.seq2seq_transformer_reader import Seq2SeqTransformerReader from denoising_event_lm.data.data_utils.event_lm.utils import print_stat_with_posneg, print_stat_chainlen from denoising_event_lm.utils.constants import EVENT_TAG, ARGS_TAG, POINTER_EVENT_TAGS
4719
logger = logging.getLogger(__name__) @DatasetReader.register("event_seq2seq_transformer_reader") class EventSeq2SeqTransformerReader(Seq2SeqTransformerReader): """ Reads a Pickle QA file and returns a ``Dataset`` where the ``Instances`` have four fields: * ``question_with_context``, a ``TextField`` that contains the concatenation of question and context, * ``answer_span``, a ``SpanField`` into the ``question`` ``TextField`` denoting the answer. * ``context_span`` a ``SpanField`` into the ``question`` ``TextField`` denoting the context, i.e., the part of the text that potential answers can come from. * A ``MetadataField`` that stores the instance's ID, the original question, the original passage text, both of these in tokenized form, and the gold answer strings, accessible as ``metadata['id']``, ``metadata['question']``, ``metadata['context']``, ``metadata['question_tokens']``, ``metadata['context_tokens']``, and ``metadata['answers']. This is so that we can more easily use the official SQuAD evaluation script to get metrics. Parameters ---------- transformer_model_name : ``str``, optional (default=``bert-base-cased``) This reader chooses tokenizer and token indexer according to this setting. length_limit : ``int``, optional (default=self._tokenizer.model_max_length) We will make sure that the length of all input text never exceeds this many word pieces. truncation_strategy : `str`, optional (default=`'longest_first'`) String selected in the following options: - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length starting from the longest one at each token (when there is a pair of input sequences) - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length) test_mode : ``bool``, optional (default=True) whether we are in the test mode. source_prefix : ``str``, optional (default="") the string to prepend on context. Mainly for T5 models. target_suffix : ``str``, optional (default="") the string to append on target. Mainly for T5 models. """ def __init__( self, tokenizer_model_name: str, tokenizer_kwargs: Optional[Dict[str, Any]] = None, lowercase: bool = False, length_limit: Optional[int] = None, truncation_strategy: str = "longest_first", test_mode: bool = False, source_prefix: str = "", target_prefix: str = "", target_suffix: str = "", task_specific_args: Optional[Dict[str, Any]] = None, event_sep: str = EVENT_TAG,
logger = logging.getLogger(__name__) @DatasetReader.register("event_seq2seq_transformer_reader") class EventSeq2SeqTransformerReader(Seq2SeqTransformerReader): """ Reads a Pickle QA file and returns a ``Dataset`` where the ``Instances`` have four fields: * ``question_with_context``, a ``TextField`` that contains the concatenation of question and context, * ``answer_span``, a ``SpanField`` into the ``question`` ``TextField`` denoting the answer. * ``context_span`` a ``SpanField`` into the ``question`` ``TextField`` denoting the context, i.e., the part of the text that potential answers can come from. * A ``MetadataField`` that stores the instance's ID, the original question, the original passage text, both of these in tokenized form, and the gold answer strings, accessible as ``metadata['id']``, ``metadata['question']``, ``metadata['context']``, ``metadata['question_tokens']``, ``metadata['context_tokens']``, and ``metadata['answers']. This is so that we can more easily use the official SQuAD evaluation script to get metrics. Parameters ---------- transformer_model_name : ``str``, optional (default=``bert-base-cased``) This reader chooses tokenizer and token indexer according to this setting. length_limit : ``int``, optional (default=self._tokenizer.model_max_length) We will make sure that the length of all input text never exceeds this many word pieces. truncation_strategy : `str`, optional (default=`'longest_first'`) String selected in the following options: - 'longest_first' (default) Iteratively reduce the inputs sequence until the input is under max_length starting from the longest one at each token (when there is a pair of input sequences) - 'only_first': Only truncate the first sequence - 'only_second': Only truncate the second sequence - 'do_not_truncate': Do not truncate (raise an error if the input sequence is longer than max_length) test_mode : ``bool``, optional (default=True) whether we are in the test mode. source_prefix : ``str``, optional (default="") the string to prepend on context. Mainly for T5 models. target_suffix : ``str``, optional (default="") the string to append on target. Mainly for T5 models. """ def __init__( self, tokenizer_model_name: str, tokenizer_kwargs: Optional[Dict[str, Any]] = None, lowercase: bool = False, length_limit: Optional[int] = None, truncation_strategy: str = "longest_first", test_mode: bool = False, source_prefix: str = "", target_prefix: str = "", target_suffix: str = "", task_specific_args: Optional[Dict[str, Any]] = None, event_sep: str = EVENT_TAG,
args_sep: str = ARGS_TAG,
4
2023-11-14 11:57:28+00:00
8k
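The `Seq2SeqTransformerReader.__init__` quoted in this row normalizes `source_prefix`, `target_prefix`, and `target_suffix` so that a non-empty prefix always ends with exactly one space and a non-empty suffix always starts with one, which keeps concatenations like `self._source_prefix + source_str` and `target_str + self._target_suffix` well formed. The snippet below restates that logic as standalone functions with a few checked examples; the sample strings (`"summarize:"`, `"</s>"`) are illustrative only and do not appear in the row.

```python
# Standalone restatement of the prefix/suffix whitespace normalization in
# Seq2SeqTransformerReader.__init__: empty strings pass through unchanged,
# otherwise exactly one separating space is guaranteed at the joining edge.
def normalize_prefix(prefix: str) -> str:
    if len(prefix) == 0:
        return prefix
    # Append a space only if one is not already there.
    return prefix + ('' if prefix[-1] == ' ' else ' ')


def normalize_suffix(suffix: str) -> str:
    if len(suffix) == 0:
        return suffix
    # Prepend a space only if one is not already there.
    return ('' if suffix[0] == ' ' else ' ') + suffix


assert normalize_prefix("summarize:") == "summarize: "
assert normalize_prefix("summarize: ") == "summarize: "   # idempotent
assert normalize_prefix("") == ""
assert normalize_suffix("</s>") == " </s>"
```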
CryptoFuzzPy/cryptofuzz
cryptofuzz/utils.py
[ { "identifier": "b58encode", "path": "cryptofuzz/bs58.py", "snippet": "def b58encode(\n v: Union[str, bytes], alphabet: bytes = ALPHABET\n) -> bytes:\n \"\"\"\n Encode a string using Base58\n \"\"\"\n v = scrub_input(v)\n\n mainSize = len(v)\n v = v.lstrip(b'\\0')\n newSize = len(v)\n\n acc = int.from_bytes(v, byteorder='big') # first byte is most significant\n\n result = b58encode_int(acc, default_one=False, alphabet=alphabet)\n return alphabet[0:1] * (mainSize - newSize) + result" }, { "identifier": "b58decode", "path": "cryptofuzz/bs58.py", "snippet": "def b58decode(\n v: Union[str, bytes], alphabet: bytes = ALPHABET, *,\n autofix: bool = False\n) -> bytes:\n \"\"\"\n Decode a Base58 encoded string\n \"\"\"\n v = v.rstrip()\n v = scrub_input(v)\n\n mainSize = len(v)\n v = v.lstrip(alphabet[0:1])\n newSize = len(v)\n\n acc = b58decode_int(v, alphabet=alphabet, autofix=autofix)\n\n return acc.to_bytes(mainSize - newSize + (acc.bit_length() + 7) // 8, 'big')" }, { "identifier": "base58_check_encode", "path": "cryptofuzz/bs58.py", "snippet": "def base58_check_encode(payload, prefix=0x00):\n payload = bytes([prefix]) + payload\n checksum = sha256(sha256(payload).digest()).digest()[:4]\n return base58_encode(payload.hex() + checksum.hex())" }, { "identifier": "base58encodeCheck", "path": "cryptofuzz/bs58.py", "snippet": "def base58encodeCheck(prefix, payload):\n s = prefix + payload\n raw = sha256(sha256(s).digest()).digest()[:4]\n return base58encode(int.from_bytes(s + raw, 'big'))" }, { "identifier": "base58decode", "path": "cryptofuzz/bs58.py", "snippet": "def base58decode(raw):\n decoded = 0\n for char in raw:\n decoded = decoded * 58 + BASE58_ALPHABET.index(char)\n bytes_rep = decoded.to_bytes((decoded.bit_length() + 7) // 8, byteorder='big')\n landing = bytes_rep.lstrip(MAIN_DIGEST_RMD160)\n data_size = (len(raw) - len(landing))\n return MAIN_DIGEST_RMD160 * data_size + bytes_rep" }, { "identifier": "base58encode", "path": "cryptofuzz/bs58.py", "snippet": "def base58encode(num):\n if num == 0:\n return BASE58_ALPHABET[0]\n arr = []\n while num:\n num, rem = divmod(num, 58)\n arr.append(BASE58_ALPHABET[rem])\n arr.reverse()\n return ''.join(arr)" }, { "identifier": "MAIN_DIGEST_RMD160", "path": "cryptofuzz/assest.py", "snippet": "MAIN_DIGEST_RMD160 = b\"\\x00\"" }, { "identifier": "MAX_PRIVATE_KEY", "path": "cryptofuzz/assest.py", "snippet": "MAX_PRIVATE_KEY = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140" }, { "identifier": "MAIN_PREFIX", "path": "cryptofuzz/assest.py", "snippet": "MAIN_PREFIX = b'\\x80'" }, { "identifier": "MAIN_SUFFIX", "path": "cryptofuzz/assest.py", "snippet": "MAIN_SUFFIX = b'\\x01'" }, { "identifier": "ZERO_BASE_NET", "path": "cryptofuzz/assest.py", "snippet": "ZERO_BASE_NET = \"0000000000000000000000000000000000000000000000000000000000000000\"" }, { "identifier": "VERSION_NETWORK", "path": "cryptofuzz/assest.py", "snippet": "VERSION_NETWORK = b'\\x04\\x88\\xAD\\xE4'" }, { "identifier": "BASE58_ALPHABET", "path": "cryptofuzz/assest.py", "snippet": "BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'" }, { "identifier": "FINGERPRINT_RMD160", "path": "cryptofuzz/assest.py", "snippet": "FINGERPRINT_RMD160 = ZERO_BYTES" }, { "identifier": "COMPRESSED_PREFIX", "path": "cryptofuzz/assest.py", "snippet": "COMPRESSED_PREFIX = b'\\x03'" }, { "identifier": "COMPRESSED_PREFIX2", "path": "cryptofuzz/assest.py", "snippet": "COMPRESSED_PREFIX2 = b'\\x02'" }, { "identifier": "UNCOMPRESSED_PREFIX", "path": "cryptofuzz/assest.py", 
"snippet": "UNCOMPRESSED_PREFIX = b'\\x04'" }, { "identifier": "MAIN_DIGEST", "path": "cryptofuzz/assest.py", "snippet": "MAIN_DIGEST = b\"\\x00\"" }, { "identifier": "XPUB_PREFIX", "path": "cryptofuzz/assest.py", "snippet": "XPUB_PREFIX = \"0488B21E\"" }, { "identifier": "ZERO_BYTES", "path": "cryptofuzz/assest.py", "snippet": "ZERO_BYTES = b'\\x00\\x00\\x00\\x00'" }, { "identifier": "BIP39", "path": "cryptofuzz/assest.py", "snippet": "BIP39 = \"AbandonAbilityAbleAboutAboveAbsentAbsorbAbstractAbsurdAbuseAccessAccidentAccountAccuseAchieveAcidAcousticAcquireAcrossActActionActorActressActualAdaptAddAddictAddressAdjustAdmitAdultAdvanceAdviceAerobicAffairAffordAfraidAgainAgeAgentAgreeAheadAimAirAirportAisleAlarmAlbumAlcoholAlertAlienAllAlleyAllowAlmostAloneAlphaAlreadyAlsoAlterAlwaysAmateurAmazingAmongAmountAmusedAnalystAnchorAncientAngerAngleAngryAnimalAnkleAnnounceAnnualAnotherAnswerAntennaAntiqueAnxietyAnyApartApologyAppearAppleApproveAprilArchArcticAreaArenaArgueArmArmedArmorArmyAroundArrangeArrestArriveArrowArtArtefactArtistArtworkAskAspectAssaultAssetAssistAssumeAsthmaAthleteAtomAttackAttendAttitudeAttractAuctionAuditAugustAuntAuthorAutoAutumnAverageAvocadoAvoidAwakeAwareAwayAwesomeAwfulAwkwardAxisBabyBachelorBaconBadgeBagBalanceBalconyBallBambooBananaBannerBarBarelyBargainBarrelBaseBasicBasketBattleBeachBeanBeautyBecauseBecomeBeefBeforeBeginBehaveBehindBelieveBelowBeltBenchBenefitBestBetrayBetterBetweenBeyondBicycleBidBikeBindBiologyBirdBirthBitterBlackBladeBlameBlanketBlastBleakBlessBlindBloodBlossomBlouseBlueBlurBlushBoardBoatBodyBoilBombBoneBonusBookBoostBorderBoringBorrowBossBottomBounceBoxBoyBracketBrainBrandBrassBraveBreadBreezeBrickBridgeBriefBrightBringBriskBroccoliBrokenBronzeBroomBrotherBrownBrushBubbleBuddyBudgetBuffaloBuildBulbBulkBulletBundleBunkerBurdenBurgerBurstBusBusinessBusyButterBuyerBuzzCabbageCabinCableCactusCageCakeCallCalmCameraCampCanCanalCancelCandyCannonCanoeCanvasCanyonCapableCapitalCaptainCarCarbonCardCargoCarpetCarryCartCaseCashCasinoCastleCasualCatCatalogCatchCategoryCattleCaughtCauseCautionCaveCeilingCeleryCementCensusCenturyCerealCertainChairChalkChampionChangeChaosChapterChargeChaseChatCheapCheckCheeseChefCherryChestChickenChiefChildChimneyChoiceChooseChronicChuckleChunkChurnCigarCinnamonCircleCitizenCityCivilClaimClapClarifyClawClayCleanClerkCleverClickClientCliffClimbClinicClipClockClogCloseClothCloudClownClubClumpClusterClutchCoachCoastCoconutCodeCoffeeCoilCoinCollectColorColumnCombineComeComfortComicCommonCompanyConcertConductConfirmCongressConnectConsiderControlConvinceCookCoolCopperCopyCoralCoreCornCorrectCostCottonCouchCountryCoupleCourseCousinCoverCoyoteCrackCradleCraftCramCraneCrashCraterCrawlCrazyCreamCreditCreekCrewCricketCrimeCrispCriticCropCrossCrouchCrowdCrucialCruelCruiseCrumbleCrunchCrushCryCrystalCubeCultureCupCupboardCuriousCurrentCurtainCurveCushionCustomCuteCycleDadDamageDampDanceDangerDaringDashDaughterDawnDayDealDebateDebrisDecadeDecemberDecideDeclineDecorateDecreaseDeerDefenseDefineDefyDegreeDelayDeliverDemandDemiseDenialDentistDenyDepartDependDepositDepthDeputyDeriveDescribeDesertDesignDeskDespairDestroyDetailDetectDevelopDeviceDevoteDiagramDialDiamondDiaryDiceDieselDietDifferDigitalDignityDilemmaDinnerDinosaurDirectDirtDisagreeDiscoverDiseaseDishDismissDisorderDisplayDistanceDivertDivideDivorceDizzyDoctorDocumentDogDollDolphinDomainDonateDonkeyDonorDoorDoseDoubleDoveDraftDragonDramaDrasticDrawDreamDressDriftDrillDrinkDripDriveDropDrumDryDuckDumbDuneDuringDustDutchDutyDwarfDynamicEagerEagleEarlyEarnEarthEasilyEastEasyEchoEcologyEco
nomyEdgeEditEducateEffortEggEightEitherElbowElderElectricElegantElementElephantElevatorEliteElseEmbarkEmbodyEmbraceEmergeEmotionEmployEmpowerEmptyEnableEnactEndEndlessEndorseEnemyEnergyEnforceEngageEngineEnhanceEnjoyEnlistEnoughEnrichEnrollEnsureEnterEntireEntryEnvelopeEpisodeEqualEquipEraEraseErodeErosionErrorEruptEscapeEssayEssenceEstateEternalEthicsEvidenceEvilEvokeEvolveExactExampleExcessExchangeExciteExcludeExcuseExecuteExerciseExhaustExhibitExileExistExitExoticExpandExpectExpireExplainExposeExpressExtendExtraEyeEyebrowFabricFaceFacultyFadeFaintFaithFallFalseFameFamilyFamousFanFancyFantasyFarmFashionFatFatalFatherFatigueFaultFavoriteFeatureFebruaryFederalFeeFeedFeelFemaleFenceFestivalFetchFeverFewFiberFictionFieldFigureFileFilmFilterFinalFindFineFingerFinishFireFirmFirstFiscalFishFitFitnessFixFlagFlameFlashFlatFlavorFleeFlightFlipFloatFlockFloorFlowerFluidFlushFlyFoamFocusFogFoilFoldFollowFoodFootForceForestForgetForkFortuneForumForwardFossilFosterFoundFoxFragileFrameFrequentFreshFriendFringeFrogFrontFrostFrownFrozenFruitFuelFunFunnyFurnaceFuryFutureGadgetGainGalaxyGalleryGameGapGarageGarbageGardenGarlicGarmentGasGaspGateGatherGaugeGazeGeneralGeniusGenreGentleGenuineGestureGhostGiantGiftGiggleGingerGiraffeGirlGiveGladGlanceGlareGlassGlideGlimpseGlobeGloomGloryGloveGlowGlueGoatGoddessGoldGoodGooseGorillaGospelGossipGovernGownGrabGraceGrainGrantGrapeGrassGravityGreatGreenGridGriefGritGroceryGroupGrowGruntGuardGuessGuideGuiltGuitarGunGymHabitHairHalfHammerHamsterHandHappyHarborHardHarshHarvestHatHaveHawkHazardHeadHealthHeartHeavyHedgehogHeightHelloHelmetHelpHenHeroHiddenHighHillHintHipHireHistoryHobbyHockeyHoldHoleHolidayHollowHomeHoneyHoodHopeHornHorrorHorseHospitalHostHotelHourHoverHubHugeHumanHumbleHumorHundredHungryHuntHurdleHurryHurtHusbandHybridIceIconIdeaIdentifyIdleIgnoreIllIllegalIllnessImageImitateImmenseImmuneImpactImposeImproveImpulseInchIncludeIncomeIncreaseIndexIndicateIndoorIndustryInfantInflictInformInhaleInheritInitialInjectInjuryInmateInnerInnocentInputInquiryInsaneInsectInsideInspireInstallIntactInterestIntoInvestInviteInvolveIronIslandIsolateIssueItemIvoryJacketJaguarJarJazzJealousJeansJellyJewelJobJoinJokeJourneyJoyJudgeJuiceJumpJungleJuniorJunkJustKangarooKeenKeepKetchupKeyKickKidKidneyKindKingdomKissKitKitchenKiteKittenKiwiKneeKnifeKnockKnowLabLabelLaborLadderLadyLakeLampLanguageLaptopLargeLaterLatinLaughLaundryLavaLawLawnLawsuitLayerLazyLeaderLeafLearnLeaveLectureLeftLegLegalLegendLeisureLemonLendLengthLensLeopardLessonLetterLevelLiarLibertyLibraryLicenseLifeLiftLightLikeLimbLimitLinkLionLiquidListLittleLiveLizardLoadLoanLobsterLocalLockLogicLonelyLongLoopLotteryLoudLoungeLoveLoyalLuckyLuggageLumberLunarLunchLuxuryLyricsMachineMadMagicMagnetMaidMailMainMajorMakeMammalManManageMandateMangoMansionManualMapleMarbleMarchMarginMarineMarketMarriageMaskMassMasterMatchMaterialMathMatrixMatterMaximumMazeMeadowMeanMeasureMeatMechanicMedalMediaMelodyMeltMemberMemoryMentionMenuMercyMergeMeritMerryMeshMessageMetalMethodMiddleMidnightMilkMillionMimicMindMinimumMinorMinuteMiracleMirrorMiseryMissMistakeMixMixedMixtureMobileModelModifyMomMomentMonitorMonkeyMonsterMonthMoonMoralMoreMorningMosquitoMotherMotionMotorMountainMouseMoveMovieMuchMuffinMuleMultiplyMuscleMuseumMushroomMusicMustMutualMyselfMysteryMythNaiveNameNapkinNarrowNastyNationNatureNearNeckNeedNegativeNeglectNeitherNephewNerveNestNetNetworkNeutralNeverNewsNextNiceNightNobleNoiseNomineeNoodleNormalNorthNoseNotableNoteNothingNoticeNovelNowNuclearNumberNurseNutOakObeyObjectObligeObscureObserveObtainObviousOccurOceanOctober
OdorOffOfferOfficeOftenOilOkayOldOliveOlympicOmitOnceOneOnionOnlineOnlyOpenOperaOpinionOpposeOptionOrangeOrbitOrchardOrderOrdinaryOrganOrientOriginalOrphanOstrichOtherOutdoorOuterOutputOutsideOvalOvenOverOwnOwnerOxygenOysterOzonePactPaddlePagePairPalacePalmPandaPanelPanicPantherPaperParadeParentParkParrotPartyPassPatchPathPatientPatrolPatternPausePavePaymentPeacePeanutPearPeasantPelicanPenPenaltyPencilPeoplePepperPerfectPermitPersonPetPhonePhotoPhrasePhysicalPianoPicnicPicturePiecePigPigeonPillPilotPinkPioneerPipePistolPitchPizzaPlacePlanetPlasticPlatePlayPleasePledgePluckPlugPlungePoemPoetPointPolarPolePolicePondPonyPoolPopularPortionPositionPossiblePostPotatoPotteryPovertyPowderPowerPracticePraisePredictPreferPreparePresentPrettyPreventPricePridePrimaryPrintPriorityPrisonPrivatePrizeProblemProcessProduceProfitProgramProjectPromoteProofPropertyProsperProtectProudProvidePublicPuddingPullPulpPulsePumpkinPunchPupilPuppyPurchasePurityPurposePursePushPutPuzzlePyramidQualityQuantumQuarterQuestionQuickQuitQuizQuoteRabbitRaccoonRaceRackRadarRadioRailRainRaiseRallyRampRanchRandomRangeRapidRareRateRatherRavenRawRazorReadyRealReasonRebelRebuildRecallReceiveRecipeRecordRecycleReduceReflectReformRefuseRegionRegretRegularRejectRelaxReleaseReliefRelyRemainRememberRemindRemoveRenderRenewRentReopenRepairRepeatReplaceReportRequireRescueResembleResistResourceResponseResultRetireRetreatReturnReunionRevealReviewRewardRhythmRibRibbonRiceRichRideRidgeRifleRightRigidRingRiotRippleRiskRitualRivalRiverRoadRoastRobotRobustRocketRomanceRoofRookieRoomRoseRotateRoughRoundRouteRoyalRubberRudeRugRuleRunRunwayRuralSadSaddleSadnessSafeSailSaladSalmonSalonSaltSaluteSameSampleSandSatisfySatoshiSauceSausageSaveSayScaleScanScareScatterSceneSchemeSchoolScienceScissorsScorpionScoutScrapScreenScriptScrubSeaSearchSeasonSeatSecondSecretSectionSecuritySeedSeekSegmentSelectSellSeminarSeniorSenseSentenceSeriesServiceSessionSettleSetupSevenShadowShaftShallowShareShedShellSheriffShieldShiftShineShipShiverShockShoeShootShopShortShoulderShoveShrimpShrugShuffleShySiblingSickSideSiegeSightSignSilentSilkSillySilverSimilarSimpleSinceSingSirenSisterSituateSixSizeSkateSketchSkiSkillSkinSkirtSkullSlabSlamSleepSlenderSliceSlideSlightSlimSloganSlotSlowSlushSmallSmartSmileSmokeSmoothSnackSnakeSnapSniffSnowSoapSoccerSocialSockSodaSoftSolarSoldierSolidSolutionSolveSomeoneSongSoonSorrySortSoulSoundSoupSourceSouthSpaceSpareSpatialSpawnSpeakSpecialSpeedSpellSpendSphereSpiceSpiderSpikeSpinSpiritSplitSpoilSponsorSpoonSportSpotSpraySpreadSpringSpySquareSqueezeSquirrelStableStadiumStaffStageStairsStampStandStartStateStaySteakSteelStemStepStereoStickStillStingStockStomachStoneStoolStoryStoveStrategyStreetStrikeStrongStruggleStudentStuffStumbleStyleSubjectSubmitSubwaySuccessSuchSuddenSufferSugarSuggestSuitSummerSunSunnySunsetSuperSupplySupremeSureSurfaceSurgeSurpriseSurroundSurveySuspectSustainSwallowSwampSwapSwarmSwearSweetSwiftSwimSwingSwitchSwordSymbolSymptomSyrupSystemTableTackleTagTailTalentTalkTankTapeTargetTaskTasteTattooTaxiTeachTeamTellTenTenantTennisTentTermTestTextThankThatThemeThenTheoryThereTheyThingThisThoughtThreeThriveThrowThumbThunderTicketTideTigerTiltTimberTimeTinyTipTiredTissueTitleToastTobaccoTodayToddlerToeTogetherToiletTokenTomatoTomorrowToneTongueTonightToolToothTopTopicToppleTorchTornadoTortoiseTossTotalTouristTowardTowerTownToyTrackTradeTrafficTragicTrainTransferTrapTrashTravelTrayTreatTreeTrendTrialTribeTrickTriggerTrimTripTrophyTroubleTruckTrueTrulyTrumpetTrustTruthTryTubeTuitionTumbleTunaTunnelTurkeyTurnTurtleTwelveTwentyTwiceTwin
TwistTwoTypeTypicalUglyUmbrellaUnableUnawareUncleUncoverUnderUndoUnfairUnfoldUnhappyUniformUniqueUnitUniverseUnknownUnlockUntilUnusualUnveilUpdateUpgradeUpholdUponUpperUpsetUrbanUrgeUsageUseUsedUsefulUselessUsualUtilityVacantVacuumVagueValidValleyValveVanVanishVaporVariousVastVaultVehicleVelvetVendorVentureVenueVerbVerifyVersionVeryVesselVeteranViableVibrantViciousVictoryVideoViewVillageVintageViolinVirtualVirusVisaVisitVisualVitalVividVocalVoiceVoidVolcanoVolumeVoteVoyageWageWagonWaitWalkWallWalnutWantWarfareWarmWarriorWashWaspWasteWaterWaveWayWealthWeaponWearWeaselWeatherWebWeddingWeekendWeirdWelcomeWestWetWhaleWhatWheatWheelWhenWhereWhipWhisperWideWidthWifeWildWillWinWindowWineWingWinkWinnerWinterWireWisdomWiseWishWitnessWolfWomanWonderWoodWoolWordWorkWorldWorryWorthWrapWreckWrestleWristWriteWrongYardYearYellowYouYoungYouthZebraZeroZoneZoo\"" } ]
import binascii import os, re, hashlib import random import struct import ecdsa from typing import Union from .bs58 import b58encode, b58decode, base58_check_encode, base58encodeCheck, base58decode, base58encode from hdwallet import HDWallet as HD_W from hdwallet.symbols import BTC, ETH, TRX, LTC, DOGE, DGB, BTG, RVN, QTUM, DASH, ZEC, BCH, AXE from mnemonic import Mnemonic from .assest import ( MAIN_DIGEST_RMD160, MAX_PRIVATE_KEY, MAIN_PREFIX, MAIN_SUFFIX, ZERO_BASE_NET, VERSION_NETWORK, BASE58_ALPHABET, FINGERPRINT_RMD160, COMPRESSED_PREFIX, COMPRESSED_PREFIX2, UNCOMPRESSED_PREFIX, MAIN_DIGEST, XPUB_PREFIX, ZERO_BYTES, BIP39 )
5962
def mne_to_wif(self, mnemonic, compress: bool = False): seed = self.mne_to_seed(mnemonic) return self.bytes_to_wif(seed, compress) def mne_to_int(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_int(seed) def mne_to_xpub(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_xpub(seed) def mne_to_xprv(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_xprv(seed) def mne_to_addr(self, mnemonic, compress: bool = False): seed = self.mne_to_seed(mnemonic) return self.bytes_to_addr(seed, compress) def mne_to_binary(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_binary(seed) def bytes_to_mne(self, seed): return Mnemonic().to_mnemonic(seed) def bytes_to_seed(self, seed): return hashlib.pbkdf2_hmac('sha512', seed, b'mnemonic', 2048) def bytes_to_hex(self, seed): return binascii.hexlify(self.bytes_to_seed(seed)).decode('utf-8') def unHexlify(self, h: str): return binascii.unhexlify(h) def hex_to_bytes(self, hexed): return binascii.unhexlify(hexed) def hex_to_mne(self, hexed: str) -> str: seed = self.hex_to_bytes(hexed) return self.bytes_to_mne(seed) def hex_to_wif(self, hexed, compress: bool = False) -> str: return self.bytes_to_wif(self.hex_to_bytes(hexed), compress) def hex_to_xprv(self, hexed: str) -> str: return self.bytes_to_xprv(self.hex_to_bytes(hexed)) def hex_to_xpub(self, hexed: str) -> str: return self.bytes_to_xpub(self.hex_to_bytes(hexed)) def hex_to_int(self, hexed: str) -> int: return int(hexed, 16) def hex_to_pub(self, hexed: str, compress: bool = False) -> bytes: if compress: return self.bytes_to_public(self.hex_to_bytes(hexed), True) else: return self.bytes_to_public(self.hex_to_bytes(hexed), False) def hex_to_addr(self, hexed: str, compress: bool = False) -> str: seed = self.hex_to_bytes(hexed) if compress: return self.bytes_to_addr(seed, True) else: return self.bytes_to_addr(seed, False) def hex_to_binary(self, hexed: str) -> str: return self.bytes_to_binary(self.hex_to_bytes(hexed)) def bytes_to_hex(self, seed): privatekey_int = int.from_bytes(hashlib.sha256(seed).digest(), byteorder='big') self.gen.checkValid(privatekey_int) pvkByte = privatekey_int.to_bytes(32, byteorder='big') return pvkByte.hex() def bytes_to_int(self, seed) -> int: return int.from_bytes(seed, byteorder='big') def bytes_to_pub(self, seed_bytes: bytes) -> bytes: sk = ecdsa.SigningKey.from_string(seed_bytes[:32], curve=ecdsa.SECP256k1) vk = sk.get_verifying_key() pub = COMPRESSED_PREFIX2 + vk.to_string()[-32:] if vk.to_string()[-1] % 2 == 0 else b'\x03' + vk.to_string()[-32:] return pub def bytes_to_public(self, seed: bytes, compress: bool = True) -> bytes: sk = ecdsa.SigningKey.from_string(seed, curve=ecdsa.SECP256k1) vk = sk.get_verifying_key() if compress: prefix = COMPRESSED_PREFIX2 if vk.pubkey.point.y() % 2 == 0 else COMPRESSED_PREFIX return prefix + vk.to_string()[:32] else: return UNCOMPRESSED_PREFIX + vk.to_string() def bytes_to_xpub(self, seed: bytes, chain_code=None) -> str: if chain_code is None: chain_code = os.urandom(32) # .hex prefix = self.unHexlify(XPUB_PREFIX) FINGERPRINT = ZERO_BYTES + ZERO_BYTES pub = self.bytes_to_pub(seed) xpub = prefix + MAIN_DIGEST + FINGERPRINT + chain_code + pub Hash64 = self.double_sha256(xpub) xpub += Hash64[:4] xpubBase58 = b58encode(xpub) return xpubBase58.decode('utf-8') def bytes_to_mne(self, byte: bytes): seed = byte[:32] return Mnemonic("english").to_mnemonic(seed) def bytes_to_binary(self, bytes_: bytes) -> str: if len(bytes_) != 32: raise ValueError("Input bytes should 
have a length of 32.") # Convert each byte to its binary representation and pad with zeros return ''.join(format(byte, '08b') for byte in bytes_) def bytes_to_wif(self, private_key, compress=True): if compress:
class Generator: def __init__(self): super().__init__() def checkValid(self, key: int) -> bool: if 0 < key < MAX_PRIVATE_KEY: return True else: raise ValueError(f"Secret Scalar Must be greater than 0 and less than {MAX_PRIVATE_KEY}.") def generate_private_key(self) -> str: randkey = "".join(random.choice("0123456789abcdef") for _ in range(64)) if self.checkValid(int(randkey, 16)): return randkey else: return self.generate_private_key() def generate_xprv(self): return "xprv" + binascii.hexlify(os.urandom(32)).decode('utf-8') def generate_decimal(self) -> int: return random.randint(0, MAX_PRIVATE_KEY) def generate_binary(self) -> str: return "".join(random.choice("01") for _ in range(256)) def generate_entropy(self, entropy_bits=256): entropy = os.urandom(entropy_bits // 8) checksum = hashlib.sha256(entropy).digest()[0] entropy_with_checksum = entropy + bytes([checksum]) return entropy_with_checksum def generate_mnemonic(self, size: int) -> str: characters = re.findall('[A-Z][a-z]+', BIP39) return " ".join(random.choices(characters, k=size)).lower() class Convertor: def __init__(self): super().__init__() self.gen = Generator() def double_sha256(self, data): return hashlib.sha256(hashlib.sha256(data).digest()).digest() def mne_to_seed(self, mnemonic, password=""): salt = ("mnemonic" + password).encode('utf-8') seed = hashlib.pbkdf2_hmac('sha512', mnemonic.encode('utf-8'), salt, 2048) return seed[:32] def mne_to_bytes(self, mnemonic): return self.mne_to_seed(mnemonic) def mne_to_hex(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_hex(seed) def mne_to_wif(self, mnemonic, compress: bool = False): seed = self.mne_to_seed(mnemonic) return self.bytes_to_wif(seed, compress) def mne_to_int(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_int(seed) def mne_to_xpub(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_xpub(seed) def mne_to_xprv(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_xprv(seed) def mne_to_addr(self, mnemonic, compress: bool = False): seed = self.mne_to_seed(mnemonic) return self.bytes_to_addr(seed, compress) def mne_to_binary(self, mnemonic): seed = self.mne_to_seed(mnemonic) return self.bytes_to_binary(seed) def bytes_to_mne(self, seed): return Mnemonic().to_mnemonic(seed) def bytes_to_seed(self, seed): return hashlib.pbkdf2_hmac('sha512', seed, b'mnemonic', 2048) def bytes_to_hex(self, seed): return binascii.hexlify(self.bytes_to_seed(seed)).decode('utf-8') def unHexlify(self, h: str): return binascii.unhexlify(h) def hex_to_bytes(self, hexed): return binascii.unhexlify(hexed) def hex_to_mne(self, hexed: str) -> str: seed = self.hex_to_bytes(hexed) return self.bytes_to_mne(seed) def hex_to_wif(self, hexed, compress: bool = False) -> str: return self.bytes_to_wif(self.hex_to_bytes(hexed), compress) def hex_to_xprv(self, hexed: str) -> str: return self.bytes_to_xprv(self.hex_to_bytes(hexed)) def hex_to_xpub(self, hexed: str) -> str: return self.bytes_to_xpub(self.hex_to_bytes(hexed)) def hex_to_int(self, hexed: str) -> int: return int(hexed, 16) def hex_to_pub(self, hexed: str, compress: bool = False) -> bytes: if compress: return self.bytes_to_public(self.hex_to_bytes(hexed), True) else: return self.bytes_to_public(self.hex_to_bytes(hexed), False) def hex_to_addr(self, hexed: str, compress: bool = False) -> str: seed = self.hex_to_bytes(hexed) if compress: return self.bytes_to_addr(seed, True) else: return self.bytes_to_addr(seed, False) def hex_to_binary(self, hexed: str) -> str: return 
self.bytes_to_binary(self.hex_to_bytes(hexed)) def bytes_to_hex(self, seed): privatekey_int = int.from_bytes(hashlib.sha256(seed).digest(), byteorder='big') self.gen.checkValid(privatekey_int) pvkByte = privatekey_int.to_bytes(32, byteorder='big') return pvkByte.hex() def bytes_to_int(self, seed) -> int: return int.from_bytes(seed, byteorder='big') def bytes_to_pub(self, seed_bytes: bytes) -> bytes: sk = ecdsa.SigningKey.from_string(seed_bytes[:32], curve=ecdsa.SECP256k1) vk = sk.get_verifying_key() pub = COMPRESSED_PREFIX2 + vk.to_string()[-32:] if vk.to_string()[-1] % 2 == 0 else b'\x03' + vk.to_string()[-32:] return pub def bytes_to_public(self, seed: bytes, compress: bool = True) -> bytes: sk = ecdsa.SigningKey.from_string(seed, curve=ecdsa.SECP256k1) vk = sk.get_verifying_key() if compress: prefix = COMPRESSED_PREFIX2 if vk.pubkey.point.y() % 2 == 0 else COMPRESSED_PREFIX return prefix + vk.to_string()[:32] else: return UNCOMPRESSED_PREFIX + vk.to_string() def bytes_to_xpub(self, seed: bytes, chain_code=None) -> str: if chain_code is None: chain_code = os.urandom(32) # .hex prefix = self.unHexlify(XPUB_PREFIX) FINGERPRINT = ZERO_BYTES + ZERO_BYTES pub = self.bytes_to_pub(seed) xpub = prefix + MAIN_DIGEST + FINGERPRINT + chain_code + pub Hash64 = self.double_sha256(xpub) xpub += Hash64[:4] xpubBase58 = b58encode(xpub) return xpubBase58.decode('utf-8') def bytes_to_mne(self, byte: bytes): seed = byte[:32] return Mnemonic("english").to_mnemonic(seed) def bytes_to_binary(self, bytes_: bytes) -> str: if len(bytes_) != 32: raise ValueError("Input bytes should have a length of 32.") # Convert each byte to its binary representation and pad with zeros return ''.join(format(byte, '08b') for byte in bytes_) def bytes_to_wif(self, private_key, compress=True): if compress:
EXTENDED_KEY = MAIN_PREFIX + private_key + MAIN_SUFFIX
9
2023-11-10 14:51:41+00:00
8k
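The row's `next_line`, `EXTENDED_KEY = MAIN_PREFIX + private_key + MAIN_SUFFIX`, begins the compressed-WIF branch of `bytes_to_wif`: version byte `0x80`, the 32-byte key, a `0x01` compression flag, then a four-byte double-SHA256 checksum, all Base58-encoded. The sketch below reproduces that pipeline with only the standard library, mirroring the integer-based `base58encode` helper and alphabet shown in the row's context; the test key is an arbitrary illustrative value, and the uncompressed branch is assumed to simply omit the flag byte.

```python
# Standalone sketch of the compressed-WIF path that the row's gold next_line
# starts: 0x80 version byte + 32-byte key + 0x01 flag, double-SHA256 checksum,
# Base58 encoding. Uses only the stdlib; alphabet matches the row's context.
import hashlib

BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'


def b58encode_int(num: int) -> str:
    # Same repeated-divmod scheme as the row's base58encode(num).
    if num == 0:
        return BASE58_ALPHABET[0]
    out = []
    while num:
        num, rem = divmod(num, 58)
        out.append(BASE58_ALPHABET[rem])
    return ''.join(reversed(out))


def private_key_to_wif(private_key: bytes, compress: bool = True) -> str:
    # MAIN_PREFIX = b'\x80', MAIN_SUFFIX = b'\x01' in the row's constants.
    extended = b'\x80' + private_key + (b'\x01' if compress else b'')
    checksum = hashlib.sha256(hashlib.sha256(extended).digest()).digest()[:4]
    payload = extended + checksum
    # Preserve leading zero bytes as '1' characters, as in the row's b58encode.
    pad = len(payload) - len(payload.lstrip(b'\x00'))
    return '1' * pad + b58encode_int(int.from_bytes(payload, 'big'))


# Example with an arbitrary all-0x01 test key (illustrative only).
wif = private_key_to_wif(bytes.fromhex('01' * 32))
```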
henriquesebastiao/poupy
project/apps/app/urls.py
[ { "identifier": "AccountCreateView", "path": "project/apps/app/views/accounts.py", "snippet": "class AccountCreateView(LoginRequiredMixin, CreateView):\n \"\"\"Create a new account.\"\"\"\n\n login_url = 'login'\n\n model = Account\n form_class = AccountEditForm\n template_name = 'pages/app/new_account.html'\n success_url = reverse_lazy('accounts')\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n return super().form_valid(form)" }, { "identifier": "AccountListView", "path": "project/apps/app/views/accounts.py", "snippet": "class AccountListView(\n LoginRequiredMixin, ListView\n): # pylint: disable=too-many-ancestors\n \"\"\"List all accounts.\"\"\"\n\n login_url = 'login'\n\n template_name = 'pages/app/accounts.html'\n context_object_name = 'accounts'\n\n def get_queryset(self):\n return Account.objects.filter(user=self.request.user).only(\n 'id', 'name', 'balance'\n )" }, { "identifier": "AccountUpdateView", "path": "project/apps/app/views/accounts.py", "snippet": "class AccountUpdateView(LoginRequiredMixin, UpdateView):\n \"\"\"Update an account.\"\"\"\n\n login_url = 'login'\n\n model = Account\n form_class = AccountEditForm\n template_name = 'pages/app/account_edit.html'\n success_url = reverse_lazy('accounts')\n pk_url_kwarg = 'account_id'\n\n def get_queryset(self):\n return Account.objects.filter(user=self.request.user).only(\n 'name', 'balance'\n )" }, { "identifier": "DeleteAccountConfirmView", "path": "project/apps/app/views/accounts.py", "snippet": "class DeleteAccountConfirmView(LoginRequiredMixin, View):\n \"\"\"View to confirm the deletion of an account.\"\"\"\n\n login_url = 'login'\n\n @staticmethod\n def post(request):\n \"\"\"Delete the account if request is POST.\"\"\"\n\n account = Account.objects.get(\n id=request.POST['account'],\n user=request.user,\n )\n\n if account.balance == 0:\n account.delete()\n messages.success(request, 'Account deleted successfully.')\n else:\n messages.error(\n request,\n 'It is not possible to delete an account with a non-zero balance.',\n )\n\n return redirect('accounts')" }, { "identifier": "DeleteAccountView", "path": "project/apps/app/views/accounts.py", "snippet": "class DeleteAccountView(LoginRequiredMixin, FormView):\n \"\"\"View to delete an account.\"\"\"\n\n login_url = 'login'\n\n template_name = 'pages/app/delete_account.html'\n form_class = DeleteAccountForm\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def get_context_data(\n self, **kwargs\n ): # pylint: disable=useless-parent-delegation\n return super().get_context_data(**kwargs)" }, { "identifier": "App", "path": "project/apps/app/views/app.py", "snippet": "class App(LoginRequiredMixin, View):\n \"\"\"View for the home page of the app.\"\"\"\n\n login_url = 'login'\n\n @staticmethod\n def get(request):\n \"\"\"Return the home page of the app.\"\"\"\n user = get_user_model().objects.get(email=request.user.email)\n accounts = Account.objects.filter(user=user)\n number_accounts = len(accounts)\n total_balance = sum(account.balance for account in accounts)\n\n all_bigger_transactions_of_month = list(\n Transaction.objects.filter(\n user=user,\n created_at__month=datetime.now().month,\n )\n .values('description', 'type', 'value', 'account')\n .order_by('-value')[:3]\n )\n\n all_bigger_transfer_of_month = list(\n Transfer.objects.filter(\n user=user,\n created_at__month=datetime.now().month,\n )\n .values(\n 'description', 'type', 'account_origin', 
'account_destination'\n )\n .order_by('-value')[:3]\n )\n\n all_bigger_transactions_of_month.extend(all_bigger_transfer_of_month)\n\n # Order by value and get the first 3\n all_bigger_transactions_of_month = sorted(\n all_bigger_transactions_of_month,\n key=lambda transaction: transaction.value,\n reverse=True,\n )[:3]\n\n monthly_expenses = sum(\n transaction.value\n for transaction in Transaction.objects.filter(\n user=user,\n created_at__month=datetime.now().month,\n type='EXPENSE',\n ).only('value')\n )\n\n monthly_incomes = sum(\n transaction.value\n for transaction in Transaction.objects.filter(\n user=user,\n created_at__month=datetime.now().month,\n type='INCOME',\n ).only('value')\n )\n\n bigger_transactions_of_month = []\n\n for transaction in all_bigger_transactions_of_month:\n if transaction.type == 'TRANSFER':\n account_name = f'{transaction.account_origin} -> {transaction.account_destination}'\n else:\n account_name = transaction.account.name\n\n bigger_transactions_of_month.append(\n {\n 'type': transaction.type,\n 'description': transaction.description,\n 'value': transaction.value,\n 'account': account_name,\n }\n )\n\n return render(\n request,\n 'pages/app/home.html',\n context={\n 'logo_name_url': 'app',\n 'user_first_name': user.first_name,\n 'total_balance': total_balance,\n 'accounts': accounts,\n 'number_accounts': number_accounts,\n 'bigger_transactions_of_month': bigger_transactions_of_month,\n 'monthly_expenses': monthly_expenses,\n 'monthly_incomes': monthly_incomes,\n },\n )" }, { "identifier": "ExpanseCreateView", "path": "project/apps/app/views/expanse.py", "snippet": "class ExpanseCreateView(LoginRequiredMixin, CreateView):\n \"\"\"View for creating a new expanse.\"\"\"\n\n # pylint: disable=duplicate-code\n login_url = 'login'\n\n form_class = NewTransactionForm\n success_url = reverse_lazy('app')\n\n def form_valid(self, form):\n transaction = form.save(commit=False)\n transaction.user_id = self.request.user.id\n\n account = Account.objects.get(\n name=transaction.account.name, user=self.request.user\n )\n account.balance -= transaction.value\n account.save()\n return super().form_valid(form)" }, { "identifier": "ExpanseView", "path": "project/apps/app/views/expanse.py", "snippet": "class ExpanseView(LoginRequiredMixin, FormView):\n \"\"\"View for the expanse page.\"\"\"\n\n login_url = 'login'\n\n template_name = 'pages/app/new_expanse.html'\n form_class = NewTransactionForm\n\n def get_context_data(\n self, **kwargs\n ): # pylint: disable=useless-parent-delegation\n return super().get_context_data(**kwargs)" }, { "identifier": "IncomeCreateView", "path": "project/apps/app/views/income.py", "snippet": "class IncomeCreateView(LoginRequiredMixin, CreateView):\n \"\"\"View for creating income transactions.\"\"\"\n\n # pylint: disable=duplicate-code\n login_url = 'login'\n\n form_class = NewTransactionForm\n success_url = reverse_lazy('app')\n\n def form_valid(self, form):\n transaction = form.save(commit=False)\n transaction.user_id = self.request.user.id\n\n transaction.type = Transaction.TransactionType.INCOME\n transaction.save()\n\n # Update value in account\n account = Account.objects.get(\n name=transaction.account.name, user=self.request.user\n )\n account.balance += transaction.value\n account.save()\n return super().form_valid(form)" }, { "identifier": "IncomeView", "path": "project/apps/app/views/income.py", "snippet": "class IncomeView(LoginRequiredMixin, FormView):\n \"\"\"View for income transactions page.\"\"\"\n\n login_url = 'login'\n\n 
template_name = 'pages/app/new_income.html'\n form_class = NewTransactionForm\n\n def get_context_data(\n self, **kwargs\n ): # pylint: disable=useless-parent-delegation\n return super().get_context_data(**kwargs)" }, { "identifier": "LoginCreateView", "path": "project/apps/app/views/login.py", "snippet": "class LoginCreateView(FormView):\n \"\"\"View for login create.\"\"\"\n\n template_name = 'pages/app/login.html'\n form_class = LoginForm\n\n def form_valid(self, form):\n authenticate_user = authenticate(\n self.request,\n username=form.cleaned_data.get('username', ''),\n password=form.cleaned_data.get('password', ''),\n )\n\n if authenticate_user is not None:\n login(self.request, authenticate_user)\n # After the user authenticates, redirect them to the app\n return redirect(reverse('app'))\n\n # If the user is not authenticated, an error message is displayed.\n messages.error(self.request, 'Invalid credentials.')\n return super().form_invalid(form)\n\n def form_invalid(self, form):\n messages.error(self.request, 'Error in data validation.')\n return super().form_invalid(form)" }, { "identifier": "LoginView", "path": "project/apps/app/views/login.py", "snippet": "class LoginView(FormView):\n \"\"\"View for login page.\"\"\"\n\n template_name = 'pages/app/login.html'\n form_class = LoginForm\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form_action'] = reverse('login_create')\n return context" }, { "identifier": "logout_view", "path": "project/apps/app/views/login.py", "snippet": "@login_required(login_url='login')\ndef logout_view(request):\n \"\"\"View for logout.\"\"\"\n if not request.POST:\n raise Http404()\n\n logout(request)\n messages.success(request, 'Logout completed successfully.')\n return redirect(reverse('login'))" }, { "identifier": "UserApplicationUpdatePasswordView", "path": "project/apps/app/views/settings.py", "snippet": "class UserApplicationUpdatePasswordView(LoginRequiredMixin, View):\n login_url = 'login'\n\n @staticmethod\n def get(request, *args, **kwargs):\n form = UserApplicationEditPasswordForm\n\n return render(\n request,\n 'pages/app/new_password.html',\n context={\n 'form': form,\n },\n )\n\n @staticmethod\n def post(request, *args, **kwargs):\n form = UserApplicationEditPasswordForm(request.POST)\n\n if form.is_valid():\n request.user.set_password(request.POST['password'].strip())\n logout(request)\n messages.success(\n request, 'Password changed successfully. 
Please login again.'\n )\n\n return redirect('login')\n else:\n return render(\n request,\n 'pages/app/new_password.html',\n context={\n 'form': form,\n },\n )" }, { "identifier": "UserApplicationUpdateView", "path": "project/apps/app/views/settings.py", "snippet": "class UserApplicationUpdateView(LoginRequiredMixin, View):\n login_url = 'login'\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Load the form with the current logged-in\n user data on the screen so that they can be edited\n \"\"\"\n form = UserApplicationEditForm(\n data=request.POST or None, instance=request.user\n )\n\n return render(\n request,\n 'pages/app/settings.html',\n context={\n 'form': form,\n },\n )\n\n @staticmethod\n def post(request, *args, **kwargs):\n \"\"\"\n Update the user's data with the data\n entered in the form\n\n Returns:\n Redirect to app page\n \"\"\"\n form = UserApplicationEditForm(\n data=request.POST or None, instance=request.user\n )\n\n if form.is_valid():\n user = form.save(commit=False)\n user.first_name = request.POST['first_name'].strip().title()\n user.last_name = request.POST['last_name'].strip().title()\n user.username = request.POST['username'].strip()\n user.email = request.POST['email'].strip()\n user.save()\n\n return redirect('app')" }, { "identifier": "SignupView", "path": "project/apps/app/views/signup.py", "snippet": "class SignupView(FormView):\n \"\"\"Signup view page.\"\"\"\n\n template_name = 'pages/app/signup.html'\n form_class = SignupForm\n success_url = reverse_lazy('login')\n\n def form_valid(self, form):\n self.request.session['signup_form_data'] = form.cleaned_data\n return super().form_valid(form)" }, { "identifier": "UserCreateView", "path": "project/apps/app/views/signup.py", "snippet": "class UserCreateView(CreateView):\n \"\"\"Create user view.\"\"\"\n\n template_name = 'pages/app/signup.html'\n form_class = SignupForm\n success_url = reverse_lazy('login')\n\n def get(self, request, *args, **kwargs):\n raise Http404\n\n def form_valid(self, form):\n user = form.save(commit=False)\n user.set_password(user.password) # Saves the encrypted password to the db\n user.save()\n messages.success(self.request, 'User created successfully.')\n return super().form_valid(form)\n\n def form_invalid(self, form):\n messages.error(self.request, 'Invalid form, try again.')\n return super().form_invalid(form)" }, { "identifier": "TransactionDeleteView", "path": "project/apps/app/views/transactions.py", "snippet": "class TransactionDeleteView(LoginRequiredMixin, View):\n \"\"\"Transaction delete view.\"\"\"\n\n login_url = 'login'\n\n @staticmethod\n def post(request, transaction_id, *args, **kwargs):\n \"\"\"Delete transaction if request method is POST.\"\"\"\n transaction = Transaction.objects.get(id=transaction_id)\n\n account = Account.objects.get(id=transaction.account.id)\n\n if transaction.type == 'INCOME':\n account.balance -= transaction.value\n elif transaction.type == 'EXPANSE':\n account.balance += transaction.value\n\n # In the future, we will implement the logic for deleting a transfer,\n # for now the delete button is not displayed when editing a transfer.\n\n account.save()\n transaction.delete()\n\n messages.success(request, 'Transaction deleted successfully.')\n\n return redirect('transactions')" }, { "identifier": "TransactionEditView", "path": "project/apps/app/views/transactions.py", "snippet": "class TransactionEditView(LoginRequiredMixin, UpdateView):\n \"\"\"Transaction edit view page.\"\"\"\n\n login_url = 'login'\n\n model = Transaction\n 
form_class = TransactionsEditForm\n template_name = 'pages/app/transaction_edit.html'\n pk_url_kwarg = 'transaction_id'\n\n def get_queryset(self):\n # If there is no transaction with this id, it means it is a transfer\n try:\n transaction = Transaction.objects.get(\n user=self.request.user,\n )\n except Transaction.DoesNotExist:\n transaction = Transfer.objects.filter(\n user=self.request.user,\n id=self.kwargs['transaction_id'],\n )\n return transaction\n\n def get_success_url(self):\n messages.success(self.request, 'Changes saved successfully.')\n return reverse_lazy('transactions')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Template information if the transaction is a transfer,\n # in which case the delete button for this transaction will not be displayed.\n context['transfer'] = self.object.type\n\n return context" }, { "identifier": "TransactionsView", "path": "project/apps/app/views/transactions.py", "snippet": "class TransactionsView(LoginRequiredMixin, TemplateView):\n \"\"\"Transactions view page.\"\"\"\n\n login_url = 'login'\n\n template_name = 'pages/app/transactions.html'\n\n def get_context_data(self, **kwargs):\n user = get_user_model().objects.get(email=self.request.user.email)\n all_transactions = list(\n Transaction.objects.filter(user=user)\n .order_by('-id')\n .only('id', 'type', 'description', 'value', 'account')\n )\n\n all_transfers = list(\n Transfer.objects.filter(user=user)\n .order_by('-id')\n .only('id', 'type', 'description', 'value')\n )\n\n # Combine transfers and transactions so that they are displayed on the transactions page\n all_transactions.extend(all_transfers)\n\n context = super().get_context_data(**kwargs)\n context['all_transactions'] = all_transactions\n return context" }, { "identifier": "TransferCreateView", "path": "project/apps/app/views/transfer.py", "snippet": "class TransferCreateView(LoginRequiredMixin, FormView):\n \"\"\"Transfer create view.\"\"\"\n\n login_url = 'login'\n\n template_name = 'pages/app/new_transfer.html'\n form_class = TransferForm\n\n def form_valid(self, form):\n data = form.cleaned_data\n account_origin = Account.objects.get(name=data['account_origin'])\n account_destination = Account.objects.get(\n name=data['account_destination']\n )\n\n if account_origin.balance >= data['value']:\n transaction = Transfer(\n description=data['description'],\n user=self.request.user,\n account_origin=account_origin,\n account_destination=account_destination,\n value=data['value'],\n )\n\n account_origin.balance -= data['value']\n account_destination.balance += data['value']\n\n account_origin.save()\n account_destination.save()\n\n transaction.save()\n\n return redirect('app')\n\n # If the user does not have enough balance to make the transfer,\n # an error message is displayed.\n messages.error(\n self.request, 'Insufficient balance to make the transfer.'\n )\n return self.form_invalid(form)\n\n def form_invalid(self, form):\n messages.error(self.request, 'Error in data validation.')\n return super().form_invalid(form)" }, { "identifier": "TransferView", "path": "project/apps/app/views/transfer.py", "snippet": "class TransferView(LoginRequiredMixin, FormView):\n \"\"\"Transfer view page.\"\"\"\n\n login_url = 'login'\n\n template_name = 'pages/app/new_transfer.html'\n form_class = TransferForm\n\n def get_context_data(\n self, **kwargs\n ): # pylint: disable=useless-parent-delegation\n return super().get_context_data(**kwargs)" } ]
from django.urls import path from ..app.views.accounts import ( AccountCreateView, AccountListView, AccountUpdateView, DeleteAccountConfirmView, DeleteAccountView, ) from ..app.views.app import App from ..app.views.expanse import ExpanseCreateView, ExpanseView from ..app.views.income import IncomeCreateView, IncomeView from ..app.views.login import LoginCreateView, LoginView, logout_view from ..app.views.settings import ( UserApplicationUpdatePasswordView, UserApplicationUpdateView, ) from ..app.views.signup import SignupView, UserCreateView from ..app.views.transactions import ( TransactionDeleteView, TransactionEditView, TransactionsView, ) from ..app.views.transfer import TransferCreateView, TransferView
4,445
"""URLs module.""" urlpatterns = [ path('', App.as_view(), name='app'), path('signup/', SignupView.as_view(), name='signup'), path('user-create/', UserCreateView.as_view(), name='user_create'), path('login/', LoginView.as_view(), name='login'), path('login/create/', LoginCreateView.as_view(), name='login_create'), path('logout/', logout_view, name='logout'), path('transactions/', TransactionsView.as_view(), name='transactions'), path( 'transaction/<int:transaction_id>/edit/',
"""URLs module.""" urlpatterns = [ path('', App.as_view(), name='app'), path('signup/', SignupView.as_view(), name='signup'), path('user-create/', UserCreateView.as_view(), name='user_create'), path('login/', LoginView.as_view(), name='login'), path('login/create/', LoginCreateView.as_view(), name='login_create'), path('logout/', logout_view, name='logout'), path('transactions/', TransactionsView.as_view(), name='transactions'), path( 'transaction/<int:transaction_id>/edit/',
TransactionEditView.as_view(),
18
2023-11-17 21:05:05+00:00
8k
UWNetworksLab/adn-compiler
compiler/element/props/flow.py
[ { "identifier": "Expr", "path": "compiler/element/node.py", "snippet": "class Expr(Node):\n def __init__(self, lhs: Expr, op: Operator, rhs: Expr):\n self.lhs = lhs\n self.op = op\n self.rhs = rhs\n self.type = \"unknown\"" }, { "identifier": "Identifier", "path": "compiler/element/node.py", "snippet": "class Identifier(Node):\n def __init__(self, name: str):\n self.name = name" }, { "identifier": "MethodCall", "path": "compiler/element/node.py", "snippet": "class MethodCall(Expr):\n def __init__(self, obj: Identifier, method: MethodType, args: List[Expr]):\n self.obj = obj\n self.method = method\n self.args = args" }, { "identifier": "AliasAnalyzer", "path": "compiler/element/props/analyzer.py", "snippet": "class AliasAnalyzer(Visitor):\n def __init__(self, target: str):\n self.targets: List[str] = [target]\n self.target_fields: Dict[str, List[str]] = {}\n for t in self.targets:\n self.target_fields[t] = []\n\n def visitBlock(self, node: List[Statement], ctx) -> List[str]:\n for s in node:\n s.accept(self, ctx)\n return self.targets\n\n def visitNode(self, node: Node, ctx):\n if node == START_NODE or node == END_NODE or node == PASS_NODE:\n return\n LOG.error(\n \"Node\", node.__class__.__name__, \"should be visited in alias analyzer\"\n )\n raise Exception(\"Unreachable!\")\n\n def visitProgram(self, node: Program, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitInternal(self, node: Internal, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitProcedure(self, node: Procedure, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitStatement(self, node: Statement, ctx):\n if node.stmt == None:\n return\n else:\n return node.stmt.accept(self, ctx)\n\n def visitMatch(self, node: Match, ctx):\n for (p, s) in node.actions:\n p.accept(self, ctx)\n for st in s:\n st.accept(self, ctx)\n\n def visitAssign(self, node: Assign, ctx):\n name = node.left.name\n is_target = node.right.accept(self, ctx)\n if is_target == True:\n self.targets.append(name)\n self.target_fields[name] = []\n\n def visitPattern(self, node: Pattern, ctx) -> bool:\n return node.value.accept(self, ctx)\n\n def visitExpr(self, node: Expr, ctx) -> bool:\n if isinstance(node.lhs, Identifier) and node.lhs.name in self.targets:\n return True\n if isinstance(node.rhs, Identifier) and node.rhs.name in self.targets:\n return True\n return node.lhs.accept(self, ctx) or node.rhs.accept(self, ctx)\n\n def visitIdentifier(self, node: Identifier, ctx) -> str:\n return node.name in self.targets\n\n def visitType(self, node: Type, ctx) -> bool:\n return False\n\n def visitFuncCall(self, node: FuncCall, ctx) -> bool:\n ret = node.name.accept(self, ctx)\n for a in node.args:\n ret = a.accept(self, ctx) or ret\n return ret\n\n def visitMethodCall(self, node: MethodCall, ctx) -> bool:\n assert isinstance(node.obj, Identifier)\n if node.obj.name in self.targets:\n if node.method.name == \"GET\":\n return True\n return False\n\n def visitSend(self, node: Send, ctx) -> bool:\n return False\n\n def visitLiteral(self, node: Literal, ctx) -> bool:\n return False\n\n def visitError(self, node: Error, ctx) -> bool:\n return False" }, { "identifier": "CopyAnalyzer", "path": "compiler/element/props/analyzer.py", "snippet": "class CopyAnalyzer(Visitor):\n def __init__(self, targets: List[str]):\n self.targets = targets\n self.send_num = 0\n\n def visitBlock(self, node: List[Statement], ctx) -> int:\n for s in node:\n s.accept(self, ctx)\n return self.send_num\n\n def visitNode(self, node: Node, ctx):\n if node == START_NODE or node == END_NODE or 
node == PASS_NODE:\n return\n LOG.error(node.__class__.__name__, \"should be visited in copy analyzer\")\n raise Exception(\"Unreachable!\")\n\n def visitProgram(self, node: Program, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitInternal(self, node: Internal, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitProcedure(self, node: Procedure, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitStatement(self, node: Statement, ctx):\n if node.stmt == None:\n return\n else:\n return node.stmt.accept(self, ctx)\n\n def visitMatch(self, node: Match, ctx):\n for (p, s) in node.actions:\n for st in s:\n st.accept(self, ctx)\n\n def visitAssign(self, node: Assign, ctx):\n pass\n\n def visitPattern(self, node: Pattern, ctx):\n pass\n\n def visitExpr(self, node: Expr, ctx):\n node.lhs.accept(self, ctx)\n node.rhs.accept(self, ctx)\n\n def visitIdentifier(self, node: Identifier, ctx) -> bool:\n pass\n\n def visitType(self, node: Type, ctx):\n pass\n\n def visitFuncCall(self, node: FuncCall, ctx):\n node.name.accept(self, ctx)\n for a in node.args:\n a.accept(self, ctx)\n\n def visitMethodCall(self, node: MethodCall, ctx):\n pass\n\n def visitSend(self, node: Send, ctx) -> bool:\n send_target = node.msg.accept(ExprResolver(), ctx)\n if send_target in self.targets:\n self.send_num += 1\n\n def visitLiteral(self, node: Literal, ctx) -> bool:\n return False\n\n def visitError(self, node: Error, ctx) -> bool:\n return False" }, { "identifier": "DropAnalyzer", "path": "compiler/element/props/analyzer.py", "snippet": "class DropAnalyzer(Visitor):\n def __init__(self, targets: List[str], direction: str):\n self.direction = direction\n self.targets = targets\n self.target_fields: Dict[str, List[str]] = {}\n self.random_included = False\n for t in targets:\n self.target_fields[t] = []\n\n def visitBlock(self, node: List[Statement], ctx) -> bool:\n ret = False\n for s in node:\n ret = s.accept(self, ctx) or ret\n return ret\n\n def visitNode(self, node: Node, ctx):\n if node == START_NODE or node == END_NODE or node == PASS_NODE:\n return\n LOG.error(\"Node\", node.__class__.__name__, \"should be visited in drop analyzer\")\n raise Exception(\"Unreachable!\")\n\n def visitProgram(self, node: Program, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitInternal(self, node: Internal, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitProcedure(self, node: Procedure, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitStatement(self, node: Statement, ctx):\n if node.stmt == None:\n return\n else:\n return node.stmt.accept(self, ctx)\n\n def visitMatch(self, node: Match, ctx) -> bool:\n for a in node.actions:\n for st in a[1]:\n if isinstance(st, Send):\n if isinstance(st.msg, Error):\n return True\n return False\n # todo! fixme\n raise Exception(\"Unreachable! 
Match should not appear in drop analyzer\")\n\n def visitAssign(self, node: Assign, ctx) -> bool:\n return False\n\n def visitPattern(self, node: Pattern, ctx) -> bool:\n return False\n\n def visitExpr(self, node: Expr, ctx) -> bool:\n return node.lhs.accept(self, ctx) or node.rhs.accept(self, ctx)\n\n def visitIdentifier(self, node: Identifier, ctx) -> bool:\n return False\n\n def visitType(self, node: Type, ctx) -> bool:\n return False\n\n def visitFuncCall(self, node: FuncCall, ctx) -> bool:\n if node.name.name == \"randomf\" or node.name.name == \"randomi\":\n self.random_included = True\n return False\n\n def visitMethodCall(self, node: MethodCall, ctx) -> bool:\n return False\n\n def visitSend(self, node: Send, ctx) -> bool:\n if node.direction == self.direction:\n name = node.msg.accept(ExprResolver(), ctx)\n return name in self.targets\n else:\n return False\n\n def visitLiteral(self, node: Literal, ctx) -> bool:\n return False\n\n def visitError(self, node: Error, ctx) -> bool:\n return False" }, { "identifier": "ReadAnalyzer", "path": "compiler/element/props/analyzer.py", "snippet": "class ReadAnalyzer(Visitor):\n def __init__(self, targets: List[str]):\n self.targets = targets\n self.target_fields: Dict[str, List[str]] = {}\n for t in targets:\n self.target_fields[t] = []\n\n def visitBlock(self, node: List[Statement], ctx) -> bool:\n ret = False\n for s in node:\n ret = s.accept(self, ctx) or ret\n return ret\n\n def visitNode(self, node: Node, ctx):\n if node == START_NODE or node == END_NODE or node == PASS_NODE:\n return\n LOG.error(node.__class__.__name__, \"should be visited in read analyzer\")\n raise Exception(\"Unreachable!\")\n\n def visitProgram(self, node: Program, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitInternal(self, node: Internal, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitProcedure(self, node: Procedure, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitStatement(self, node: Statement, ctx):\n if node.stmt == None:\n return\n else:\n return node.stmt.accept(self, ctx)\n\n def visitMatch(self, node: Match, ctx) -> bool:\n ret = False\n for (p, s) in node.actions:\n ret = p.accept(self, ctx) or ret\n for st in s:\n ret = st.accept(self, ctx) or ret\n return ret\n\n def visitAssign(self, node: Assign, ctx) -> bool:\n return node.left.accept(self, ctx) or node.right.accept(self, ctx)\n\n def visitPattern(self, node: Pattern, ctx) -> bool:\n return node.value.accept(self, ctx)\n\n def visitExpr(self, node: Expr, ctx) -> bool:\n return node.lhs.accept(self, ctx) or node.rhs.accept(self, ctx)\n\n def visitIdentifier(self, node: Identifier, ctx) -> bool:\n return False\n\n def visitType(self, node: Type, ctx) -> bool:\n return False\n\n def visitFuncCall(self, node: FuncCall, ctx) -> bool:\n ret = node.name.accept(self, ctx)\n for a in node.args:\n ret = a.accept(self, ctx) or ret\n return ret\n\n def visitMethodCall(self, node: MethodCall, ctx) -> bool:\n if isinstance(node.obj, Identifier):\n if node.obj.name in self.targets and node.method.name == \"GET\":\n er = ExprResolver()\n fields = [i.accept(er, None) for i in node.args]\n self.target_fields[node.obj.name] += fields\n return True\n else:\n raise NotADirectoryError\n ret = False\n for a in node.args:\n if a != None:\n ret = a.accept(self, ctx) or ret\n return ret\n\n def visitSend(self, node: Send, ctx) -> bool:\n return node.msg.accept(self, ctx)\n\n def visitLiteral(self, node: Literal, ctx) -> bool:\n return False\n\n def visitError(self, node: Error, ctx) -> bool:\n return 
False" }, { "identifier": "StateAnalyzer", "path": "compiler/element/props/analyzer.py", "snippet": "class StateAnalyzer(Visitor):\n def __init__():\n pass\n\n def visitNode(self, ctx):\n raise Exception(\"Should be unreachable!\")\n\n def visitInternal(self, node: Internal, ctx) -> int:\n return len(node.internal) > 0" }, { "identifier": "WriteAnalyzer", "path": "compiler/element/props/analyzer.py", "snippet": "class WriteAnalyzer(Visitor):\n def __init__(self, targets: List[str]):\n self.targets = targets\n self.target_fields: Dict[str, List[Tuple(str, str)]] = {}\n for t in targets:\n self.target_fields[t] = []\n\n def visitBlock(self, node: List[Statement], ctx) -> bool:\n ret = False\n for s in node:\n ret = s.accept(self, ctx) or ret\n return ret\n\n def visitNode(self, node: Node, ctx):\n if node == START_NODE or node == END_NODE or node == PASS_NODE:\n return\n LOG.error(node.__class__.__name__, \"should be visited in write analyzer\")\n raise Exception(\"Unreachable!\")\n\n def visitProgram(self, node: Program, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitInternal(self, node: Internal, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitProcedure(self, node: Procedure, ctx):\n raise Exception(\"Unreachable!\")\n\n def visitStatement(self, node: Statement, ctx):\n if node.stmt == None:\n return\n else:\n return node.stmt.accept(self, ctx)\n\n def visitMatch(self, node: Match, ctx) -> bool:\n ret = False\n for (p, s) in node.actions:\n ret = p.accept(self, ctx) or ret\n for st in s:\n ret = st.accept(self, ctx) or ret\n return ret\n\n def visitAssign(self, node: Assign, ctx) -> bool:\n return node.left.accept(self, ctx) or node.right.accept(self, ctx)\n\n def visitPattern(self, node: Pattern, ctx) -> bool:\n return node.value.accept(self, ctx)\n\n def visitExpr(self, node: Expr, ctx) -> bool:\n return node.lhs.accept(self, ctx) or node.rhs.accept(self, ctx)\n\n def visitIdentifier(self, node: Identifier, ctx) -> bool:\n return False\n\n def visitType(self, node: Type, ctx) -> bool:\n return False\n\n def visitFuncCall(self, node: FuncCall, ctx) -> bool:\n ret = node.name.accept(self, ctx)\n for a in node.args:\n ret = a.accept(self, ctx) or ret\n return ret\n\n def visitMethodCall(self, node: MethodCall, ctx) -> bool:\n assert isinstance(node.obj, Identifier)\n if node.obj.name in self.targets and node.method.name == \"SET\":\n er = ExprResolver()\n assert len(node.args) == 2\n fields = [i.accept(er, None) for i in node.args]\n self.target_fields[node.obj.name] += [(fields[0], fields[1])]\n return True\n ret = False\n for a in node.args:\n if a != None:\n ret = a.accept(self, ctx) or ret\n return ret\n\n def visitSend(self, node: Send, ctx) -> bool:\n return node.msg.accept(self, ctx)\n\n def visitLiteral(self, node: Literal, ctx) -> bool:\n return False\n\n def visitError(self, node: Error, ctx) -> bool:\n return False" }, { "identifier": "Visitor", "path": "compiler/element/visitor.py", "snippet": "class Visitor(ABC):\n def visitNode(self, node: Node, ctx):\n raise Exception(f\"visit function for {node.__class__.__name__} not implemented\")\n\n def visitProgram(self, node: Program, ctx):\n return self.visitNode(node)\n\n def visitInternal(self, node: Internal, ctx):\n return self.visitNode(node)\n\n def visitProcedure(self, node: Procedure, ctx):\n return self.visitNode(node)\n\n def visitStatement(self, node: Statement, ctx):\n return self.visitNode(node)\n\n def visitMatch(self, node: Match, ctx):\n return self.visitNode(node)\n\n def visitAssign(self, node: Assign, 
ctx):\n return self.visitNode(node)\n\n def visitPattern(self, node: Pattern, ctx):\n return self.visitNode(node)\n\n def visitExpr(self, node: Expr, ctx):\n return self.visitNode(node)\n\n def visitIdentifier(self, node: Identifier, ctx):\n return self.visitNode(node)\n\n def visitFuncCall(self, node: FuncCall, ctx):\n return self.visitNode(node)\n\n def visitMethodCall(self, node: MethodCall, ctx):\n return self.visitNode(node)\n\n def visitSend(self, node: Send, ctx):\n return self.visitNode(node)\n\n def visitLiteral(self, node: Literal, ctx):\n return self.visitNode(node)" } ]
from typing import Dict, List, Optional, Tuple from compiler.element.node import * from compiler.element.node import Expr, Identifier, MethodCall from compiler.element.props.analyzer import ( AliasAnalyzer, CopyAnalyzer, DropAnalyzer, ReadAnalyzer, StateAnalyzer, WriteAnalyzer, ) from compiler.element.visitor import Visitor
5,645
prev = v.idx return prev def handle_match(self, match: Match, prev: int) -> None: expr_v = Vertex(Statement(match.expr), len(self.vertices), "match_expr") self.vertices.append(expr_v) self.link(prev, expr_v.idx) prev = expr_v.idx end_points = [] for (p, s) in match.actions: match len(s): case 0: head = PASS_NODE # empty statement, do nothing rest = [] case 1: head = s[0] rest = [] case _: head = s[0] rest = s[1:] head_v = Vertex(head, len(self.vertices), "match_head") self.vertices.append(head_v) self.link(prev, head_v.idx, (expr_v.node, p)) if len(rest) == 0: end_points.append(head_v.idx) else: end_points.append(self.handle_block(rest, head_v.idx)) merge_v = Vertex(PASS_NODE, len(self.vertices), "match_merge") self.vertices.append(merge_v) for ep in end_points: self.link(ep, merge_v.idx) return merge_v.idx def build_graph(self, proc: Procedure) -> None: start_v = Vertex(START_NODE, 0, "start") end_v = Vertex(END_NODE, 1, "end") self.vertices.append(start_v) self.vertices.append(end_v) prev = 0 for body in proc.body: # order matters, since match is a subclass of statement if isinstance(body, Match): prev = self.handle_match(body, prev) elif isinstance(body, Statement): prev = self.handle_block([body], prev) else: raise Exception("build graph encountered: ", body.__class__.__name__) self.link(prev, end_v.idx) def extract_path(self) -> List[List[Vertex]]: q = [0] ret: Dict[int, List[List[Vertex]]] = {} ret[0] = [[self.vertices[0]]] while len(q) > 0: u = q.pop() for e in self.edges: if e.u == u: v = self.vertices[e.v] paths = ret[u].copy() paths = [p + [v] for p in paths] if e.v not in ret: ret[e.v] = paths else: ret[e.v] = ret[e.v] + paths self.in_deg[e.v] -= 1 if self.in_deg[e.v] == 0: q.append(e.v) return ret[1] def analyze(self, proc: Procedure, verbose: bool = False) -> Property: # Paths represent all possible code path of this element self.build_graph(proc) paths = self.extract_path() rpc_name = f"rpc_{proc.name}" if proc.name == "req": direction = "NET" elif proc.name == "resp": direction = "APP" report = "Total #Path = " + str(len(paths)) + "\n" ret = Property() # Visit every possible code path and take a union of the operation for path in paths: report += "\nFor path: \n " report += "->".join([v.annotation for v in path if v != self.vertices[-1]]) report += "\n\n" path_nodes = [v.node for v in path] aa = AliasAnalyzer(rpc_name) targets = aa.visitBlock(path_nodes, None) wa = WriteAnalyzer(targets) write = wa.visitBlock(path_nodes, None) if write: write_fields = wa.target_fields report += "Write: " for (k, v) in write_fields.items(): for vv in v: report += f"{vv} " ret.write.append(vv[0]) report += "\n" ra = ReadAnalyzer(targets) read = ra.visitBlock(path_nodes, None) if read: read_fields = ra.target_fields report += "Read: " for (k, v) in read_fields.items(): for vv in v: report += f"({vv}) " ret.read.append(vv) report += "\n"
class Property: def __init__(self) -> None: self.drop: bool = False self.block: bool = False self.read: List[str] = [] self.write: List[str] = [] self.copy: bool = False def check(self): self.read = list(set(self.read)) self.write = list(set(self.write)) self.read = [i.strip("'") for i in self.read] self.write = [i.strip("'") for i in self.write] class Edge: def __init__(self, u: int, v: int, w: Tuple[Expr, Expr] = []) -> None: self.u = u self.v = v self.w = w class Vertex: def __init__(self, node: Node, idx: int, annotation: Optional[str] = None) -> None: self.node = node self.idx = idx if annotation is None: self.annotation = self.node.__class__.__name__ else: self.annotation = "[" + annotation + "]" + self.node.__class__.__name__ class FlowGraph: def __init__(self) -> None: self.vertices: List[Vertex] = [] self.edges: List[Edge] = [] self.in_deg: Dict[int, int] = {} def link(self, u: int, v: int, w: Tuple[Expr, Expr] = []) -> None: self.edges.append(Edge(u, v, w)) if v in self.in_deg: self.in_deg[v] += 1 else: self.in_deg[v] = 1 def handle_block(self, block: List[Statement], prev: int) -> int: for s in block: assert isinstance(s, Statement) v = Vertex(s, len(self.vertices)) self.vertices.append(v) self.link(prev, v.idx) prev = v.idx return prev def handle_match(self, match: Match, prev: int) -> None: expr_v = Vertex(Statement(match.expr), len(self.vertices), "match_expr") self.vertices.append(expr_v) self.link(prev, expr_v.idx) prev = expr_v.idx end_points = [] for (p, s) in match.actions: match len(s): case 0: head = PASS_NODE # empty statement, do nothing rest = [] case 1: head = s[0] rest = [] case _: head = s[0] rest = s[1:] head_v = Vertex(head, len(self.vertices), "match_head") self.vertices.append(head_v) self.link(prev, head_v.idx, (expr_v.node, p)) if len(rest) == 0: end_points.append(head_v.idx) else: end_points.append(self.handle_block(rest, head_v.idx)) merge_v = Vertex(PASS_NODE, len(self.vertices), "match_merge") self.vertices.append(merge_v) for ep in end_points: self.link(ep, merge_v.idx) return merge_v.idx def build_graph(self, proc: Procedure) -> None: start_v = Vertex(START_NODE, 0, "start") end_v = Vertex(END_NODE, 1, "end") self.vertices.append(start_v) self.vertices.append(end_v) prev = 0 for body in proc.body: # order matters, since match is a subclass of statement if isinstance(body, Match): prev = self.handle_match(body, prev) elif isinstance(body, Statement): prev = self.handle_block([body], prev) else: raise Exception("build graph encountered: ", body.__class__.__name__) self.link(prev, end_v.idx) def extract_path(self) -> List[List[Vertex]]: q = [0] ret: Dict[int, List[List[Vertex]]] = {} ret[0] = [[self.vertices[0]]] while len(q) > 0: u = q.pop() for e in self.edges: if e.u == u: v = self.vertices[e.v] paths = ret[u].copy() paths = [p + [v] for p in paths] if e.v not in ret: ret[e.v] = paths else: ret[e.v] = ret[e.v] + paths self.in_deg[e.v] -= 1 if self.in_deg[e.v] == 0: q.append(e.v) return ret[1] def analyze(self, proc: Procedure, verbose: bool = False) -> Property: # Paths represent all possible code path of this element self.build_graph(proc) paths = self.extract_path() rpc_name = f"rpc_{proc.name}" if proc.name == "req": direction = "NET" elif proc.name == "resp": direction = "APP" report = "Total #Path = " + str(len(paths)) + "\n" ret = Property() # Visit every possible code path and take a union of the operation for path in paths: report += "\nFor path: \n " report += "->".join([v.annotation for v in path if v != self.vertices[-1]]) report += "\n\n" 
path_nodes = [v.node for v in path] aa = AliasAnalyzer(rpc_name) targets = aa.visitBlock(path_nodes, None) wa = WriteAnalyzer(targets) write = wa.visitBlock(path_nodes, None) if write: write_fields = wa.target_fields report += "Write: " for (k, v) in write_fields.items(): for vv in v: report += f"{vv} " ret.write.append(vv[0]) report += "\n" ra = ReadAnalyzer(targets) read = ra.visitBlock(path_nodes, None) if read: read_fields = ra.target_fields report += "Read: " for (k, v) in read_fields.items(): for vv in v: report += f"({vv}) " ret.read.append(vv) report += "\n"
da = DropAnalyzer(targets, direction)
5
2023-11-13 07:31:52+00:00
8k
tyang816/ProtSSN
src/dataset/cath_dataset.py
[ { "identifier": "safe_index", "path": "src/utils/dataset_utils.py", "snippet": "def safe_index(l, e):\n \"\"\"\n Return index of element e in list l. If e is not present, return the last index\n \"\"\"\n try:\n return l.index(e)\n except:\n return len(l) - 1" }, { "identifier": "one_hot_res", "path": "src/utils/dataset_utils.py", "snippet": "def one_hot_res(type_idx, num_residue_type=20):\n rec_feat = [0 for _ in range(num_residue_type)]\n if type_idx < num_residue_type:\n rec_feat[type_idx] = 1\n return rec_feat\n else:\n # print(\"Warning: residue type index exceeds \"+num_residue_type+\" !\")\n return False" }, { "identifier": "log", "path": "src/utils/dataset_utils.py", "snippet": "def log(*args):\n print(f'[{datetime.now()}]', *args)" }, { "identifier": "dihedral", "path": "src/utils/dataset_utils.py", "snippet": "def dihedral(vec1, vec2, vec3, vec4):\n \"\"\"\n Returns a float value for the dihedral angle between\n the four vectors. They define the bond for which the\n torsion is calculated (~) as:\n V1 - V2 ~ V3 - V4\n The vectors vec1 .. vec4 can be array objects, lists or tuples of length\n three containing floats.\n For Scientific.geometry.Vector objects the behavior is different\n on Windows and Linux. Therefore, the latter is not a featured input type\n even though it may work.\n If the dihedral angle cant be calculated (because vectors are collinear),\n the function raises a DihedralGeometryError\n \"\"\"\n # create array instances.\n v1, v2, v3, v4 = create_vectors(vec1, vec2, vec3, vec4)\n all_vecs = [v1, v2, v3, v4]\n\n # rule out that two of the atoms are identical\n # except the first and last, which may be.\n for i in range(len(all_vecs)-1):\n for j in range(i+1, len(all_vecs)):\n if i > 0 or j < 3: # exclude the (1,4) pair\n equals = all_vecs[i] == all_vecs[j]\n if equals.all():\n raise DihedralGeometryError(\n \"Vectors #%i and #%i may not be identical!\" % (i, j))\n\n # calculate vectors representing bonds\n v12 = v2-v1\n v23 = v3-v2\n v34 = v4-v3\n\n # calculate vectors perpendicular to the bonds\n normal1 = cross(v12, v23)\n normal2 = cross(v23, v34)\n\n # check for linearity\n if norm(normal1) == 0 or norm(normal2) == 0:\n raise DihedralGeometryError(\n \"Vectors are in one line; cannot calculate normals!\")\n\n # normalize them to length 1.0\n normal1 = normal1/norm(normal1)\n normal2 = normal2/norm(normal2)\n\n # calculate torsion and convert to degrees\n torsion = angle(normal1, normal2) * 180.0/pi\n\n # take into account the determinant\n # (the determinant is a scalar value distinguishing\n # between clockwise and counter-clockwise torsion.\n if scalar(normal1, v34) >= 0:\n return torsion\n else:\n torsion = 360-torsion\n if torsion == 360:\n torsion = 0.0\n return torsion" }, { "identifier": "NormalizeProtein", "path": "src/utils/dataset_utils.py", "snippet": "class NormalizeProtein(BaseTransform):\n r\"\"\"Centers and normalizes node positions to the interval :math:`(-1, 1)`\n (functional name: :obj:`normalize_scale`).\n \"\"\"\n\n def __init__(self, filename, skip_x=20, skip_edge_attr=64, safe_domi=1e-10):\n\n dic = torch.load(filename)\n self.skip_x = skip_x\n self.skip_edge_attr = skip_edge_attr\n self.safe_domi = safe_domi\n self.x_mean = dic['x_mean']\n self.x_std = dic['x_std']\n self.pos_mean = dic['pos_mean']\n self.pos_std = torch.mean(dic['pos_std'])\n self.edge_attr_mean = dic['edge_attr_mean']\n self.edge_attr_std = dic['edge_attr_std']\n\n def __call__(self, data):\n data.x[:, self.skip_x:] = (data.x[:, self.skip_x:] - 
self.x_mean[self.skip_x:]\n ).div_(self.x_std[self.skip_x:] + self.safe_domi)\n data.pos = data.pos - data.pos.mean(dim=-2, keepdim=False)\n data.pos = data.pos.div_(self.pos_std + self.safe_domi)\n data.edge_attr[:, self.skip_edge_attr:] = (data.edge_attr[:, self.skip_edge_attr:]\n - self.edge_attr_mean[self.skip_edge_attr:]).div_(self.edge_attr_std[self.skip_edge_attr:] + self.safe_domi)\n\n return data" }, { "identifier": "dataset_argument_", "path": "src/utils/dataset_utils.py", "snippet": "def dataset_argument_(root):\n dataset_arg = {}\n if root == \"cath40_k10_dyn_imem\":\n dataset_arg['root'] = f\"data/{root}\"\n dataset_arg['name'] = '40'\n dataset_arg['set_length'] = None\n dataset_arg['normal_file'] = None\n dataset_arg['divide_num'] = 1\n dataset_arg['divide_idx'] = 0\n dataset_arg['c_alpha_max_neighbors'] = 10\n return dataset_arg" }, { "identifier": "get_stat", "path": "src/utils/dataset_utils.py", "snippet": "def get_stat(graph_root, limited_num=None, num_subgroup=1000, max_limits=100000):\n # obtain mean and std of graphs in graph_root\n # graph_root: string, calculate mean and std of all attributes of graphs in graph_root\n # limited_num: int, optional, just calculated limited number of graphs in graph_root\n # num_Subgroup: int, group all graphs in graph_root, the number of each subgroup is num_subgroup\n # max_limits: int, set the initial minimum value as max_limits\n\n wrong_proteins = []\n filenames = os.listdir(graph_root)\n random.shuffle(filenames)\n # set sample length\n n = len(filenames)\n if limited_num:\n n = min(n, limited_num)\n count = 0\n if n < num_subgroup * 10:\n num_subgroup = 1\n\n # initialize scalar value\n num_node_min, num_edge_min = torch.tensor(\n [max_limits]), torch.tensor([max_limits])\n num_node_max, num_node_avg, num_edge_max, num_edge_avg = torch.tensor(\n [0]), torch.tensor([0]), torch.tensor([0]), torch.tensor([0])\n\n # initialize mean, std\n graph = torch.load(os.path.join(graph_root, filenames[0]))\n x, pos, edge_attr = graph.x, graph.pos, graph.edge_attr\n x_mean = torch.zeros(x.shape[1])\n x_max = torch.zeros(x.shape[1])\n x_min = torch.tensor([max_limits for i in range(x.shape[1])])\n x_std = torch.zeros(x.shape[1])\n pos_mean = torch.zeros(pos.shape[1])\n pos_std = torch.zeros(pos.shape[1])\n edge_attr_mean = torch.zeros(edge_attr.shape[1])\n edge_attr_std = torch.zeros(edge_attr.shape[1])\n\n # initialize sub mean, std\n x_mean_1 = torch.zeros(x.shape[1])\n x_std_1 = torch.zeros(x.shape[1])\n pos_mean_1 = torch.zeros(pos.shape[1])\n pos_std_1 = torch.zeros(pos.shape[1])\n\n edge_attr_mean_1 = torch.zeros(edge_attr.shape[1])\n edge_attr_std_1 = torch.zeros(edge_attr.shape[1])\n\n for i in tqdm(range(n)):\n file = filenames[i]\n graph = torch.load(os.path.join(graph_root, file))\n x, pos, mu_r_norm, edge_attr = graph.x, graph.pos, graph.mu_r_norm, graph.edge_attr\n if torch.isnan(x).any():\n wrong_proteins.append(file)\n continue\n count += 1\n node_num = graph.x.shape[0]\n edge_num = graph.edge_attr.shape[0]\n num_node_min = min(num_node_min, node_num)\n num_edge_min = min(num_edge_min, edge_num)\n num_node_max = max(num_node_max, node_num)\n num_edge_max = max(num_edge_max, edge_num)\n num_node_avg += node_num\n num_edge_avg += edge_num\n\n x_max = torch.max(x_max, x.max(axis=0).values)\n x_min = torch.min(x_min, x.min(axis=0).values)\n x_mean_1 += x.nanmean(axis=0)\n x_std_1 += x.std(axis=0)\n pos_mean_1 += pos.mean(axis=0)\n pos_std_1 += pos.std(axis=0)\n edge_attr_mean_1 += edge_attr.mean(axis=0)\n edge_attr_std_1 += 
edge_attr.std(axis=0)\n\n if count == num_subgroup:\n x_mean += x_mean_1.div_(num_subgroup)\n x_std += x_std_1.div_(num_subgroup)\n pos_mean += pos_mean_1.div_(num_subgroup)\n pos_std += pos_std_1.div_(num_subgroup)\n edge_attr_mean += edge_attr_mean_1.div_(num_subgroup)\n edge_attr_std += edge_attr_std_1.div_(num_subgroup)\n\n x_mean_1 = torch.zeros(x.shape[1])\n x_std_1 = torch.zeros(x.shape[1])\n pos_mean_1 = torch.zeros(pos.shape[1])\n pos_std_1 = torch.zeros(pos.shape[1])\n edge_attr_mean_1 = torch.zeros(edge_attr.shape[1])\n edge_attr_std_1 = torch.zeros(edge_attr.shape[1])\n count = 0\n\n num_node_avg = num_node_avg/n\n num_edge_avg = num_edge_avg/n\n n_2 = n // num_subgroup\n x_mean = x_mean.div_(n_2)\n x_std = x_std.div_(n_2)\n pos_mean = pos_mean.div_(n_2)\n pos_std = pos_std.div_(n_2)\n edge_attr_mean = edge_attr_mean.div_(n_2)\n edge_attr_std = edge_attr_std.div_(n_2)\n\n dic = {'x_max': x_max, 'x_min': x_min, 'x_mean': x_mean, 'x_std': x_std,\n 'pos_mean': pos_mean, 'pos_std': pos_std,\n 'edge_attr_mean': edge_attr_mean, 'edge_attr_std': edge_attr_std,\n 'num_graph': n - len(wrong_proteins),\n 'num_node_min': num_node_min, 'num_edge_min': num_edge_min,\n 'num_node_max': num_node_max, 'num_edge_max': num_edge_max,\n 'num_node_avg': num_node_avg, 'num_edge_avg': num_edge_avg}\n\n filename = 'mean_attr'\n saved_filename_pt = os.path.join(\n '/'.join(graph_root.split('/')[:-1]), filename + '.pt')\n torch.save(dic, saved_filename_pt)\n saved_filename = os.path.join(\n '/'.join(graph_root.split('/')[:-1]), filename + '.csv')\n w = csv.writer(open(saved_filename, 'w'))\n for key, val in dic.items():\n w.writerow([key, val])\n\n saved_filename = os.path.join(\n '/'.join(graph_root.split('/')[:-1]), filename + '_proteins.txt')\n with open(saved_filename, 'w') as f:\n for i in range(n):\n f.write(str(filenames[i]) + '\\n')\n\n saved_filename = os.path.join(\n '/'.join(graph_root.split('/')[:-1]), filename + '_wrong_proteins.txt')\n with open(saved_filename, 'w') as f:\n for file in wrong_proteins:\n f.write(file + '\\n')\n\n return saved_filename_pt" } ]
import os
import sys
import math
import random
import warnings
import torch
import torch.nn.functional as F
import scipy.spatial as spa
import numpy as np
from tqdm import tqdm
from scipy.special import softmax
from Bio.PDB import PDBParser, ShrakeRupley
from Bio.PDB.PDBExceptions import PDBConstructionWarning
from rdkit.Chem import GetPeriodicTable
from typing import Callable, List, Optional
from torch.utils.data import DataLoader
from torch_geometric.data import InMemoryDataset, Data
from src.utils.dataset_utils import safe_index, one_hot_res, log, dihedral, NormalizeProtein, dataset_argument_, get_stat
6,320
@property def processed_file_names(self) -> str: return ['train.pt', 'val.pt'] def write_info(self): written_filename = os.path.join(self.root, 'wrong_protein_names.txt') file = open(written_filename, "w+") for protein_name in self.wrong_proteins: file.writelines(protein_name + '\n') file.close() def process(self): #generate graph data and save in graph dir self.generate_protein_graph() # self.write_info() filenames = os.listdir(self.saved_graph_dir) protein_length = len(filenames) if self.set_length: protein_length = min(protein_length, self.set_length) if not self.normalize_file: self.normalize_file = get_stat(self.saved_graph_dir) random.shuffle(filenames) train_list = [f for f in filenames if "_" in f or "-" in f] filenames = [f for f in filenames if "_" not in f or "-" not in f] train_list.extend(filenames[:-self.num_val]) filenames_list = [train_list, filenames[-self.num_val:]] for k in range(2):####split train,val,test data_list = [] ###move special name to test set special_name_list = ["p53-dimer.pdb.pt"] for special_name in special_name_list: if special_name in filenames_list[0]: filenames_list[0].remove(special_name) filenames_list[1].append(special_name) for i in tqdm(range(len(filenames_list[k]))): file = filenames_list[k][i] try: graph1 = torch.load(os.path.join(self.saved_graph_dir, file))##load processed graph data torch pt file except: print(file) continue del graph1['distances'] del graph1['edge_dist'] del graph1['mu_r_norm'] del graph1['seq'] data_list.append(graph1) if self.is_normalize: normalize_transform = NormalizeProtein(filename=self.normalize_file) data_list = [d for d in data_list if normalize_transform(d)] if self.pre_filter is not None: data_list = [d for d in data_list if self.pre_filter(d)] if self.pre_transform is not None: data_list = [self.pre_transform(d) for d in data_list] torch.save(data_list, self.processed_paths[k]) def generate_protein_graph(self): names = os.listdir(self.raw_file_names) print(names) names.sort() n = int(np.ceil(len(names) / self.divide_num)) names = names[n * self.divide_idx:min(len(names), n * (self.divide_idx + 1))] for idx, name in enumerate(tqdm(names)): saved_graph_filename = os.path.join(self.saved_graph_dir, name + '.pt') if os.path.exists(saved_graph_filename): continue protein_filename = os.path.join(self.raw_file_names, name) if (name in self.wrong_proteins) or (not protein_filename): continue try: rec, rec_coords, c_alpha_coords, n_coords, c_coords,seq = self.get_receptor_inference(protein_filename) except: continue if rec !=False: if len(seq)>len(c_alpha_coords): del seq[-(len(seq)-len(c_alpha_coords)):] #meet "dna" data will remove the file and rec will be false # print(self.c_alpha_max_neighbors) rec_graph = self.get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords, rec_coords,seq) if not rec_graph: self.wrong_proteins.append(name) continue torch.save(rec_graph, saved_graph_filename) def rec_residue_featurizer(self, rec, chain_id, one_hot=True, add_feature=None): count = 0 flag_sasa=1 try: self.sr.compute(rec, level="R") except: flag_sasa=0 for i, chain in enumerate(rec.get_chains()): if i != chain_id: continue num_res = len(list(chain.get_residues()))#len([_ for _ in rec.get_residues()]) num_feature = 2 if add_feature.any(): num_feature += add_feature.shape[1] res_feature = torch.zeros(num_res, self.num_residue_type + num_feature) for i, residue in enumerate(chain.get_residues()): if flag_sasa==0: residue.sasa=0 sasa = residue.sasa for atom in residue: if atom.name == 'CA': bfactor = atom.bfactor assert not 
np.isinf(bfactor) assert not np.isnan(bfactor) assert not np.isinf(sasa) assert not np.isnan(sasa) residx = safe_index( self.allowable_features['possible_amino_acids'], residue.get_resname())
current_dir = os.getcwd() sys.path.append(current_dir) cwd = os.getcwd() sys.path.append(cwd + '/src/dataset_utils') warnings.filterwarnings("ignore") one_letter = { 'VAL':'V', 'ILE':'I', 'LEU':'L', 'GLU':'E', 'GLN':'Q', 'ASP':'D', 'ASN':'N', 'HIS':'H', 'TRP':'W', 'PHE':'F', 'TYR':'Y', 'ARG':'R', 'LYS':'K', 'SER':'S', 'THR':'T', 'MET':'M', 'ALA':'A', 'GLY':'G', 'PRO':'P', 'CYS':'C' } class CathDataset(InMemoryDataset): r""" Args: root (string): Root directory where the dataset should be saved. name (string): The name of the dataset. raw_dir (string, optional): Root directory where the original dataset stored(default: :obj:`None`) num_residue_type (int, optional): The number of amino acid types. (default: obj:'20') micro_radius (int, optional): The radius of micro-environment centered on the mask node. (default: obj:'20') c_alpha_max_neighbors (int, optional): The number of maximum connected nodes. (default: obj:'10') cutoff (int, optional): The maximum connected nodes distance (default: obj:'30') seq_dist_cut (int, optional): one-hot encoding the sequence distance edge attribute (default: obj:) [0.25,0.5,0.75,0.9,0.95,0.98,0.99] [ 2. 3. 13. 63. 127. 247. 347.] num_val (int, optional): The number of validation samples in case of "random" split. (default: 500) num_test (int, optional): The number of test samples in case of "random" split. (default: 1000) # use_localdatastet (bool) (bool,optional): If :obj:'True', online dataset # will be downloaded. If not, local pdb files will be used # (default: obj:'True') transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before every access. (default: :obj:`None`) pre_transform (callable, optional): A function/transform that takes in an :obj:`torch_geometric.data.Data` object and returns a transformed version. The data object will be transformed before being saved to disk. (default: :obj:`None`) pre_filter (callable, optional): A function that takes in an :obj:`torch_geometric.data.Data` object and returns a boolean value, indicating whether the data object should be included in the final dataset. 
(default: :obj:`None`) """ splits = ['train', 'val', 'test'] allowable_features = { 'possible_atomic_num_list': list(range(1, 119)) + ['misc'], 'possible_chirality_list': [ 'CHI_UNSPECIFIED', 'CHI_TETRAHEDRAL_CW', 'CHI_TETRAHEDRAL_CCW', 'CHI_OTHER' ], 'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'], 'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'], 'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'], 'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'], 'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'], 'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'], 'possible_hybridization_list': [ 'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc' ], 'possible_is_aromatic_list': [False, True], 'possible_is_in_ring3_list': [False, True], 'possible_is_in_ring4_list': [False, True], 'possible_is_in_ring5_list': [False, True], 'possible_is_in_ring6_list': [False, True], 'possible_is_in_ring7_list': [False, True], 'possible_is_in_ring8_list': [False, True], 'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV', 'MEU', 'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'], 'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*', 'OD', 'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'], 'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2', 'CH2', 'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O', 'OD1', 'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'], } def __init__(self, root: str, split: str = 'train', num_residue_type: int = 20, micro_radius: int = 20, c_alpha_max_neighbors: int = 10, cutoff: int = 30, seq_dist_cut: int = 64, use_micro: bool = False, use_angle: bool = False, use_omega: bool = False, transform: Optional[Callable] = None, pre_transform: Optional[Callable] = None, pre_filter: Optional[Callable] = None, divide_num: int = 1, divide_idx: int = 0, set_length: int = 500, num_val: int = 10, is_normalize: bool = True, normalize_file: str = None, p: float = 0.5, use_sasa: bool =False, use_bfactor: bool = False, use_dihedral: bool = False, use_coordinate: bool = False, use_denoise: bool = False, noise_type: str = 'wild', temperature = 1.0 ): self.p=p self.use_sasa=use_sasa self.use_bfactor=use_bfactor self.use_dihedral=use_dihedral self.use_coordinate=use_coordinate self.use_denoise=use_denoise self.noise_type = noise_type self.temperature = temperature self.split = split assert self.split in self.splits self.num_residue_type = num_residue_type self.micro_radius = micro_radius self.c_alpha_max_neighbors = c_alpha_max_neighbors self.seq_dist_cut = seq_dist_cut self.use_micro = use_micro self.use_angle = use_angle self.use_omega = use_omega self.cutoff = cutoff self.num_val = num_val self.divide_num = divide_num self.divide_idx = divide_idx self.set_length = set_length self.is_normalize = is_normalize self.normalize_file = normalize_file self.wrong_proteins = ['1kp0A01', '2atcA02'] self.sr = ShrakeRupley(probe_radius=1.4, # in A. Default is 1.40 roughly the radius of a water molecule. n_points=100) # resolution of the surface of each atom. Default is 100. A higher number of points results in more precise measurements, but slows down the calculation. 
self.periodic_table = GetPeriodicTable() self.biopython_parser = PDBParser() super().__init__(root, transform, pre_transform, pre_filter) self.dataset = torch.load(self.processed_paths[self.splits.index(self.split)]) # self.data, self.slices = torch.load( # self.processed_paths[self.splits.index(self.split)]) # self.nums_amino_cum = self.slices['x'] @property def raw_file_names(self) -> str: raw_file_names = os.path.join('data', 'cath', "dompdb") if not os.path.exists(raw_file_names): os.mkdir(raw_file_names) return raw_file_names @property def raw_dir(self) -> str: if not os.path.exists(self.root): os.mkdir(self.root) raw_dir = os.path.join(self.root, 'raw') if not os.path.exists(raw_dir): os.mkdir(raw_dir) return raw_dir @property def saved_graph_dir(self) -> str: dir_root = os.path.join(self.root) if not os.path.exists(dir_root): os.mkdir(dir_root) dir_name = os.path.join(dir_root, 'graph_seq') if not os.path.exists(dir_name): os.mkdir(dir_name) if not self.set_length: self.set_length = len(os.listdir(dir_name)) return dir_name @property def saved_amino_cum(self) -> str: amino_cum_name = os.path.join( self.root, 'amino_cum.pt') return amino_cum_name @property def processed_dir(self) -> str: return os.path.join(self.root, 'processed_seq') @property def processed_file_names(self) -> str: return ['train.pt', 'val.pt'] def write_info(self): written_filename = os.path.join(self.root, 'wrong_protein_names.txt') file = open(written_filename, "w+") for protein_name in self.wrong_proteins: file.writelines(protein_name + '\n') file.close() def process(self): #generate graph data and save in graph dir self.generate_protein_graph() # self.write_info() filenames = os.listdir(self.saved_graph_dir) protein_length = len(filenames) if self.set_length: protein_length = min(protein_length, self.set_length) if not self.normalize_file: self.normalize_file = get_stat(self.saved_graph_dir) random.shuffle(filenames) train_list = [f for f in filenames if "_" in f or "-" in f] filenames = [f for f in filenames if "_" not in f or "-" not in f] train_list.extend(filenames[:-self.num_val]) filenames_list = [train_list, filenames[-self.num_val:]] for k in range(2):####split train,val,test data_list = [] ###move special name to test set special_name_list = ["p53-dimer.pdb.pt"] for special_name in special_name_list: if special_name in filenames_list[0]: filenames_list[0].remove(special_name) filenames_list[1].append(special_name) for i in tqdm(range(len(filenames_list[k]))): file = filenames_list[k][i] try: graph1 = torch.load(os.path.join(self.saved_graph_dir, file))##load processed graph data torch pt file except: print(file) continue del graph1['distances'] del graph1['edge_dist'] del graph1['mu_r_norm'] del graph1['seq'] data_list.append(graph1) if self.is_normalize: normalize_transform = NormalizeProtein(filename=self.normalize_file) data_list = [d for d in data_list if normalize_transform(d)] if self.pre_filter is not None: data_list = [d for d in data_list if self.pre_filter(d)] if self.pre_transform is not None: data_list = [self.pre_transform(d) for d in data_list] torch.save(data_list, self.processed_paths[k]) def generate_protein_graph(self): names = os.listdir(self.raw_file_names) print(names) names.sort() n = int(np.ceil(len(names) / self.divide_num)) names = names[n * self.divide_idx:min(len(names), n * (self.divide_idx + 1))] for idx, name in enumerate(tqdm(names)): saved_graph_filename = os.path.join(self.saved_graph_dir, name + '.pt') if os.path.exists(saved_graph_filename): continue protein_filename 
= os.path.join(self.raw_file_names, name) if (name in self.wrong_proteins) or (not protein_filename): continue try: rec, rec_coords, c_alpha_coords, n_coords, c_coords,seq = self.get_receptor_inference(protein_filename) except: continue if rec !=False: if len(seq)>len(c_alpha_coords): del seq[-(len(seq)-len(c_alpha_coords)):] #meet "dna" data will remove the file and rec will be false # print(self.c_alpha_max_neighbors) rec_graph = self.get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords, rec_coords,seq) if not rec_graph: self.wrong_proteins.append(name) continue torch.save(rec_graph, saved_graph_filename) def rec_residue_featurizer(self, rec, chain_id, one_hot=True, add_feature=None): count = 0 flag_sasa=1 try: self.sr.compute(rec, level="R") except: flag_sasa=0 for i, chain in enumerate(rec.get_chains()): if i != chain_id: continue num_res = len(list(chain.get_residues()))#len([_ for _ in rec.get_residues()]) num_feature = 2 if add_feature.any(): num_feature += add_feature.shape[1] res_feature = torch.zeros(num_res, self.num_residue_type + num_feature) for i, residue in enumerate(chain.get_residues()): if flag_sasa==0: residue.sasa=0 sasa = residue.sasa for atom in residue: if atom.name == 'CA': bfactor = atom.bfactor assert not np.isinf(bfactor) assert not np.isnan(bfactor) assert not np.isinf(sasa) assert not np.isnan(sasa) residx = safe_index( self.allowable_features['possible_amino_acids'], residue.get_resname())
res_feat_1 = one_hot_res(
1
2023-11-10 07:21:37+00:00
8k
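The record above cuts off just before a call to `one_hot_res` inside `rec_residue_featurizer`, and the featurizer also relies on a `safe_index` helper; neither definition appears in the record. A minimal hedged sketch of what such helpers could look like — the signatures and the fallback-to-last-slot behavior are illustrative assumptions, not the repository's actual code:

import torch

def safe_index(allowable_list, value):
    # Assumed helper: return the index of `value`, falling back to the
    # last slot (conventionally an "unknown" bucket) when it is missing.
    try:
        return allowable_list.index(value)
    except ValueError:
        return len(allowable_list) - 1

def one_hot_res(residx, num_residue_type=20):
    # Assumed helper: one-hot encode a residue index into a vector of
    # length num_residue_type (the class above uses self.num_residue_type).
    out = torch.zeros(num_residue_type)
    out[residx] = 1.0
    return out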
HypeboyJake/ReinforceTradeAI
c_main.py
[ { "identifier": "CoinTradingEnv", "path": "CoinTradingEnv.py", "snippet": "class CoinTradingEnv(gym.Env):\r\n    def __init__(self, data):\r\n        super(CoinTradingEnv, self).__init__()\r\n        self.data = data\r\n        # 0 : Buy, 1 : Sell, 2 : Hold\r\n        self.action_space = gym.spaces.Discrete(3)\r\n        self.initial_cash = 1000  # initial capital\r\n        self.current_cash = self.initial_cash  # current cash\r\n        self.trade_fee = 0.0014  # trading fee\r\n        self.current_coin = 0.0  # number of coins currently held\r\n        self.min_order_amount = 10  # minimum order amount ($10)\r\n        self.pyramiding_level = 0  # pyramiding count\r\n        self.portfolio_value = 0  # portfolio value\r\n\r\n    def reset(self):\r\n        self.current_step = 0\r\n        self.current_cash = self.initial_cash\r\n        self.current_coin = 0.0\r\n        self.portfolio_value = 0.0\r\n        self.roe_per_step = []\r\n        return self.data[self.current_step]\r\n\r\n    def step(self, action):\r\n        current_price = self.data[self.current_step][3]\r\n        portfolio_value_before = self.current_cash + (self.current_coin * current_price)\r\n        self.current_cash, self.current_coin, final_action = self.execute_trade(action)\r\n        self.portfolio_value = self.current_cash + (self.current_coin * current_price)\r\n        self.record_roe()\r\n\r\n        self.current_step += 1\r\n        done = self.current_step >= len(self.data) - 1\r\n        obs = self.data[self.current_step] if not done else np.zeros_like(self.data[0])\r\n\r\n        reward = self.calculate_reward(final_action, portfolio_value_before, self.portfolio_value)\r\n        return obs, reward, done, {'final_action': final_action}\r\n\r\n    def execute_trade(self, action):\r\n        pyramiding_level = self.pyramiding_level\r\n        commission_rate = self.trade_fee\r\n        investment_ratio = 1.0 if pyramiding_level == 0 else 1 / (pyramiding_level + 1)\r\n        final_action = action\r\n\r\n        if action == 0: \r\n            amount_to_invest = self.current_cash * investment_ratio\r\n            amount_to_invest_after_commission = amount_to_invest * (1 - commission_rate)\r\n            if amount_to_invest_after_commission >= self.min_order_amount:\r\n                num_coins_to_buy = amount_to_invest_after_commission / self.data[self.current_step][3]\r\n                if num_coins_to_buy > 0:\r\n                    self.current_cash -= num_coins_to_buy * self.data[self.current_step][3] * (1 + commission_rate)\r\n                    self.current_coin += num_coins_to_buy\r\n                else:\r\n                    final_action = 2 \r\n            else:\r\n                final_action = 2 \r\n\r\n        elif action == 1: \r\n            num_coins_to_sell = self.current_coin * investment_ratio\r\n            if num_coins_to_sell > 0:\r\n                self.current_cash += num_coins_to_sell * self.data[self.current_step][3] * (1 - commission_rate)\r\n                self.current_coin -= num_coins_to_sell\r\n            else:\r\n                final_action = 2 \r\n\r\n        self.current_cash = max(self.current_cash, 0) \r\n        self.current_coin = max(self.current_coin, 0) \r\n\r\n        return self.current_cash, self.current_coin, final_action \r\n\r\n\r\n\r\n    def calculate_reward(self, final_action, portfolio_value_before, portfolio_value_after):\r\n        long_term_lookback = 20 \r\n        if self.current_step > long_term_lookback:\r\n            long_term_profit_loss = portfolio_value_after / self.data[self.current_step - long_term_lookback][3] - 1\r\n        else:\r\n            long_term_profit_loss = portfolio_value_after / self.initial_cash - 1\r\n\r\n        unrealized_reward_weight = 0.5\r\n        realized_reward_weight = 1\r\n        long_term_reward_weight = 2 \r\n        trading_fee = self.trade_fee\r\n        risk_adjustment_factor = 0.05 \r\n\r\n        reward = 0\r\n        if final_action == 0: \r\n            realized_profit_loss = portfolio_value_after - portfolio_value_before\r\n            reward = realized_profit_loss * realized_reward_weight - trading_fee\r\n        elif final_action == 1: \r\n            realized_profit_loss = 
portfolio_value_before - portfolio_value_after\r\n            reward = realized_profit_loss * realized_reward_weight - trading_fee\r\n        elif final_action == 2: \r\n            unrealized_profit_loss = portfolio_value_after - portfolio_value_before\r\n            reward = unrealized_profit_loss * unrealized_reward_weight\r\n\r\n        reward += long_term_profit_loss * long_term_reward_weight\r\n\r\n        lookback = min(self.current_step, 20)\r\n        past_prices = self.data[self.current_step-lookback:self.current_step]\r\n        past_returns = [(past_prices[i+1][3] - past_prices[i][3]) / past_prices[i][3] for i in range(lookback - 1)]\r\n        volatility = np.std(past_returns) if len(past_returns) > 1 else 0\r\n\r\n        risk_adjusted_reward = reward * (1 - risk_adjustment_factor * volatility)\r\n\r\n        return risk_adjusted_reward\r\n\r\n    \r\n    def record_roe(self):\r\n        total_assets = self.current_cash + (self.current_coin * self.data[self.current_step][3])\r\n        roe = ((total_assets - self.initial_cash) / self.initial_cash) * 100\r\n        self.roe_per_step.append(roe)\r\n        return roe" }, { "identifier": "Agent", "path": "sharing/agent.py", "snippet": "class Agent:\r\n    def __init__(self, input_dim, output_dim, epsilon, gamma,\r\n                 # \"사용할 모델을 선택하세요\"\r\n                 # \"Please select the model to use.\"\r\n                 agent_type='PPO', \r\n                 # \"사용할 옵티마이저를 선택하세요\"\r\n                 # \"Please select the optimizer to use.\"\r\n                 optimizer_type='Adam',\r\n                 shared_network=False,\r\n                 clip_ratio=0.2,\r\n                 lr=0.0001, \r\n                 weight_decay=0, \r\n                 **kwargs):\r\n        self.epsilon = epsilon\r\n        self.gamma = gamma\r\n        self.shared_network = shared_network\r\n        self.clip_ratio = clip_ratio\r\n\r\n        self.old_log_probs = []\r\n        self.ppo_values = []\r\n\r\n        self.value_loss_coef = 0.5 \r\n        self.entropy_coef = 0.01 \r\n\r\n        model_class = MODEL_MAPPING.get(agent_type)\r\n        if not model_class:\r\n            raise ValueError(f\"Unsupported agent type: {agent_type}\")\r\n        self.model = model_class(input_dim, output_dim, shared_network)\r\n\r\n        optimizer_class = OPTIMIZER_MAPPING.get(optimizer_type)\r\n        if not optimizer_class:\r\n            raise ValueError(f\"Unsupported optimizer type: {optimizer_type}\")\r\n        self.optimizer = optimizer_class(self.model.parameters(), lr=lr, weight_decay=weight_decay, **kwargs)\r\n\r\n        if agent_type in ['DQN', 'DDQN']:\r\n            self.target_model = model_class(input_dim, output_dim, shared_network)\r\n            self.target_model.load_state_dict(self.model.state_dict())\r\n            self.target_model.eval()\r\n    \r\n    def select_action(self, state):\r\n        # 0 : Buy, 1 : Sell, 2 : Hold\r\n        action_space = [0, 1, 2] \r\n        state = state.float()\r\n\r\n        # Epsilon-greedy\r\n        if np.random.rand() < self.epsilon:\r\n            return np.random.choice(action_space)\r\n        else:\r\n            if isinstance(self.model, DQN):\r\n                with torch.no_grad():\r\n                    model_output = self.model(state)\r\n                    max_value, max_index = model_output.max(0) \r\n                    return max_index.item()\r\n            elif isinstance(self.model, DDQN):\r\n                with torch.no_grad():\r\n                    state = state.unsqueeze(0).float() \r\n                    q_values = self.model(state) \r\n                    return q_values.max(1)[1].item()\r\n            elif isinstance(self.model, A2C):\r\n                with torch.no_grad():\r\n                    state = state.unsqueeze(0).float() \r\n                    policy, value = self.model(state)\r\n                    dist = torch.distributions.Categorical(policy)\r\n                    action = dist.sample()\r\n                    log_prob = dist.log_prob(action)\r\n                    self.last_log_prob = log_prob \r\n                    self.last_value = value \r\n                    return action.item()\r\n            elif isinstance(self.model, PPO):\r\n                with torch.no_grad():\r\n                    policy, value = self.model(state)\r\n                    dist = torch.distributions.Categorical(policy)\r\n                    action = dist.sample()\r\n                    log_prob = dist.log_prob(action)\r\n                    
\r\n self.old_log_probs.append(log_prob)\r\n self.ppo_values.append(value)\r\n return action.item()\r\n elif isinstance(self.model, LSTM):\r\n with torch.no_grad():\r\n state = state.unsqueeze(0).unsqueeze(1)\r\n log_probs, _, _ = self.model(state)\r\n probs = torch.exp(log_probs)\r\n action = torch.multinomial(probs, 1).item()\r\n return action\r\n elif isinstance(self.model, DNN):\r\n with torch.no_grad():\r\n model_output = self.model(state)\r\n max_value, max_index = model_output.max(0) \r\n return max_index.item()\r\n else:\r\n raise ValueError(f\"Unsupported model type: {type(self.model)}\")\r\n \r\n def learn(self, state, action, reward, next_state, done):\r\n if isinstance(self.model, DQN):\r\n state = state.unsqueeze(0) if state.dim() == 1 else state\r\n next_state = next_state.unsqueeze(0) if next_state.dim() == 1 else next_state\r\n action = torch.tensor([action], dtype=torch.int64).unsqueeze(0)\r\n reward = torch.tensor([reward], dtype=torch.float32).unsqueeze(0)\r\n done = torch.tensor([done], dtype=torch.float32).unsqueeze(0)\r\n dqn_batch = (state, action, reward, next_state, done)\r\n optimize_dqn(self.model, self.target_model, self.optimizer, dqn_batch, self.gamma)\r\n elif isinstance(self.model, DDQN):\r\n state = state.unsqueeze(0) if state.dim() == 1 else state\r\n next_state = next_state.unsqueeze(0) if next_state.dim() == 1 else next_state\r\n action = torch.tensor([action], dtype=torch.int64).unsqueeze(0)\r\n reward = torch.tensor([reward], dtype=torch.float32).unsqueeze(0)\r\n done = torch.tensor([done], dtype=torch.float32).unsqueeze(0)\r\n ddqn_batch = (state, action, reward, next_state, done)\r\n optimize_ddqn(self.model, self.target_model, self.optimizer, ddqn_batch, self.gamma)\r\n elif isinstance(self.model, A2C):\r\n reward_tensor = torch.tensor(reward).float()\r\n done_tensor = torch.tensor(done, dtype=torch.float32)\r\n state = state.float()\r\n action_tensor = torch.tensor(action).long()\r\n next_state = next_state.float()\r\n done_tensor = done_tensor.unsqueeze(0) if done_tensor.dim() == 0 else done_tensor\r\n log_probs, values = calculate_log_probs_and_values(self.model, state, action_tensor)\r\n a2c_batch = (state, action_tensor, reward_tensor, next_state, done_tensor, log_probs, values)\r\n optimize_a2c(self.model, self.optimizer, a2c_batch, self.gamma)\r\n elif isinstance(self.model, PPO):\r\n if not self.old_log_probs or not self.ppo_values:\r\n return\r\n old_log_probs = torch.stack(self.old_log_probs)\r\n ppo_values = torch.stack(self.ppo_values)\r\n ppo_batch = (state, action, reward, next_state, done, old_log_probs, ppo_values)\r\n optimize_ppo(self.model, self.optimizer, ppo_batch, self.clip_ratio)\r\n self.old_log_probs = []\r\n self.ppo_values = []\r\n elif isinstance(self.model, LSTM):\r\n state = state.clone().detach().unsqueeze(0).float() if not state.requires_grad else state.unsqueeze(0).float()\r\n next_state = next_state.clone().detach().unsqueeze(0).float() if not next_state.requires_grad else next_state.unsqueeze(0).float()\r\n action = torch.tensor([action], dtype=torch.int64).unsqueeze(0)\r\n reward = torch.tensor([reward], dtype=torch.float32).unsqueeze(0)\r\n done = torch.tensor([done], dtype=torch.float32).unsqueeze(0)\r\n log_probs, values, hidden_state = self.model(state)\r\n lstm_batch = (state, action, reward, next_state, done, log_probs, values)\r\n optimize_lstm(self.model, self.optimizer, lstm_batch, self.value_loss_coef, self.entropy_coef)\r\n elif isinstance(self.model, DNN):\r\n state = torch.tensor(state, 
dtype=torch.float32).unsqueeze(0) if not isinstance(state, torch.Tensor) else state\r\n next_state = next_state.unsqueeze(0) if next_state.dim() == 1 else next_state\r\n reward = torch.tensor([reward], dtype=torch.float32)\r\n done = torch.tensor([done], dtype=torch.float32)\r\n outputs = self.model(state)\r\n next_state_values = self.model(next_state).max(1)[0].detach()\r\n target_value = reward + (1 - done) * self.gamma * next_state_values\r\n target_value = target_value.unsqueeze(-1) \r\n action = torch.tensor(action, dtype=torch.int64).unsqueeze(-1)\r\n predicted_q_value = outputs[action].unsqueeze(-1)\r\n optimize_dnn(self.model, self.optimizer, (predicted_q_value, target_value))\r\n else:\r\n raise ValueError(f\"Unsupported model type: {type(self.model)}\")" }, { "identifier": "create_folder", "path": "sharing/visualization.py", "snippet": "def create_folder(epoch):\r\n folder_name = f'epoch_{epoch}'\r\n os.makedirs(folder_name, exist_ok=True)\r\n return folder_name\r" }, { "identifier": "plot_all_charts", "path": "sharing/visualization.py", "snippet": "def plot_all_charts(old_data, holdings, data, actions, roe_values, folder_name, epoch):\r\n ohlcv_df = pd.DataFrame(old_data)\r\n ohlcv_df.iloc[:, 0] = pd.to_datetime(ohlcv_df.iloc[:, 0])\r\n ohlcv_df.set_index(ohlcv_df.iloc[:, 0], inplace=True)\r\n\r\n plt.figure(figsize=(15, 12))\r\n\r\n plt.subplot(4, 1, 1)\r\n plt.plot(ohlcv_df.iloc[:, 4], label='Close Price') \r\n plt.title('Close Price Line Chart')\r\n plt.xlabel('Time')\r\n plt.ylabel('Price')\r\n plt.legend()\r\n\r\n \r\n plt.subplot(4, 1, 2)\r\n plt.plot(data[:-1, 3], label='Close Price', linewidth=1)\r\n for i, action in enumerate(actions):\r\n color = 'blue' if action == 0 else 'red' if action == 1 else 'yellow'\r\n marker = '^' if action == 0 else 'v' if action == 1 else 'o'\r\n plt.scatter(i, data[i, 3], color=color, marker=marker, alpha=0.7)\r\n plt.title('Agent Actions with Close Price')\r\n plt.xlabel('Step')\r\n plt.ylabel('Price')\r\n plt.legend()\r\n plt.grid(True, linestyle='--', alpha=0.5)\r\n\r\n\r\n plt.subplot(4, 1, 3)\r\n plt.plot(holdings, label='Holdings')\r\n plt.title('Holdings per Step')\r\n plt.xlabel('Step')\r\n plt.ylabel('Amount of Held Coins')\r\n plt.legend()\r\n\r\n plt.subplot(4, 1, 4)\r\n plt.plot(roe_values, label='ROE')\r\n plt.title('Return on Equity over Steps')\r\n plt.xlabel('Step')\r\n plt.ylabel('ROE (%)')\r\n plt.legend()\r\n\r\n plt.tight_layout() \r\n plt.savefig(f'{folder_name}/combined_chart_{epoch}.png')\r\n plt.close()\r" } ]
import torch import pandas as pd from tqdm import tqdm from CoinTradingEnv import CoinTradingEnv from sharing.agent import Agent from sharing.visualization import create_folder, plot_all_charts
4,095
''' 파일의 형식은 0번째 날짜, 3번째 종가를 무조건 지켜주셔야합니다. The file format must strictly follow: date as the 0th element, and closing price as the 3rd element. ''' "실제 파일경로를 입력해주세요" "Please enter the actual file path." real_data = pd.read_csv("file_path") #"data는 시각화에만 사용되고 학습할 때 사용하지 않습니다" #"The data is used only for visualization and not for training purposes" "write the correct feature name" data = real_data.drop(['Date'], axis=1) #data = real_data.drop(['Time'], axis=1) "파일에 결측치가 있다면 각주를 풀어주세요.(평균으로 대체)" "If there are missing values in the file, please uncomment the line below. (Replaces them with the mean)" # data.fillna(data.mean(), inplace=True) # replace NaN values with the mean data = data.values env = CoinTradingEnv(data) "파일의 피쳐수를 넣어주세요 (피쳐수, )형식을 유지해주세요." "Please enter the number of features in the file, and maintain the format (number of features, )." "ex ) state_dim = (7,)" state_dim = action_dim = env.action_space.n
''' 파일의 형식은 0번째 날짜, 3번째 종가를 무조건 지켜주셔야합니다. The file format must strictly follow: date as the 0th element, and closing price as the 3rd element. ''' "실제 파일경로를 입력해주세요" "Please enter the actual file path." real_data = pd.read_csv("file_path") #"data는 시각화에만 사용되고 학습할 때 사용하지 않습니다" #"The data is used only for visualization and not for training purposes" "write the correct feature name" data = real_data.drop(['Date'], axis=1) #data = real_data.drop(['Time'], axis=1) "파일에 결측치가 있다면 각주를 풀어주세요.(평균으로 대체)" "If there are missing values in the file, please uncomment the line below. (Replaces them with the mean)" # data.fillna(data.mean(), inplace=True) # replace NaN values with the mean data = data.values env = CoinTradingEnv(data) "파일의 피쳐수를 넣어주세요 (피쳐수, )형식을 유지해주세요." "Please enter the number of features in the file, and maintain the format (number of features, )." "ex ) state_dim = (7,)" state_dim = action_dim = env.action_space.n
agent = Agent(input_dim=state_dim, output_dim=action_dim, epsilon=0.3, gamma=0.99)
1
2023-11-16 12:04:20+00:00
8k
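Taken together, this record's import statement, cropped code, and next_line show how the pieces are meant to be wired: build `CoinTradingEnv(data)`, build an `Agent`, then loop over `reset`/`step` and `select_action`/`learn`. A minimal hedged training-loop sketch over that API — the episode count and the tensor conversions are illustrative assumptions; the method names and the `(obs, reward, done, info)` return shape come from the snippets above:

import torch

num_episodes = 10  # assumed value, not taken from the record
for episode in range(num_episodes):
    state = env.reset()
    done = False
    while not done:
        # select_action expects a float tensor (it calls state.float() internally)
        action = agent.select_action(torch.tensor(state, dtype=torch.float32))
        next_state, reward, done, info = env.step(action)
        agent.learn(
            torch.tensor(state, dtype=torch.float32),
            action,
            reward,
            torch.tensor(next_state, dtype=torch.float32),
            done,
        )
        state = next_state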
atlantic-quantum/Shipyard
tests/passes/test_core_splitter.py
[ { "identifier": "TransformError", "path": "shipyard/compiler_error.py", "snippet": "class TransformError(Error):\n \"\"\"Error class for Transformation Errors, raised by QASMTransformer subclasses\"\"\"" }, { "identifier": "CoreSplitter", "path": "shipyard/passes/core_splitter.py", "snippet": "class CoreSplitter(QASMTransformer):\n \"\"\"\n QASMTransformer that transforms a qasm program for multiple ports\n into a qasm program for a target ports\n\n Args:\n target_ports (set[str]): a set of target ports to transform the program to.\n \"\"\"\n\n def __init__(self, target_ports: set[str]) -> None:\n if isinstance(target_ports, str):\n target_ports = set([target_ports])\n self.target_ports = target_ports\n self.context: ScopeContext = None\n self.frames = set()\n\n # pylint: disable=C0103\n # snake_case naming\n\n def visit_Program(self, node: ast.Program) -> ast.Program:\n \"\"\"\n Program (defcal) node transformer:\n Enters GLOBAL context and visits the program node\n Exits GLOBAL context before returning the node\n\n Args:\n node (ast.Program): openQASM program to process\n\n Returns:\n ast.Program: same node\n \"\"\"\n with self.scope_manager(ScopeContext.GLOBAL):\n statements = self._visit_list(node.statements, self.visit)\n return ast.Program(statements=statements, version=node.version)\n\n def visit_ClassicalDeclaration(\n self, node: ast.ClassicalDeclaration\n ) -> ast.ClassicalDeclaration:\n \"\"\"\n Classi calDeclaration node transformer:\n a) Frame declarations:\n If the port of a standard frame declaration is a target port:\n Add frame to list of used frames and return the node.\n Else:\n Remove the node\n If the frame declaration is not of the expected syntax i.e.\n frame some_frame = newframe(port, frequency, phase)\n Raise Transform error\n\n b) Port Declaration\n IF the port is not one of the target ports -> remove the node\n\n c) Other declarations\n return the node\n\n\n Args:\n node (ast.ClassicalDeclaration): openQASM classical declaration to process\n\n Raises:\n TransformError:\n if a frame declaration does not match the expected format i.e.\n frame some_frame = newframe(port, frequency, phase)\n\n Returns:\n ast.ClassicalDeclaration: same node if it is not removed, else None.\n \"\"\"\n match node:\n case ast.ClassicalDeclaration(\n type=ast.FrameType(),\n identifier=frame_id,\n init_expression=ast.FunctionCall(\n name=ast.Identifier(\"newframe\"),\n arguments=[port_arg, _, _],\n ),\n ):\n if port_arg.name in self.target_ports:\n self.frames.add(frame_id.name)\n return super().visit_ClassicalDeclaration(node)\n LOGGER.debug(\n \"REMOVED: Declared frame: %s, that does not use a target port: %s\",\n frame_id.name,\n self.target_ports,\n )\n return None\n case ast.ClassicalDeclaration(type=ast.FrameType()):\n raise TransformError(ErrorCode.UNHANDLED, \"Unhandled frame declaration\")\n case ast.ClassicalDeclaration(type=ast.PortType(), identifier=port):\n if port.name in self.target_ports:\n return super().visit_ClassicalDeclaration(node)\n LOGGER.debug(\n \"REMOVED: Declared port: %s, that is not target port: %s\",\n port.name,\n self.target_ports,\n )\n return None\n case _:\n return super().visit_ClassicalDeclaration(node)\n\n def visit_FunctionCall(self, node: ast.FunctionCall) -> ast.FunctionCall:\n \"\"\"\n FunctionCall node transformer:\n If a play/capture_v2 function call is performed on a frame that uses the\n target AWG core it is passed on, else the node is removed.\n Other function calls are passed on\n\n Example:\n target: ch1\n\n in:\n frame1 = newframe(ch1, 
...)\n frame2 = newframe(ch2, ...) <- removed by visit_ClassicalDeclaration\n\n play(frame1, ...)\n play(frame2, ...) <- this FunctionCall node will be removed\n\n other_function_call(...)\n\n out:\n frame1 = newframe(ch1, ...)\n play(frame1, ...)\n\n other_function_call(...)\n\n Args:\n node (ast.FunctionCall): openQASM function call node to process\n\n Returns:\n ast.FunctionCall: same node if it is passed on, else None.\n \"\"\"\n match node:\n case ast.FunctionCall(\n name=ast.Identifier(\n \"play\"\n | \"capture_v1\"\n | \"capture_v2\"\n | \"capture_v3\"\n | \"capture_v1_spectrum\"\n | \"set_frequency\"\n | \"shift_frequency\"\n | \"set_phase\"\n | \"shift_phase\"\n ),\n arguments=[frame_arg, _],\n ):\n if frame_arg.name in self.frames:\n return super().visit_FunctionCall(node)\n LOGGER.debug(\n \"REMOVED: Function call %s that is not using frame for target %s\",\n node,\n self.target_ports,\n )\n return None\n case _:\n return super().visit_FunctionCall(node)\n\n def visit_ExpressionStatement(\n self, node: ast.ExpressionStatement\n ) -> ast.ExpressionStatement:\n \"\"\"\n ExpressionStatement node transformer:\n ExpressionStatements are wrappers around other expressions. If other\n methods of this transformer remove the expression of the ExpressionStatement\n this visitor also removes the ExpressionStatement wrapping it\n (as ExpressionStatements with expression=None are not allowed.)\n\n Args:\n node (ast.ExpressionStatement): openQASM expression statement to process\n\n Returns:\n ast.ExpressionStatement: same node if not removed, else None\n \"\"\"\n expression = self.visit(node.expression)\n if expression is not None:\n return super().visit_ExpressionStatement(node)\n LOGGER.debug(\"REMOVED: Empty ExpressionStatement %s\", node)\n return None\n\n def visit_CalibrationStatement(\n self, node: ast.CalibrationStatement\n ) -> ast.CalibrationStatement:\n \"\"\"\n CalibrationStatement node transformer:\n Enters DEFCAL context and visits the calibration statement node\n Exits DEFCAL context before returning the node\n\n Args:\n node (ast.CalibrationStatement): openQASM calibration statement to process\n\n Returns:\n ast.CalibrationStatement: same node\n \"\"\"\n with self.scope_manager(ScopeContext.DEFCAL):\n body = self._visit_list(node.body, self.visit)\n return ast.CalibrationStatement(body=body)\n\n def visit_SubroutineDefinition(\n self, node=ast.SubroutineDefinition\n ) -> ast.SubroutineDefinition:\n \"\"\"\n SubroutineDefinition node transformer:\n Enters SUBROUTINE context and visits the Subroutine definition node\n Exits SUBROUTINE context before returning the node\n\n Args:\n node (ast.SubroutineDefinition): openQASM subroutine definition to process\n\n Returns:\n ast.SubroutineDefinition: same node\n \"\"\"\n with self.scope_manager(ScopeContext.SUBROUTINE):\n body = self._visit_list(node.body, self.visit)\n return ast.SubroutineDefinition(\n name=self.visit_Identifier(node.name),\n arguments=self._visit_list(node.arguments, self.visit),\n body=body,\n return_type=self.visit(node.return_type) if node.return_type else None,\n )\n\n def visit_CalibrationDefinition(\n self, node: ast.CalibrationDefinition\n ) -> ast.CalibrationDefinition:\n \"\"\"\n CalibrationDefinition (defcal) node transformer:\n Enters DEFCAL context and visits the calibration definition node\n Then Exits DEFCAL context\n If all statements within the body of a defcal statements are removed\n this visitor also removes that defcal statements\n\n\n Args:\n node (ast.CalibrationDefinition): openQASM defcal node 
to process\n\n Returns:\n ast.CalibrationDefinition:\n same node if it has any statements within its body\n \"\"\"\n with self.scope_manager(ScopeContext.DEFCAL):\n body = self._visit_list(node.body, self.visit)\n if body:\n return ast.CalibrationDefinition(\n name=self.visit_Identifier(node.name),\n arguments=self._visit_list(node.arguments, self.visit),\n qubits=self._visit_list(node.qubits, self.visit),\n return_type=self.visit(node.return_type) if node.return_type else None,\n body=body,\n )\n LOGGER.debug(\"REMOVED: Empty CalibrationDefinition (defcal) %s\", node)\n return None\n\n def visit_ReturnStatement(self, node: ast.ReturnStatement) -> ast.ReturnStatement:\n \"\"\"\n ReturnStatement node transformer:\n If the expression of the return statement is removed then the return\n statement itself is also removed.\n\n Args:\n node (ast.ReturnStatement): openQASM return statement to process\n\n Returns:\n ast.ReturnStatement: same node if not removed, else None\n \"\"\"\n expression = self.visit(node.expression)\n if expression:\n return ast.ReturnStatement(expression=expression)\n LOGGER.debug(\"REMOVED: Empty ReturnStatement %s\", node)\n return None\n\n def visit_DelayInstruction(self, node: ast.DelayInstruction):\n \"\"\"\n DelayInstruction node transformer:\n If the dealy instruction is performed on a frame not using a target port\n it is removed.\n\n Args:\n node (ast.DelayInstruction): openQASM play instructions to process\n\n Returns:\n ast.DelayInstruction: same node if not removed, else None\n \"\"\"\n if self.context == ScopeContext.DEFCAL:\n if node.qubits[0].name in self.frames:\n return super().visit_DelayInstruction(node)\n LOGGER.debug(\"REMOVED: Unused DelayInstruction %s\", node)\n return None\n return super().visit_DelayInstruction(node)\n\n # pylint: enable=C0103\n\n @contextmanager\n def scope_manager(self, context: ScopeContext):\n \"\"\"\n Context manager for the scope of the qasm program.\n on entering the manager sets the context to the input context.\n restores the previous context on exiting the manager\n\n Args:\n context (ScopeContext): context to set while within the manager.\n \"\"\"\n old_context = self.context\n try:\n self.context = context\n yield\n finally:\n self.context = old_context" }, { "identifier": "ports_for_core", "path": "shipyard/passes/core_splitter.py", "snippet": "def ports_for_core(setup: SetupInternal, instrument: str, core_index: int):\n \"\"\"\n Gives all ports that are assocated with\n a specific core index of a specific instrument within a setup\n \"\"\"\n return set(\n port_name\n for port_name, port in setup.ports.items()\n if (port.instrument.name == instrument and port.core.index == core_index)\n )" }, { "identifier": "RemoveUnused", "path": "shipyard/passes/remove_unused.py", "snippet": "class RemoveUnused(GenericTransformer):\n \"\"\"\n QASMTransformer that removed unused and undeclared nodes from an openQASM AST.\n\n Usage:\n qasm_ast: ast.Program\n RemoveUnused(qasm_ast) <- transforms qasm ast\n\n Note:\n May have to be run multiple times to have intended effect.\n \"\"\"\n\n def __init__(self, node: ast.Program | None = None) -> None:\n super().__init__()\n self.unused: set[str] = None\n self.declared: set[str] = None\n self.remove_assignment: set[str] = set()\n if node:\n self.visit(node)\n\n def visit(self, node: ast.QASMNode, context=None) -> ast.QASMNode:\n if not self.unused and not self.declared:\n self.unused, self.declared = _DetermineUnused(node).result()\n LOGGER.debug(\"UPDATED: RemovedUnused with unused: %s\", 
self.unused)\n LOGGER.debug(\"UPDATED: RemovedUnused with declared: %s\", self.declared)\n return super().visit(node, context)\n\n # pylint: disable=C0103\n # (snake_case naming style)\n\n def visit_ClassicalDeclaration(\n self, node: ast.ClassicalDeclaration\n ) -> ast.ClassicalDeclaration:\n \"\"\"\n ClassicalDeclaration node visitor:\n Removes the node if it is not used within the openQASM program the node\n is part of.\n\n Example:\n in: int i = 1;\n int j = 2;\n i; // <- this line uses 'i'\n out:\n int i = 1;\n i;\n\n Args:\n node (ast.ClassicalDeclaration): openQASM classical declaration node\n\n Returns:\n ast.ClassicalDeclaration:\n if the node is used in the program else returns None\n \"\"\"\n if node.identifier.name not in self.unused:\n return node\n LOGGER.debug(\n \"REMOVED: unused ClassicalDeclaration node: %s\", node.identifier.name\n )\n return None\n\n def visit_ConstantDeclaration(\n self, node: ast.ConstantDeclaration\n ) -> ast.ConstantDeclaration:\n \"\"\"\n ClassicalDeclaration node visitor:\n Removes the node if it is not used within the openQASM program the node\n is part of.\n\n Example:\n in: const int i = 1;\n const int j = 2;\n i; // <- this line uses 'i'\n out:\n const int i = 1;\n i;\n\n Args:\n node (ast.ConstantDeclaration): openQASM constant declaration node\n\n Returns:\n ast.ConstantDeclaration:\n if the node is used in the program else returns None\n \"\"\"\n if node.identifier.name not in self.unused:\n return node\n LOGGER.debug(\n \"REMOVED: unused ConstantDeclaration node: %s\", node.identifier.name\n )\n return None\n\n def visit_SubroutineDefinition(\n self, node: ast.SubroutineDefinition\n ) -> ast.SubroutineDefinition:\n \"\"\"\n SubroutineDefinition node visitor:\n Removes the subroutine definition node if it is not used within the openQASM\n program the node is part of.\n\n Example:\n in: def used_func() {...};\n def unused_func() {...};\n used_func(); // <- this line uses 'used_func'\n out:\n def used_func() {...};\n used_func();\n\n Args:\n node (ast.SubroutineDefinition): openQASM subroutine definition node\n\n Returns:\n ast.SubroutineDefinition:\n if the node is used in the program else returns None\n \"\"\"\n node.body = self._visit_list(node.body, self.visit)\n if node.name.name not in self.unused:\n return node\n LOGGER.debug(\"REMOVED: unused SubroutineDefinition node: %s\", node)\n return None\n\n def visit_CalibrationDefinition(\n self, node: ast.CalibrationDefinition\n ) -> ast.CalibrationDefinition:\n \"\"\"\n CalibrationDefinition (defcal) node visitor:\n Visits the statements within the body of the defcal statments.\n\n Removes the calibration definition (defcal) node if it is not used within\n the openQASM program the node is part of.\n\n If the the defcal statement is used and it has a return value, checks\n if the return statement has been removed from the body of the defcal\n statements and adds the defcal signature to statements that assignment\n should be removed from.\n\n If the defcal statement is a measure statement, it will be written\n regardless of whether it is used or not.\n\n Example 1:\n in: defcal used_gate $0 {...};\n defcal unused_gate $0 {...};\n used_gate $0; // <- this line uses 'used_gate'\n out:\n defcal used_func $0 {...};\n used_gate $1;\n\n Example 2:\n in: defcal used_measure $0 -> bit {\n play(frame1, ...);\n play(frame0, ...);\n return capture_v2(frame0, ...);\n }\n bit measured_bit;\n measured_bit = used_measure $0;\n out:\n defcal used_measure $0 {\n play(frame1, ...);\n }\n used_measure $0;\n\n\n 
Args:\n node (ast.CalibrationDefinition): openQASM defcal node\n\n Returns:\n ast.CalibrationDefinition:\n if the node is used in the program else returns None\n \"\"\"\n node.body = self._visit_list(node.body, self.visit)\n if (\n not Mangler(node).signature().match(self.unused)\n and node.body\n or node.name.name == \"measure\"\n ):\n if node.return_type:\n has_return = False\n for stmt in node.body:\n has_return = isinstance(stmt, ast.ReturnStatement) or has_return\n if not has_return:\n node.return_type = None\n self.remove_assignment.add(Mangler(node).signature().mangle())\n LOGGER.debug(\n \"ADDED: defcal defintion to remove_assignment: %s\", node\n )\n return node\n LOGGER.debug(\"REMOVED: unused CalibrationDefinition node: %s\", node)\n return None\n\n def visit_QuantumGate(self, node: ast.QuantumGate) -> ast.QuantumGate:\n \"\"\"\n QuantumGate node visitor:\n Removes the quantum gate node if it is not declared within the openQASM\n program the node is part of.\n\n Example:\n in: defcal declared_gate $0 {...};\n declared_gate $0;\n undeclared_gate $0; // <- defcal for this gate is not declared\n out:\n defcal declared_gate $0 {...};\n declared_gate $0;\n\n Args:\n node (ast.QuantumGate): openQASM quantum gate node\n\n Returns:\n ast.QuantumGate:\n if the node is used in the program else returns None\n \"\"\"\n declared = Mangler(node).signature().match(self.declared)\n LOGGER.debug(\"DECLARED Gates: %s\", declared)\n if declared:\n return node\n LOGGER.debug(\"REMOVED: Undeclared QuantumGate node: %s\", node)\n return None\n\n def visit_QuantumMeasurement(\n self, node: ast.QuantumMeasurement\n ) -> ast.QuantumMeasurement:\n \"\"\"\n QuantumMeasurement node visitor:\n Removes the quantum measurement node if it is not declared within the\n openQASM program the node is part of.\n\n Example:\n in: defcal measure $0 {...};\n measure $0;\n measure $1; // <- defcal for this gate is not declared\n out:\n defcal measure $0 {...};\n measure $0;\n\n Args:\n node (ast.QuantumMeasurement): openQASM quantum measurement node.\n\n Returns:\n ast.QuantumMeasurement:\n if the node is used in the program else returns None.\n \"\"\"\n declared = Mangler(node).signature().match(self.declared)\n LOGGER.debug(\"DECLARED Measurements: %s\", declared)\n if declared:\n return node\n LOGGER.debug(\"REMOVED: Undeclared QuantumMeasurement node: %s\", node)\n return None\n\n def visit_QuantumReset(self, node: ast.QuantumReset) -> ast.QuantumReset:\n \"\"\"\n QuantumReset node visitor:\n Removes the quantum reset node if it is not declared within the\n openQASM program the node is part of.\n\n Example:\n in: defcal reset $0 {...};\n reset $0;\n reset $1; // <- defcal for this gate is not declared\n out:\n defcal reset $0 {...};\n reset $0;\n\n Args:\n node (ast.QuantumReset): openQASM quantum reset node.\n\n Returns:\n ast.QuantumReset:\n if the node is used in the program else returns None.\n \"\"\"\n declared = Mangler(node).signature().match(self.declared)\n LOGGER.debug(\"DECLARED Resets: %s\", declared)\n if declared:\n return node\n LOGGER.debug(\"REMOVED: Undeclared QuantumReset node: %s\", node)\n return None\n\n def visit_QuantumMeasurementStatement(\n self, node: ast.QuantumMeasurementStatement\n ) -> ast.QuantumMeasurementStatement:\n \"\"\"\n QuantumMeasurementStatement node visitor:\n If the QuantumMeasurement node of the MeasurementStatement is removed\n also remove the MeasurementStatement.\n\n If the mangled measurement call signature is in the remove_assignment list\n remove the target from the 
node.\n\n Example 1:\n in:\n defcal measure $0 {...};\n bit meas_bit_0;\n bit meas_bit_1; <- a 2nd pass will then also remove this declaration\n meas_bit_0 = measure $0;\n __________ <- removed as defcal is not declared\n meas_bit_1 = measure $1;\n |---------------------| <- meas is removed so entire stmt is removed\n\n out:\n defcal measure $0 {...};\n bit meas_bit_0;\n meas_bit_0 = measure $0;\n\n Example 2:\n in: remove_assignment = [_ZN7measure_PN0_QN0_$0_R]\n\n bit meas_bit_0;\n meas_bit_0 = measure $0;\n\n out:\n measure $0;\n\n Args:\n node (ast.QuantumMeasurementStatement):\n openQASM quantum measurement statement node.\n\n Returns:\n ast.QuantumMeasurementStatement:\n if the node is used in the program else returns None.\n \"\"\"\n mangler = Mangler(node.measure)\n mangler.return_type = \"\"\n if mangler.signature().mangle() in self.remove_assignment:\n node.target = None\n if self.visit(node.measure):\n return node\n LOGGER.debug(\"REMOVED: Unused QuantumMeasurementStatement node: %s\", node)\n return None\n\n # pylint: enable=C0103\n # (snake_case naming style)" }, { "identifier": "SetupInternal", "path": "shipyard/setup/internal.py", "snippet": "class SetupInternal(BaseModel):\n\n \"\"\"\n A Pydantic model containing the information required to compile an openQASM program\n to instrument level instructions.\n\n It is recommended to instanciate this object from a configuration file\n (json (future yml?))\n \"\"\"\n\n # todo validation\n\n # todo move to own module\n instruments: dict[str, Instrument]\n ports: dict[str, Port]\n frames: dict[str, Frame]\n\n @classmethod\n def from_dict(cls, setup: dict[str, dict[str, dict]]) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a dictionary\n\n Args:\n setup (dict[str, dict[str, dict]]): dictionary to create a Setup object from\n\n Returns:\n Setup: created from dictionary\n \"\"\"\n instruments = {\n k: Instrument(name=k, **v) for k, v in setup[\"Instruments\"].items()\n }\n ports = {}\n for k, val in setup[\"Ports\"].items():\n val[\"instrument\"] = instruments[val[\"instrument\"]]\n val[\"core\"] = Port.Core(**val[\"core\"])\n ports[k] = Port(name=k, **val)\n frames = {}\n for k, val in setup[\"Frames\"].items():\n val[\"port\"] = ports[val[\"port\"]]\n frames[k] = Frame(name=k, **val)\n return cls(instruments=instruments, ports=ports, frames=frames)\n\n def to_dict(self) -> dict[str, dict[str, dict]]:\n \"\"\"Creates a dictionary from a Setup object\n\n Args:\n filename (Path | str, optional):\n path to save dictionary to. 
Defaults to None.\n\n Returns:\n dict[str, dict[str, dict]]: dictionary created from Setup object\n \"\"\"\n setup = {\n \"Instruments\": {\n k: {\n \"type\": v.type,\n \"serial\": v.serial,\n }\n for k, v in self.instruments.items()\n },\n \"Ports\": {\n k: {\n \"instrument\": v.instrument.name,\n \"core\": {\n \"type\": v.core.type.value,\n \"index\": v.core.index,\n \"channels\": v.core.channels,\n },\n }\n for k, v in self.ports.items()\n },\n \"Frames\": {\n k: {\n \"port\": v.port.name,\n \"frequency\": v.frequency,\n \"phase\": v.phase,\n }\n for k, v in self.frames.items()\n },\n }\n return setup\n\n @classmethod\n def from_json(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a json file\n\n Args:\n filename (str | Path): path to json file\n\n Returns:\n Setup: created from json file\n \"\"\"\n with open(filename, encoding=\"utf-8\") as file:\n data = json.load(file)\n return cls.from_dict(data)\n\n def to_json(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a json file\n\n Args:\n filename (str | Path): path to json file to create\n\n Returns:\n Path: path to json file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n json.dump(data, file, indent=4)\n return Path(filename)\n\n @classmethod\n def from_yml(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a yml file\n\n Args:\n filename (str | Path): path to yml file\n\n Returns:\n Setup: created from yml file\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n data = yaml.safe_load(file)\n return cls.from_dict(data)\n\n def to_yml(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a yml file\n\n Args:\n filename (str | Path): path to yml file to create\n\n Returns:\n Path: path to yml file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n yaml.dump(data, file)\n return Path(filename)\n\n def cores(self) -> set[tuple[str, int, str]]:\n \"\"\"Gets all the AWG Cores used in the setup\n\n Returns:\n set[tuple[str, int, str]]:\n a Set of tuples, each tuple has a string representing the instruement\n name, a integer representing the index of the awg core of the\n instrument and a string representing the type of the awg core.\n \"\"\"\n return set(\n (port.instrument.name, port.core.index, port.core.type.value)\n for port in self.ports.values()\n )" } ]
from copy import deepcopy from pathlib import Path from openpulse import ast, parse from openpulse.printer import dumps from shipyard.compiler_error import TransformError from shipyard.passes.core_splitter import CoreSplitter, ports_for_core from shipyard.passes.remove_unused import RemoveUnused from shipyard.setup.internal import SetupInternal import pytest
6,905
def test_split_basic(): """Split a multi-port program per target port and compare to the golden files.""" qasm_path = Path(__file__).parent.parent / "qasm/split.qasm" with open(qasm_path, encoding="utf_8") as qasm_file: qasm_code = qasm_file.read() qasm_ast = parse(qasm_code) def split_port(port, target_file): transformed_ast = CoreSplitter(port).visit(deepcopy(qasm_ast)) RemoveUnused().visit(transformed_ast) with open( Path(__file__).parent.parent / f"qasm/{target_file}.qasm", encoding="utf_8" ) as qasm_file: post_split = qasm_file.read() # print(dumps(transformed_ast)) for generated, target in zip( dumps(transformed_ast).split("\n"), post_split.split("\n") ): assert generated == target split_port("awg1_ch1", "post_split_1") split_port("awg2_ch1", "post_split_2") def test_ports_for_core(): json_path = Path(__file__).parent.parent / "setups/complex.json"
def test_split_basic(): """Split a multi-port program per target port and compare to the golden files.""" qasm_path = Path(__file__).parent.parent / "qasm/split.qasm" with open(qasm_path, encoding="utf_8") as qasm_file: qasm_code = qasm_file.read() qasm_ast = parse(qasm_code) def split_port(port, target_file): transformed_ast = CoreSplitter(port).visit(deepcopy(qasm_ast)) RemoveUnused().visit(transformed_ast) with open( Path(__file__).parent.parent / f"qasm/{target_file}.qasm", encoding="utf_8" ) as qasm_file: post_split = qasm_file.read() # print(dumps(transformed_ast)) for generated, target in zip( dumps(transformed_ast).split("\n"), post_split.split("\n") ): assert generated == target split_port("awg1_ch1", "post_split_1") split_port("awg2_ch1", "post_split_2") def test_ports_for_core(): json_path = Path(__file__).parent.parent / "setups/complex.json"
complex_setup = SetupInternal.from_json(json_path)
4
2023-11-16 17:37:29+00:00
8k
PrAsAnNaRePo/LocalAgent
localagent/initialize_agents.py
[ { "identifier": "KnowledgeBase", "path": "localagent/knowledge_base.py", "snippet": "class KnowledgeBase:\n def __init__(\n self,\n file_dir: str\n ) -> None:\n \"\"\"\n Creates a knowledge base for Agent.\n\n Args:\n file_dir (str): The path to the directory containing the documents to load.\n\n \"\"\"\n self.path = file_dir\n self.embeddings = HuggingFaceHubEmbeddings(\n huggingfacehub_api_token=os.environ.get(\"HUGGINGFACEHUB_API_TOKEN\"),\n )\n chunks = self.load_documents(self.path)\n embeds = self.create_embeddings(chunks[0])\n self.vector_store = self.create_vectorstore(chunks[0], embeds)\n \n def load_documents(\n self,\n docs_directory_path: str\n ) -> List[Dict[str, Union[str, List[Dict[str, str]]]]]:\n \"\"\"\n Load documents from a directory and return a list of dictionaries containing the name of each document and its chunks.\n\n Args:\n docs_directory_path (str): The path to the directory containing the documents to load.\n\n Returns:\n List[Dict[str, Union[str, List[Dict[str, str]]]]]: A list of dictionaries containing the name of each document and its chunks.\n \"\"\"\n\n result = []\n\n for file_name in os.listdir(docs_directory_path):\n file_path = os.path.join(docs_directory_path, file_name)\n\n if file_name.endswith(\".pdf\"):\n loader = PyPDFLoader(file_path=file_path)\n else:\n loader = UnstructuredFileLoader(file_path=file_path)\n\n document = loader.load()\n\n text_splitter = CharacterTextSplitter(\n separator=\" \",\n chunk_size=2000,\n chunk_overlap=200,\n length_function=len,\n )\n\n chunks = [\n {\"chunk_\" + str(i + 1): chunk.page_content}\n for i, chunk in enumerate(text_splitter.split_documents(documents=document))\n ]\n\n # Add document name and chunked data to result list\n file_name = os.path.splitext(file_name)[0]\n result.append({\"name\": file_name, \"chunks\": chunks})\n\n return result\n \n def create_embeddings(self, documents) -> List[Embeddings]:\n all_embeddings: list[Embeddings] = []\n texts: list[str] = []\n for doc in documents['chunks']:\n for key, value in doc.items():\n texts.append(value)\n break\n\n embeddings_list = self.embeddings.embed_documents(texts)\n\n all_embeddings.extend(embeddings_list)\n\n return all_embeddings\n \n def create_vectorstore(self, documents, embeds) -> FAISS:\n loaded_embeddings = embeds\n\n texts: list = []\n\n for doc in documents['chunks']:\n for key, value in doc.items():\n texts.append(value)\n break\n\n # Combine the texts and embeddings into a list of tuples\n text_embedding = list(zip(texts, loaded_embeddings))\n\n # Create a FAISS object from the embeddings and text embeddings\n faiss = FAISS.from_embeddings(embedding=self.embeddings, text_embeddings=text_embedding)\n\n return faiss\n\n def create_similarity_search_docs(\n self,\n query: str,\n ) -> List[Document]:\n \"\"\"\n This function takes in three arguments: query, huggingfacehub_api_token, and path_to_vectorstore.\n It returns a list of documents that are most similar to the query.\n\n Parameters:\n - query (str): The query string.\n - huggingfacehub_api_token (str | None): The Hugging Face Hub API token.\n - path_to_vectorstore (str): The path to the vectorstore file.\n\n Returns:\n - List[Document]: A list of documents that are most similar to the query.\n \"\"\"\n\n answer_docs = self.vector_store.similarity_search(query, k=4)\n return answer_docs[0].page_content" }, { "identifier": "get_prompt_from_template", "path": "localagent/utils.py", "snippet": "def get_prompt_from_template(system, history, human_, assistant_, eos_token):\n for i in 
history:\n if i['role'] == 'user':\n system += f'{human_}{i[\"content\"]}{eos_token}'\n if i['role'] == 'assistant':\n system += f'{assistant_}{i[\"content\"]}{eos_token}'\n\n if history[-1]['role'] == 'user':\n system += f'{assistant_}'\n\n return system" }, { "identifier": "assistant_message", "path": "localagent/utils.py", "snippet": "def assistant_message(msg):\n Print(Markdown(f\"{Fore.YELLOW}{Style.BRIGHT}🤖 {Fore.YELLOW}{msg}{Style.RESET_ALL}\"))" }, { "identifier": "internal_monologue", "path": "localagent/utils.py", "snippet": "def internal_monologue(msg):\n # ANSI escape code for italic is '\\x1B[3m'\n print(f\"\\x1B[3m{Fore.LIGHTBLACK_EX}💭 {msg}{Style.RESET_ALL}\")" }, { "identifier": "important_message", "path": "localagent/utils.py", "snippet": "def important_message(msg):\n print(f\"{Fore.MAGENTA}{Style.BRIGHT}{msg}{Style.RESET_ALL}\")" }, { "identifier": "clear_line", "path": "localagent/utils.py", "snippet": "def clear_line():\n if os.name == \"nt\": # for windows\n console.print(\"\\033[A\\033[K\", end=\"\")\n else: # for linux\n sys.stdout.write(\"\\033[2K\\033[G\")\n sys.stdout.flush()" }, { "identifier": "warning_message", "path": "localagent/utils.py", "snippet": "def warning_message(msg):\n print(f\"{Fore.RED}{Style.BRIGHT}{msg}{Style.RESET_ALL}\")" }, { "identifier": "Interpreter", "path": "localagent/interpreter.py", "snippet": "class Interpreter:\n def __init__(self, exec, max_try, human_, assistant_, eos_token, stream=False) -> None:\n self.history = []\n self.exec = exec\n self.max_try = max_try\n self.human_ = human_\n self.assistant_ = assistant_\n self.eos_token = eos_token\n self.stream = stream\n \n def execute_code(self, lang, code, timeout=10):\n if lang.lower() == 'python':\n try:\n output = subprocess.run([sys.executable, \"-c\", code], capture_output=True, text=True, timeout=timeout)\n except subprocess.TimeoutExpired:\n print(f\"Execution of Python code timed out after {timeout} seconds.\")\n return None\n elif lang.lower() == 'bash':\n try:\n output = subprocess.run(code, shell=True, capture_output=True, text=True, timeout=timeout)\n except subprocess.TimeoutExpired:\n print(f\"Execution of Bash code timed out after {timeout} seconds.\")\n return None\n else:\n print('Only supported python and ')\n return None\n \n return output\n \n def __call__(self, task):\n print('\\n')\n internal_monologue(\"Interpreter is executing the code...\\n\")\n self.history.append({'role':'user', 'content':task})\n count = 1\n while True and count <= self.max_try:\n prompt = get_prompt_from_template(CODE_INTERPRETER, self.history, self.human_, self.assistant_, self.eos_token)\n if self.exec['name'] == 'webui':\n if self.stream:\n response = stream_run(self.exec['uri'], prompt)\n else:\n with console.status(\"[bold cyan]Executing codes...\") as status:\n response = run(self.exec['uri'], prompt)\n elif self.exec['name'] == 'ollama':\n if self.stream:\n response = ollama_generate(model_name=self.exec['uri'], template=prompt, stream=True)[0]\n else:\n with console.status(\"[bold cyan]Executing codes...\") as status:\n response = ollama_generate(model_name=self.exec['uri'], template=prompt)[0]\n else:\n raise Exception('Only supported webui and ollama.')\n count += 1\n \n self.history.append({'role':'user', 'content':response})\n code_blocks = extract_code(response)\n final_code_output = ''\n outs = []\n if len(code_blocks) > 0:\n for n, i in enumerate(code_blocks):\n lang, code = i\n output = self.execute_code(lang, code)\n if output.returncode == 0:\n outs.append(0)\n 
final_code_output += f\"\\nThe output of the block number #{n}:\\n{output.stdout}\"\n if output.returncode == 1:\n outs.append(1)\n final_code_output += f\"\\nThe block number #{n} got error:\\n{output.stderr}.\\nPlease check out and come up with better code.\"\n self.history.append({'role':'user', 'content':final_code_output})\n if 1 not in outs:\n return final_code_output\n else:\n print('\\n')\n else:\n print('retrying...')\n self.history.pop()\n \n return \"Sorry, can't able to make this now.\"" }, { "identifier": "stream_run", "path": "localagent/gen.py", "snippet": "def stream_run(uri, prompt, force_model=False):\n return asyncio.run(print_response_stream(uri, prompt, force_model))" }, { "identifier": "run", "path": "localagent/gen.py", "snippet": "def run(uri, prompt, force_model=False):\n if force_model:\n prompt += \"\\nThought:\"\n request = {\n 'prompt': prompt,\n 'max_new_tokens': 500,\n 'auto_max_new_tokens': False,\n 'max_tokens_second': 0,\n 'do_sample': True,\n 'temperature': 0.01,\n 'repetition_penalty': 1.24,\n 'temperature': 0.1,\n 'skip_special_tokens': True,\n 'stopping_strings': ['<|end_of_turn|>', '<|im_end|>', 'Observation']\n }\n\n response = requests.post(uri, json=request)\n if response.status_code == 200:\n result = response.json()['results'][0]['text']\n return '\\nThought:'+result if force_model else result" }, { "identifier": "ollama_generate", "path": "localagent/gen.py", "snippet": "def ollama_generate(model_name, prompt=None, system=None, template=None, stream=False, format=\"\", context=None, options=None, callback=None, force_model=False):\n try:\n if template is not None and force_model:\n template += '\\nThought:'\n url = f\"{BASE_URL}/api/generate\"\n payload = {\n \"model\": model_name, \n \"prompt\": prompt, \n \"system\": system, \n \"template\": template, \n \"context\": context, \n \"options\": options,\n \"format\": format,\n }\n \n # Remove keys with None values\n payload = {k: v for k, v in payload.items() if v is not None}\n \n with requests.post(url, json=payload, stream=True) as response:\n response.raise_for_status()\n \n # Creating a variable to hold the context history of the final chunk\n final_context = None\n \n # Variable to hold concatenated response strings if no callback is provided\n full_response = \"\"\n\n # Iterating over the response line by line and displaying the details\n for line in response.iter_lines():\n if line:\n # Parsing each line (JSON chunk) and extracting the details\n chunk = json.loads(line)\n \n # If a callback function is provided, call it with the chunk\n if callback:\n callback(chunk)\n else:\n # If this is not the last chunk, add the \"response\" field value to full_response and print it\n if not chunk.get(\"done\"):\n response_piece = chunk.get(\"response\", \"\")\n full_response += response_piece\n if 'Observation' in full_response:\n break\n if stream:\n print(response_piece, end=\"\", flush=True)\n \n # Check if it's the last chunk (done is true)\n if chunk.get(\"done\"):\n final_context = chunk.get(\"context\")\n full_response = full_response.replace('Observation', '')\n # Return the full response and the final context\n return '\\nThought:'+full_response if force_model else full_response, final_context\n \n except requests.exceptions.RequestException as e:\n print(f\"An error occurred: {e}\")\n return None, None" } ]
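The `Interpreter.__call__` snippet in this record depends on an `extract_code` helper whose definition is not included; from its usage (`for n, i in enumerate(code_blocks): lang, code = i`) it evidently returns `(language, code)` pairs. A plausible hedged sketch of such a helper, assuming the model's replies wrap code in standard triple-backtick fences — an assumption, since the real implementation is not shown:

import re

def extract_code(text):
    # Assumed helper: pull every ```lang ... ``` fenced block out of a model
    # response and return it as a list of (language, code) tuples.
    pattern = re.compile(r"```(\w+)\s*\n(.*?)```", re.DOTALL)
    return [(match.group(1), match.group(2)) for match in pattern.finditer(text)]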
import json import warnings from localagent.knowledge_base import KnowledgeBase from localagent.utils import get_prompt_from_template, assistant_message, internal_monologue, important_message, clear_line, warning_message from localagent.interpreter import Interpreter from localagent.gen import stream_run, run, ollama_generate from rich.console import Console
5,143
'required': True, 'schema': { 'type': 'string' }, }], } ) if len(self.tools) != 0: self.system_prompt = self.create_prompt_with_tools() def create_prompt_with_tools(self): tool_desc = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters} Format the arguments as a JSON object.""" tool_descs = [] tool_names = [] for info in self.tools: tool_descs.append( tool_desc.format( name_for_model=info['name_for_model'], name_for_human=info['name_for_human'], description_for_model=info['description_for_model'], parameters=json.dumps( info['parameters'], ensure_ascii=False), ) ) tool_names.append(info['name_for_model']) tool_descs = '\n\n'.join(tool_descs) tool_names = ','.join(tool_names) if self.use_codeinterpreter: react_prompt = f"""{self.system_} YOUR PERSONA: {self.system_prompt} You have access to these following tools: {tool_descs} Use the following format: Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Message: the final answer to the original input question {self.human_}: hey, you UP!{self.eos_token}{self.assistant_}: Thought: User asking about my availability, I should respond by telling i'm available for assistance. So no need to use any tool for this. Message: Hey there! I'm just here. How can I help you today?{self.eos_token} {self.human_}: Create a folder called Project-1 and create a file called temp.py in it.{self.eos_token}{self.assistant_}: Thought: The user wants to create a folder and a file in it, so I need to ask code_interpreter to create folder and file. Action: code_interpreter Action Input: {{"task": "Create a folder called Project-1 in the current folder and create a file called temp.py in Project-1 folder."}}{self.eos_token} {self.human_}: This is code interpreter (not user). Created a folder called Project-1 and created a file called temp.py inside Project-1. Thought: Now the files are created. I should tell the user about it. No need to use any tools again. Message: Created a folder and file in it. I'm here to help you if you need any assistance.{self.eos_token} """ else: react_prompt = f"""{self.system_} YOUR PERSONA: {self.system_prompt} {tool_descs} Use the following format: Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action (in json format) Observation: the result of the action ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Message: the final answer to the original input question {self.human_}: hey, you UP!{self.eos_token}{self.assistant_}: Thought: User asking about my availability, I should respond by telling i'm available for assistance. So no need to use any tool for this. Message: Hey there! I'm just here. 
How can I help you today?{self.eos_token}""" return react_prompt def go_flow(self, prompt): self.history.append({'role':'user', 'content':prompt}) done = False while not done: prompt = get_prompt_from_template(self.system_prompt, self.history, self.human_, self.assistant_, self.eos_token) if len(self.tools) != 0: if self.stream: raw_response = stream_run(self.webui_url, prompt, force_model=True) if self.webui_url else ollama_generate(self.olla_model_name, template=prompt, force_model=True, stream=True)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") else: with console.status("[bold cyan]Thinking...") as status: raw_response = run(self.webui_url, prompt, force_model=True) if self.webui_url else ollama_generate(self.olla_model_name, template=prompt)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") else: if self.stream: raw_response = stream_run(self.webui_url, prompt) if self.webui_url else ollama_generate(self.olla_model_name, template=prompt, stream=True)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") else: with console.status("[bold cyan]Thinking...") as status: raw_response = str(run(self.webui_url, prompt)) if self.webui_url else ollama_generate(model_name=self.olla_model_name, template=prompt)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") self.history.append({"role": "assistant", "content": raw_response}) if len(self.tools) != 0: response = raw_response.strip().split('\n') thought, message, action, action_inp = None, None, None, None for i in response: if i.startswith('Thought:'): thought = i.replace('Thought: ', '') if i.startswith('Message:'): message = i.replace('Message: ', '') if i.startswith('Action:'): action = i.replace('Action: ', '') if action: start_index = raw_response.find('{') end_index = raw_response.rfind('}') json_part = raw_response[start_index:end_index + 1] action_inp = json.loads(json_part)
console = Console() warnings.filterwarnings("ignore") class CreateAgent: def __init__( self, webui_url: str=None, ollama_model_name:str = None, system_prompt: str = None, system_:str = '', human_:str = 'GPT4 User', assistant_:str = "GPT4 Assistant", eos_token:str = '<|end_of_turn|>', tools: list[dict] = None, use_codeinterpreter: bool = False, interpreter_max_try:int = 3, knowledge_base: KnowledgeBase = None, stream:bool = False, verbose:bool = False, ) -> None: assert webui_url is not None or ollama_model_name is not None, 'Either webui_url or ollama_model_name should be given.' self.webui_url = webui_url self.olla_model_name = ollama_model_name self.stream = stream if webui_url is not None: if webui_url.startswith('http'): self.stream = False self.webui_url = webui_url+'/v1/generate' if verbose: important_message('agent initialized with non stream, If you want to start agent with streaming pass the streaming uri instead.') elif webui_url.startswith('ws'): self.stream = True self.webui_url = webui_url if verbose: important_message('agent initialized with stream, If you want to start agent with non streaming pass the regular api uri instead.') self.system_prompt = system_prompt self.tools = tools self.system_ = system_ self.human_ = human_ self.assistant_ = assistant_ self.eos_token = eos_token self.use_codeinterpreter = use_codeinterpreter self.knowledge_base = knowledge_base self.verbose = verbose self.history = [] if verbose: important_message(f'Agent initialized with stream={self.stream}') if not system_prompt: if verbose: important_message('No system prompt given, creating default system prompt.') self.system_prompt = f'{self.system_}\nYou are an AI assistant\n' if not self.tools: self.tools = [] if knowledge_base is not None: if verbose: important_message('Knowledge base is given, creating knowledge_retrival tool.') self.system_prompt += 'You have given a Knowledge document where you able to access contents in that using knowledge_retrival tool.\n' self.tools.append( { 'name_for_human': 'Knowledge retrival', 'name_for_model': 'knowledge_retrival', 'description_for_model': 'knowledge_retrival is a tool used to retrive any information from the Knowledge document.', 'parameters': [{ 'name': 'query', 'description': 'A query to search the specific information from document uploaded.', 'required': True, 'schema': { 'type': 'string' }, }], } ) if use_codeinterpreter: if verbose: important_message('Code interpreter is enabled, creating code_interpreter tool.') self.interpreter = Interpreter( exec={"name": 'ollama' if self.olla_model_name is not None else 'webui', "uri": self.olla_model_name if self.olla_model_name is not None else self.webui_url}, max_try=interpreter_max_try, human_=human_, assistant_=assistant_, eos_token=eos_token, stream=self.stream, ) self.tools.append( { 'name_for_human': 'code interpreter', 'name_for_model': 'code_interpreter', 'description_for_model': 'Code Interpreter enables the assistant to write and run code. 
code_interpreter is sensitive, it need all the information about the task to perform it such as path to file, correct file name, any other information required to perform the task.', 'parameters': [{ 'name': 'task', 'description': 'Describe the task clearly and briefly to the code interpreter to run the code and returns the output with you.', 'required': True, 'schema': { 'type': 'string' }, }], } ) if len(self.tools) != 0: self.system_prompt = self.create_prompt_with_tools() def create_prompt_with_tools(self): tool_desc = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters} Format the arguments as a JSON object.""" tool_descs = [] tool_names = [] for info in self.tools: tool_descs.append( tool_desc.format( name_for_model=info['name_for_model'], name_for_human=info['name_for_human'], description_for_model=info['description_for_model'], parameters=json.dumps( info['parameters'], ensure_ascii=False), ) ) tool_names.append(info['name_for_model']) tool_descs = '\n\n'.join(tool_descs) tool_names = ','.join(tool_names) if self.use_codeinterpreter: react_prompt = f"""{self.system_} YOUR PERSONA: {self.system_prompt} You have access to these following tools: {tool_descs} Use the following format: Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action Observation: the result of the action ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Message: the final answer to the original input question {self.human_}: hey, you UP!{self.eos_token}{self.assistant_}: Thought: User asking about my availability, I should respond by telling i'm available for assistance. So no need to use any tool for this. Message: Hey there! I'm just here. How can I help you today?{self.eos_token} {self.human_}: Create a folder called Project-1 and create a file called temp.py in it.{self.eos_token}{self.assistant_}: Thought: The user wants to create a folder and a file in it, so I need to ask code_interpreter to create folder and file. Action: code_interpreter Action Input: {{"task": "Create a folder called Project-1 in the current folder and create a file called temp.py in Project-1 folder."}}{self.eos_token} {self.human_}: This is code interpreter (not user). Created a folder called Project-1 and created a file called temp.py inside Project-1. Thought: Now the files are created. I should tell the user about it. No need to use any tools again. Message: Created a folder and file in it. I'm here to help you if you need any assistance.{self.eos_token} """ else: react_prompt = f"""{self.system_} YOUR PERSONA: {self.system_prompt} {tool_descs} Use the following format: Thought: you should always think about what to do Action: the action to take, should be one of [{tool_names}] Action Input: the input to the action (in json format) Observation: the result of the action ... (this Thought/Action/Action Input/Observation can be repeated zero or more times) Thought: I now know the final answer Message: the final answer to the original input question {self.human_}: hey, you UP!{self.eos_token}{self.assistant_}: Thought: User asking about my availability, I should respond by telling i'm available for assistance. So no need to use any tool for this. Message: Hey there! I'm just here. 
How can I help you today?{self.eos_token}""" return react_prompt def go_flow(self, prompt): self.history.append({'role':'user', 'content':prompt}) done = False while not done: prompt = get_prompt_from_template(self.system_prompt, self.history, self.human_, self.assistant_, self.eos_token) if len(self.tools) != 0: if self.stream: raw_response = stream_run(self.webui_url, prompt, force_model=True) if self.webui_url else ollama_generate(self.olla_model_name, template=prompt, force_model=True, stream=True)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") else: with console.status("[bold cyan]Thinking...") as status: raw_response = run(self.webui_url, prompt, force_model=True) if self.webui_url else ollama_generate(self.olla_model_name, template=prompt)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") else: if self.stream: raw_response = stream_run(self.webui_url, prompt) if self.webui_url else ollama_generate(self.olla_model_name, template=prompt, stream=True)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") else: with console.status("[bold cyan]Thinking...") as status: raw_response = str(run(self.webui_url, prompt)) if self.webui_url else ollama_generate(model_name=self.olla_model_name, template=prompt)[0].replace(self.eos_token, "").replace(self.eos_token[:-1], "") self.history.append({"role": "assistant", "content": raw_response}) if len(self.tools) != 0: response = raw_response.strip().split('\n') thought, message, action, action_inp = None, None, None, None for i in response: if i.startswith('Thought:'): thought = i.replace('Thought: ', '') if i.startswith('Message:'): message = i.replace('Message: ', '') if i.startswith('Action:'): action = i.replace('Action: ', '') if action: start_index = raw_response.find('{') end_index = raw_response.rfind('}') json_part = raw_response[start_index:end_index + 1] action_inp = json.loads(json_part)
internal_monologue(thought)
3
2023-11-10 07:47:41+00:00
8k
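The go_flow loop in this record relies on the model emitting its ReAct fields line by line and its Action Input as a JSON object. Below is a minimal, self-contained sketch of that parsing step, assuming the same line-prefixed format; the function name and the sample response string are hypothetical, not part of the record.

import json

def parse_react_response(raw_response: str) -> dict:
    # Mirrors the Thought/Message/Action parsing in go_flow above.
    # Assumes one field per line and that the Action Input is the
    # outermost {...} JSON object anywhere in the raw response.
    parsed = {"thought": None, "message": None, "action": None, "action_input": None}
    for line in raw_response.strip().split("\n"):
        if line.startswith("Thought:"):
            parsed["thought"] = line.replace("Thought: ", "", 1)
        if line.startswith("Message:"):
            parsed["message"] = line.replace("Message: ", "", 1)
        if line.startswith("Action:"):
            parsed["action"] = line.replace("Action: ", "", 1)
    if parsed["action"]:
        start, end = raw_response.find("{"), raw_response.rfind("}")
        if start != -1 and end > start:
            parsed["action_input"] = json.loads(raw_response[start:end + 1])
    return parsed

# Hypothetical model output, for illustration only:
sample = 'Thought: the user wants files listed\nAction: code_interpreter\nAction Input: {"task": "list files in the current folder"}'
print(parse_react_response(sample))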
ceterum1/llm-defender-subnet
llm_defender/core/miners/miner.py
[ { "identifier": "BaseNeuron", "path": "llm_defender/base/neuron.py", "snippet": "class BaseNeuron:\n \"\"\"Summary of the class\n\n Class description\n\n Attributes:\n parser:\n Instance of ArgumentParser with the arguments given as\n command-line arguments in the execution script\n profile:\n Instance of str depicting the profile for the neuron\n \"\"\"\n\n def __init__(self, parser: ArgumentParser, profile: str) -> None:\n self.parser = parser\n self.profile = profile\n self.step = 0\n self.last_updated_block = 0\n self.base_path = f\"{path.expanduser('~')}/.llm-defender-subnet\"\n self.subnet_version = subnet_version\n\n def config(self, bt_classes: list) -> bt.config:\n \"\"\"Applies neuron configuration.\n\n This function attaches the configuration parameters to the\n necessary bittensor classes and initializes the logging for the\n neuron.\n\n Args:\n bt_classes:\n A list of Bittensor classes the apply the configuration\n to\n\n Returns:\n config:\n An instance of Bittensor config class containing the\n neuron configuration\n\n Raises:\n AttributeError:\n An error occurred during the configuration process\n OSError:\n Unable to create a log path.\n\n \"\"\"\n try:\n for bt_class in bt_classes:\n bt_class.add_args(self.parser)\n except AttributeError as e:\n bt.logging.error(\n f\"Unable to attach ArgumentParsers to Bittensor classes: {e}\"\n )\n raise AttributeError from e\n\n config = bt.config(self.parser)\n\n # Construct log path\n log_path = f\"{self.base_path}/logs/{config.wallet.name}/{config.wallet.hotkey}/{config.netuid}/{self.profile}\"\n\n # Create the log path if it does not exists\n try:\n config.full_path = path.expanduser(log_path)\n if not path.exists(config.full_path):\n makedirs(config.full_path, exist_ok=True)\n except OSError as e:\n bt.logging.error(f\"Unable to create log path: {e}\")\n raise OSError from e\n\n return config" }, { "identifier": "LLMDefenderProtocol", "path": "llm_defender/base/protocol.py", "snippet": "class LLMDefenderProtocol(bt.Synapse):\n \"\"\"\n This class implements the protocol definition for the the\n llm-defender subnet.\n\n The protocol is a simple request-response communication protocol in\n which the validator sends a request to the miner for processing\n activities.\n \"\"\"\n\n # Parse variables\n prompt: typing.Optional[str] = None\n engine: typing.Optional[str] = None\n output: typing.Optional[dict] = None\n\n synapse_uuid: str = pydantic.Field(\n ...,\n description=\"Synapse UUID\",\n allow_mutation=False\n )\n\n subnet_version: int = pydantic.Field(\n ...,\n description=\"Current subnet version\",\n allow_mutation=False,\n )\n\n roles: typing.List[str] = pydantic.Field(\n ...,\n title=\"Roles\",\n description=\"An immutable list depicting the roles\",\n allow_mutation=False,\n regex=r\"^(internal|external)$\",\n )\n\n analyzer: typing.List[str] = pydantic.Field(\n ...,\n title=\"analyzer\",\n description=\"An immutable list depicting the analyzers to execute\",\n allow_mutation=False,\n regex=r\"^(Prompt Injection)$\",\n )\n\n def get_analyzers(self) -> list:\n \"\"\"Returns the analyzers associated with the synapse\"\"\"\n\n return self.analyzer\n\n def deserialize(self) -> bt.Synapse:\n \"\"\"Deserialize the instance of the protocol\"\"\"\n return self" }, { "identifier": "YaraEngine", "path": "llm_defender/core/miners/engines/prompt_injection/yara.py", "snippet": "class YaraEngine(BaseEngine):\n \"\"\"This class implements the YARA engine.\n \n YARA is a powerful pattern matching tool that can be used to analyze\n strings 
based on boolean operators and other logical patterns. As a\n part of prompt injection analyzer, the YARA engine is used to detect\n well known prompt injections and other patterns within the inputs\n that could be an indication of prompt injections.\n\n The detection logic is described within the rules and in order to\n fine-tune this engine, you should add additional rules within the\n yara_rules directory.\n \n \"\"\"\n def __init__(self, prompt: str=None, name: str = \"engine:yara\"):\n super().__init__(name=name)\n\n self.prompt = prompt\n self.compiled = f\"{self.cache_dir}/compiled_rules\"\n self.rules = f\"{path.dirname(__file__)}/yara_rules/*.yar\"\n\n def _calculate_confidence(self):\n if self.output[\"outcome\"] != \"NoRuleMatch\":\n match_accuracies = []\n for match in self.output[\"meta\"]:\n if float(match[\"accuracy\"]) < 0.0 or float(match[\"accuracy\"]) > 1.0:\n raise ValueError(f'YARA rule accuracy is out-of-bounds: {match}')\n match_accuracies.append(float(match[\"accuracy\"]))\n\n return 1.0 * max(match_accuracies)\n return 0.5\n\n def _populate_data(self, results):\n if results:\n return {\n \"outcome\": \"RuleMatch\",\n \"meta\": [result.meta for result in results],\n }\n return {\"outcome\": \"NoRuleMatch\"}\n\n def prepare(self) -> bool:\n # Check cache directory\n if not path.exists(self.cache_dir):\n try:\n makedirs(self.cache_dir)\n except OSError as e:\n raise OSError(f\"Unable to create cache directory: {e}\") from e\n\n # Compile YARA rules\n try:\n files = glob(self.rules)\n yara_rules = {}\n for file in files:\n with open(file, \"r\", encoding=\"utf-8\") as f:\n yara_rules[file] = f.read()\n\n compiled_rules = yara.compile(sources=yara_rules)\n compiled_rules.save(self.compiled)\n\n if not path.isfile(self.compiled):\n raise FileNotFoundError(f'Unable to locate compiled YARA rules: {e}')\n\n return True\n except OSError as e:\n raise OSError(f\"Unable to read YARA rules: {e}\") from e\n except yara.SyntaxError as e:\n raise yara.SyntaxError(f\"Syntax error when compiling YARA rules: {e}\") from e\n except yara.Error as e:\n raise yara.Error(f\"Unable to compile YARA rules: {e}\") from e\n\n def initialize(self) -> yara.Rules:\n if not path.isfile(self.compiled):\n bt.logging.warning(\"Compiled YARA rules not found. Running preparation mid-flight.\")\n if not self.prepare():\n raise yara.Error('Unable to prepare YARA engine')\n try:\n rules = yara.load(self.compiled)\n return rules\n except yara.Error as e:\n raise yara.Error(f\"Unable to load rules: {e}\") from e\n \n def execute(self, rules: yara.Rules) -> bool:\n\n if not self.prompt:\n raise ValueError('Cannot execute engine with empty input')\n\n if not isinstance(self.prompt, str):\n raise ValueError(f'Input must be a string. 
The type for the input {self.prompt} is: {type(self.prompt)}')\n \n try:\n results = rules.match(data=self.prompt)\n except yara.TimeoutError as e:\n raise yara.TimeoutError(f'YARA matching timed out: {e}') from e\n except yara.Error as e:\n raise yara.TimeoutError(f'YARA matching returned an error: {e}') from e\n\n self.output = self._populate_data(results)\n self.confidence = self._calculate_confidence()\n\n bt.logging.debug(f\"YARA engine executed (Confidence: {self.confidence} - Output: {self.output})\")\n return True" }, { "identifier": "TextClassificationEngine", "path": "llm_defender/core/miners/engines/prompt_injection/text_classification.py", "snippet": "class TextClassificationEngine(BaseEngine):\n \"\"\"Text classification engine for detecting prompt injection.\n\n This class implements an engine that uses text classification to\n identity prompt injection attacks. The text classification engine is\n the primary detection method along with the heuristics engine\n detecting prompt injection attacks.\n\n Whereas the heuristics engine is a collection of specialized\n sub-engines the text-classification engine focuses on analyzing the\n prompt as a whole and thus has a potential to yield better results\n than the heuristic based approaches.\n\n Attributes:\n\n \"\"\"\n\n def __init__(self, prompt: str = None, name: str = \"engine:text_classification\"):\n super().__init__(name=name)\n self.prompt = prompt\n\n def _calculate_confidence(self):\n # Determine the confidence based on the score\n if self.output[\"outcome\"] != \"UNKNOWN\":\n if self.output[\"outcome\"] == \"SAFE\":\n return 0.0\n else:\n return 1.0\n else:\n return 0.5\n\n def _populate_data(self, results):\n if results:\n return {\"outcome\": results[0][\"label\"], \"score\": results[0][\"score\"]}\n return {\"outcome\": \"UNKNOWN\"}\n\n def prepare(self) -> bool:\n # Check cache directory\n if not path.exists(self.cache_dir):\n try:\n makedirs(self.cache_dir)\n except OSError as e:\n raise OSError(f\"Unable to create cache directory: {e}\") from e\n \n _, _ = self.initialize()\n\n return True\n\n def initialize(self):\n try:\n model = AutoModelForSequenceClassification.from_pretrained(\n \"laiyer/deberta-v3-base-prompt-injection\", cache_dir=self.cache_dir\n )\n\n tokenizer = AutoTokenizer.from_pretrained(\n \"laiyer/deberta-v3-base-prompt-injection\", cache_dir=self.cache_dir\n )\n except Exception as e:\n raise Exception(\n f\"Error occurred when initializing model or tokenizer: {e}\"\n ) from e\n\n if not model or not tokenizer:\n raise ValueError(\"Model or tokenizer is empty\")\n\n return model, tokenizer\n\n def execute(self, model, tokenizer):\n \"\"\"Perform text-classification for the prompt.\n\n This function performs classification of the given prompt to\n enable it to detect prompt injection. 
The function returns the\n label and score provided by the classifier and defines the class\n attributes based on the outcome of the classifier.\n\n Arguments:\n Model:\n The model used by the pipeline\n Tokenizer:\n The tokenizer used by the pipeline\n \"\"\"\n\n if not model or not tokenizer:\n raise ValueError(\"Model or tokenizer is empty\")\n try:\n pipe = pipeline(\n \"text-classification\",\n model=model,\n tokenizer=tokenizer,\n truncation=True,\n max_length=512,\n device=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n )\n results = pipe(self.prompt)\n except Exception as e:\n raise Exception(\n f\"Error occurred during text classification pipeline execution: {e}\"\n ) from e\n\n self.output = self._populate_data(results)\n self.confidence = self._calculate_confidence()\n\n bt.logging.debug(\n f\"Text Classification engine executed (Confidence: {self.confidence} - Output: {self.output})\"\n )\n return True" }, { "identifier": "VectorEngine", "path": "llm_defender/core/miners/engines/prompt_injection/vector_search.py", "snippet": "class VectorEngine(BaseEngine):\n \"\"\"Distance-based detection of prompt injection.\n\n This class implements an engine that uses vector embeddings to\n determine how similar a given prompt is when compared against known\n prompt injections that are stored in chromadb.\n\n Upon initialization, the default implementation stores known\n prompt-injection strings from publicly available datasets within a\n locally persisted chromadb.\n\n Attributes:\n db_path:\n An instance of str depicting the path to store the chromadb\n prompt:\n An instance of str depicting the prompt to be searched for\n result_count:\n An instance of int indicating how many results to return\n from the collection\n threshold:\n An instance of float indicating the cut-off point for when a\n match is considered good enough to be accounted for.\n engine_name:\n An instance of str depicting the name for the engine,\n default to \"Vector Search\"\n\n Methods:\n get_collection(): Returns the chromadb collection\n \"\"\"\n\n def __init__(\n self,\n prompt: str = None,\n name=\"engine:vector_search\",\n reset_on_init=False\n ):\n super().__init__(name=name)\n self.prompt = prompt\n self.collection_name = \"prompt-injection-strings\"\n self.reset_on_init = reset_on_init\n\n def _calculate_confidence(self):\n if self.output[\"outcome\"] != \"ResultsNotFound\":\n # Some distances are above 1.6 -> unlikely to be malicious\n distances = self.output[\"distances\"]\n if any(distance >= 1.6 for distance in distances):\n return 0.0\n if any(distance <= 1.0 for distance in distances):\n return 1.0\n \n # Calculate the value between 0.0 and 1.0 based on the distance from 1.0 to 1.6\n min_distance = 1.0\n max_distance = 1.6\n\n # Normalize the distances between 1.0 and 1.6 to a range between 0 and 1\n normalized_distances = [(distance - min_distance) / (max_distance - min_distance) for distance in distances]\n\n # Calculate the mean of normalized distances\n if normalized_distances:\n normalized_mean = sum(normalized_distances) / len(normalized_distances)\n\n # Interpolate the value between 0.0 and 1.0 based on the normalized_mean\n interpolated_value = 1.0 - normalized_mean\n\n return interpolated_value\n return 0.5\n\n return 0.5\n\n def _populate_data(self, results):\n if results:\n return {\n \"outcome\": \"ResultsFound\",\n \"distances\": results[\"distances\"][0],\n \"documents\": results[\"documents\"][0],\n }\n return {\"outcome\": \"ResultsNotFound\"}\n\n def prepare(self) -> 
bool:\n \"\"\"This function is used by prep.py\n\n The prep.py executes the prepare methods from all engines before\n the miner is launched. If you change the models used by the\n engines, you must also change this prepare function to match.\n\n For the vector search engine, the accuracy is highly dependent\n of the contents in the vector database. As a part of the\n fine-tuning of the engines, it is recommended to adjust what\n information is loaded into the chromadb as this code is\n executed.\n \"\"\"\n # Check cache directory\n if not path.exists(self.cache_dir):\n try:\n makedirs(self.cache_dir)\n except OSError as e:\n raise OSError(f\"Unable to create cache directory: {e}\") from e\n \n # Client is needed to prepare the engine\n client = self.initialize()\n\n # Initialize collection\n try:\n collection = client.get_or_create_collection(name=self.collection_name)\n except Exception as e:\n raise Exception(f\"Unable to get or create chromadb collection: {e}\") from e\n\n # Populate chromadb\n try:\n # If there already are items in the collection, reset them\n if collection.count() > 0:\n return True\n\n dataset = load_dataset(\n \"deepset/prompt-injections\", cache_dir=self.cache_dir\n )\n filtered_dataset = dataset.filter(lambda x: x[\"label\"] == 1)\n\n # Add training data\n collection.add(\n documents=filtered_dataset[\"train\"][\"text\"],\n ids=[\n str(uuid.uuid4())\n for _ in range(len(filtered_dataset[\"train\"][\"text\"]))\n ],\n )\n\n # Add testing data\n collection.add(\n documents=filtered_dataset[\"test\"][\"text\"],\n ids=[\n str(uuid.uuid4())\n for _ in range(len(filtered_dataset[\"test\"][\"text\"]))\n ],\n )\n\n # Trigger the download of onnx model\n collection.query(query_texts=\"foo\")\n\n return True\n except Exception as e:\n raise Exception(f\"Unable to populate chromadb collection: {e}\") from e\n\n def initialize(self) -> chromadb.PersistentClient:\n client = chromadb.PersistentClient(path=f\"{self.cache_dir}/chromadb2\", settings=Settings(allow_reset=True))\n if self.reset_on_init:\n client.reset()\n\n return client\n\n def execute(self, client: chromadb.PersistentClient):\n # Get collection\n try:\n collection = client.get_collection(name=self.collection_name)\n except ValueError as e:\n bt.logging.warning(\n f\"Running preparation mid-flight for chromadb. The miner may not have been initialized properly, consider restarting the miner. 
Error received: {e}\"\n )\n self.prepare()\n collection = client.get_collection(name=\"prompt-injection-strings\")\n except Exception as e:\n raise Exception(f\"Unable to get collection from chromadb: {e}\") from e\n\n if not collection:\n raise ValueError(\"ChromaDB collection not found\")\n\n # Execute query\n try:\n results = collection.query(\n query_texts=self.prompt,\n n_results=2,\n include=[\"documents\", \"distances\"],\n )\n except Exception as e:\n raise Exception(f\"Unable to query documents from collection: {e}\") from e\n\n self.output = self._populate_data(results)\n self.confidence = self._calculate_confidence()\n\n bt.logging.debug(\n f\"Vector Search engine executed (Confidence: {self.confidence} - Output: {self.output})\"\n )\n return True" }, { "identifier": "validate_miner_blacklist", "path": "llm_defender/base/utils.py", "snippet": "def validate_miner_blacklist(miner_blacklist) -> bool:\n \"\"\"The local blacklist must be a JSON array:\n [\n {\"hotkey\": \"5FZV8fBTpEo51pxxPd5AqdpwN3BzK8rxog6VYFiGd6H7pPKY\", \"reason\": \"Exploitation\"},\n {\"hotkey\": \"5FMjfXzFuW6wLYVGTrvE5Zd66T1dvgv3qKKhWeTFWXoQm3jS\", \"reason\": \"Exploitation\"}\n ]\n \"\"\"\n if miner_blacklist:\n return bool(\n isinstance(miner_blacklist, list)\n and all(\n isinstance(item, dict)\n and all(key in item for key in [\"hotkey\", \"reason\"])\n for item in miner_blacklist\n )\n )\n return False" } ]
from argparse import ArgumentParser from typing import Tuple from llm_defender.base.neuron import BaseNeuron from llm_defender.base.protocol import LLMDefenderProtocol from llm_defender.core.miners.engines.prompt_injection.yara import YaraEngine from llm_defender.core.miners.engines.prompt_injection.text_classification import TextClassificationEngine from llm_defender.core.miners.engines.prompt_injection.vector_search import VectorEngine from llm_defender.base.utils import validate_miner_blacklist import sys import requests import bittensor as bt
4,889
"""Module for prompt-injection neurons for the llm-defender-subnet. Long description Typical example usage: foo = bar() foo.bar() """ class PromptInjectionMiner(BaseNeuron): """Summary of the class Class description Attributes: """ def __init__(self, parser: ArgumentParser): super().__init__(parser=parser, profile="miner") self.neuron_config = self.config( bt_classes=[bt.subtensor, bt.logging, bt.wallet, bt.axon] ) args = parser.parse_args() if args.miner_set_weights == "False": self.miner_set_weights = False else: self.miner_set_weights = True self.chromadb_client = VectorEngine().initialize()
"""Module for prompt-injection neurons for the llm-defender-subnet. Long description Typical example usage: foo = bar() foo.bar() """ class PromptInjectionMiner(BaseNeuron): """Summary of the class Class description Attributes: """ def __init__(self, parser: ArgumentParser): super().__init__(parser=parser, profile="miner") self.neuron_config = self.config( bt_classes=[bt.subtensor, bt.logging, bt.wallet, bt.axon] ) args = parser.parse_args() if args.miner_set_weights == "False": self.miner_set_weights = False else: self.miner_set_weights = True self.chromadb_client = VectorEngine().initialize()
self.model, self.tokenizer = TextClassificationEngine().initialize()
3
2023-11-14 18:10:35+00:00
8k
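The VectorEngine snippet in this record's context maps chromadb query distances to a confidence score: distances of 1.6 or more are treated as benign, 1.0 or less as malicious, and the 1.0-1.6 band is linearly interpolated. A standalone sketch of that mapping, assuming the same cut-offs (the function name is illustrative, not part of the record):

def distance_confidence(distances: list[float]) -> float:
    # Sketch of VectorEngine._calculate_confidence's distance handling.
    if not distances:
        return 0.5  # no results -> neutral confidence
    if any(d >= 1.6 for d in distances):
        return 0.0  # far from known injections -> unlikely malicious
    if any(d <= 1.0 for d in distances):
        return 1.0  # very close to a known injection
    # Normalize the 1.0..1.6 band to 0..1, then invert so that
    # smaller distances yield higher confidence.
    normalized = [(d - 1.0) / 0.6 for d in distances]
    return 1.0 - sum(normalized) / len(normalized)

print(distance_confidence([1.2, 1.3]))  # ~0.58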
rohitsinghlab/sceodesic
sceodesic/sceo.py
[ { "identifier": "fn_timer", "path": "sceodesic/utils/fn_timer.py", "snippet": "def fn_timer(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n\n # run and time function\n start_time = time.time()\n result = func(*args, **kwargs)\n end_time = time.time()\n elapsed_time = end_time - start_time\n\n print(f\"{func.__name__} took {elapsed_time:.3f} seconds to run.\")\n return result\n return wrapper" }, { "identifier": "parse_sceo_command_line_args", "path": "sceodesic/sceo_io/sceo_command_line_parser.py", "snippet": "def parse_sceo_command_line_args(default_config):\n parser = argparse.ArgumentParser(description='sceodesic pipeline')\n parser.add_argument('--config', type=str, default='', help='Path to YAML configuration file.')\n \n # -1 means not set, use default\n parser.add_argument('--num_hvg', type=int, help='Number of highly variable genes.', default=-1) \n \n parser.add_argument('--action', type=int, help='Actions to control the pipeline.', default=1)\n parser.add_argument('--inp_data', type=str, required=True, help='Path to input data.')\n parser.add_argument('--output_prefix', type=str, required=True, help='Output prefix for file names.', default='DEFAULT')\n parser.add_argument('--filepath', type=str, help=\"Path in which to store results. Note that this argument will overwrite the file path specified in the config file. Required if config file is NOT provided.\")\n parser.add_argument('--adata_output_name', type=str, help=\"The path of the file containing the adata embedding output, will overwrite the default path.\")\n args = parser.parse_args()\n \n # Read from YAML config if provided\n if args.config:\n cfg = read_yaml_config(args.config, default_config)\n cfg['output_prefix'] = cfg.get('output_prefix', args.output_prefix)\n \n # by default command line argument is set to -1\n cfg['num_hvg'] = args.num_hvg if args.num_hvg > 0 else cfg['num_hvg']\n args.config = cfg\n else:\n args.config = default_config\n\n # if config file is not provided, we must specify a file path\n if not args.filepath:\n message = (\"ArgumentError: You must either pass in a config.yaml file\"\n \" or an output directory using the `--filepath` argument\")\n sys.exit(message)\n\n # note that args.filepath will overwrite the filepath specified in the config file\n if args.filepath:\n args.config['filepath'] = args.filepath\n\n return args" }, { "identifier": "load_input", "path": "sceodesic/sceo_io/sceo_load_input.py", "snippet": "@fn_timer\ndef load_input(inp_data):\n adata = ad.read_h5ad(inp_data)\n print(\"Anndata read in\")\n \n # should we change this? 
(ROHIT)\n if adata.X.max() > 20:\n sc.pp.normalize_total(adata, target_sum=1e4) #normalize counts\n sc.pp.log1p(adata) # log 1+\n print(\"log-cp10k data computed\")\n else:\n print('data was already log-normalized')\n \n return adata" }, { "identifier": "compute_covariance_and_ncomps_pct_variance", "path": "sceodesic/helper/compute_covariance.py", "snippet": "def compute_covariance_and_ncomps_pct_variance(data, max_condition_number, pvd_pct):\n \"\"\" Computes a symmetric positive definite sample covariance matrix.\n - `data` is a cell x gene 2D numpy array.\n \"\"\"\n # Compute raw covariance.\n matrix = np.cov(data, rowvar=False)\n\n S,U = np.linalg.eigh(matrix)\n ncomps_pct_variance = np.argmax(np.cumsum(S[::-1]) / np.sum(S) >= pvd_pct) + 1\n \n # normalize by condition-volume \n matrix = _normalize_condition_volume(S, U, max_condition_number, log=False)\n \n return matrix, ncomps_pct_variance" }, { "identifier": "_get_cell_cohorts", "path": "sceodesic/sceo_main/get_cell_cohorts.py", "snippet": "def _get_cell_cohorts(adata, num_clusters, stratify_cols, num_hvg, \n copy, return_results, n_init, \n clustering_filename=None,\n uns_key=None, cluster_key=None, stratify_key=None):\n \n if uns_key is None:\n uns_key = UNS_KEY\n \n if cluster_key is None:\n cluster_key = CLUSTER_KEY\n \n if stratify_key is None:\n stratify_key = STRATIFY_KEY\n \n if copy:\n adata = adata.copy()\n \n # should we get rid of this (ROHIT)\n assert num_clusters == 'auto' or num_clusters > 10\n \n #if 'auto', we set num_clusters = ncells/nhvg\n if num_clusters == 'auto': \n try: \n num_clusters = _get_auto_num_clusters(adata, num_hvg=num_hvg)\n except Exception as e:\n print(\"Error: if num_clusters is 'auto', you must specify num_hvg.\", file=sys.stderr)\n print(f\"num_hvg is set to {num_hvg} and of type {type(num_hvg)}.\", file=sys.stderr)\n raise e\n\n # cluster - either stratify or don't\n stratify_cols = [stratify_cols] if isinstance(stratify_cols, str) else stratify_cols \n if len(stratify_cols) > 0 and stratify_cols[0].lower() != 'none':\n arrs = [adata.obs[c].ravel().astype(str) for c in stratify_cols]\n stratify_vec = functools.reduce(lambda x, y: np.char.add(np.char.add(x, '||'), y), arrs)\n unique_strats = np.unique(stratify_vec)\n \n print(f'{unique_strats.shape} groups to stratify clustering')\n \n groups = [(adata[stratify_vec == strat,:], np.where(stratify_vec==strat)[0], strat) \\\n for strat in unique_strats]\n else:\n groups = [(adata, np.arange(adata.shape[0]), 'all')]\n\n # storing PCA results as well\n pca_results = {}\n \n # storing kmeans objects as well\n kmeans_models = {}\n \n kmeans_cluster_dict = {}\n curr_cluster_count = 0\n for idx, (group_adata, orig_indices, strat_desc) in enumerate(groups): \n # Cluster in PC space. 
\n U, s, Vt = fbpca.pca(group_adata.X, k=100) \n\n X_dimred = U[:,:100]* s[:100]\n print(f\"PCA done for stratification group {idx+1} '{strat_desc}'\")\n \n # save the pca results\n pca_means = np.array(group_adata.X.mean(axis=0)).squeeze()\n pca_results[strat_desc] = {'U': U, 's': s, 'Vt': Vt, 'means': pca_means}\n \n group_num_clusters = max(1, int(float(group_adata.shape[0])/adata.shape[0] * num_clusters))\n \n kmeans = KMeans(n_clusters=group_num_clusters, n_init=n_init)\n kmeans.fit(X_dimred)\n print(f\"Fitting done k means with {group_num_clusters} clusters for stratification group {idx+1} '{strat_desc}'\")\n kmeans_cluster_assignments = kmeans.labels_\n \n # save the kmeans model\n kmeans_models[strat_desc] = (kmeans, curr_cluster_count)\n\n for i in range(group_num_clusters):\n # save keys as strings so we can save to .h5ad\n kmeans_cluster_dict[str(curr_cluster_count)] = orig_indices[np.where(kmeans_cluster_assignments == i)[0]].tolist()\n curr_cluster_count += 1\n\n cnt_sizeLT10 = len([v for v in kmeans_cluster_dict.values() if len(v) < 10])\n cnt_sizeLT50 = len([v for v in kmeans_cluster_dict.values() if len(v) < 50])\n print(f'Finished clustering with {len(kmeans_cluster_dict)} clusters (originally intended {num_clusters}). Size < 10: {cnt_sizeLT10}, Size < 50: {cnt_sizeLT50}') \n clustering_results_dict = {\"cell2cluster\" : kmeans_cluster_dict, \n \"cluster_pca_matrices\": pca_results,\n \"kmeans_models\": kmeans_models,\n \"stratify_cols\": stratify_cols}\n \n \n if clustering_filename:\n with open(clustering_filename, 'wb') as f:\n pickle.dump(clustering_results_dict, f)\n\n # write to adata.uns \n adata.uns[uns_key][cluster_key] = kmeans_cluster_dict\n adata.uns[uns_key][stratify_key] = stratify_cols\n \n out = ()\n if copy:\n out += (adata,)\n if return_results:\n out += (clustering_results_dict,)\n \n return out if len(out) > 1 else out[0] if len(out) == 1 else None" }, { "identifier": "_get_locally_variable_genes", "path": "sceodesic/sceo_main/get_locally_variable_genes.py", "snippet": "def _get_locally_variable_genes(adata, num_hvg, num_hvg_per_cluster, global_hvg, hvg_filename=None, \n copy=False, return_results=False, clustering_results=None, \n uns_key=None, hvg_key=None):\n \n if uns_key is None:\n uns_key = UNS_KEY\n \n if hvg_key is None: \n hvg_key = HVG_KEY\n \n if copy:\n adata = adata.copy()\n \n # change later \n results_clustering = clustering_results\n \n \n # Store the cluster data matrices.\n cell2cluster = results_clustering[\"cell2cluster\"]\n \n full_data_clusters = []\n for key in cell2cluster.keys():\n cluster_indices = cell2cluster[key]\n ## full_data_clusters.append(adata[cluster_indices,:])\n full_data_clusters.append(cluster_indices)\n\n if global_hvg:\n sc.pp.highly_variable_genes(adata, layer=None, n_top_genes=num_hvg)\n top_gene_idxs = np.where(adata.var['highly_variable'])[0]\n top_gene_names = adata.var_names[top_gene_idxs]\n else:\n # Now compute bottoms up hvgs.\n hvg_count_vec = np.zeros(adata.shape[1])\n for i, clusterids in enumerate(full_data_clusters):\n cluster = adata[clusterids,:].copy()\n sc.pp.highly_variable_genes(cluster, layer=None, n_top_genes=num_hvg_per_cluster)\n hvg_count_vec += np.where(cluster.var['highly_variable'], 1, 0)\n\n # select the indices of the first num_hvg highest values in hvg_count_vec\n top_gene_idxs = np.argsort(hvg_count_vec)[-num_hvg:]\n top_gene_names = adata.var_names[top_gene_idxs]\n \n if hvg_filename:\n with open(hvg_filename, 'wb') as f:\n pickle.dump((top_gene_idxs, top_gene_names), f)\n \n 
adata.uns[uns_key][hvg_key] = top_gene_names.tolist()\n\n out = ()\n if copy:\n out += (adata,)\n if return_results:\n out += ((top_gene_idxs, top_gene_names.tolist()),)\n \n return out if len(out) > 1 else out[0] if len(out) == 1 else None" }, { "identifier": "_estimate_covariances", "path": "sceodesic/sceo_main/estimate_covariances.py", "snippet": "def _estimate_covariances(adata, max_condition_number, pvd_pct=0.9, \n copy=False, return_results=False, coexpression_filename=None,\n top_genes=None, results_clustering=None, \n uns_key=None, cluster_covar_key=None,\n cluster_var_ct_key=None):\n \n if uns_key is None:\n uns_key = UNS_KEY\n \n if cluster_covar_key is None:\n cluster_covar_key = CLUSTER_COVAR_KEY\n \n if cluster_var_ct_key is None:\n cluster_var_ct_key = CLUSTER_VAR_CT_KEY\n \n if copy:\n adata = adata.copy()\n \n # change later \n top_genes = top_genes\n results_clustering = results_clustering\n \n cell2cluster = results_clustering[\"cell2cluster\"]\n \n filtered_data = adata[:,top_genes]\n \n # Get the clusters from the reduced data.\n clusters = {}\n\n processed_data = None\n if scipy.sparse.issparse(filtered_data.X):\n processed_data = filtered_data.X.A\n else:\n processed_data = filtered_data.X\n\n for key in cell2cluster.keys():\n cluster_indices = cell2cluster[key]\n clusters[key] = processed_data[cluster_indices,:]\n \n cluster_covariances = {}\n cluster_var_count = {} \n for i,cluster in clusters.items():\n cluster_covar, var_count = compute_covariance_and_ncomps_pct_variance(cluster, max_condition_number, pvd_pct)\n cluster_covariances[i] = cluster_covar # Ensures a PSD matrix.\n cluster_var_count[i] = var_count\n\n ### invariant based programming: put in asserts on what you expect the shape to be\n\n results_coexp = {\"cluster_covariances\": cluster_covariances, \"cluster_var_count\": cluster_var_count}\n\n if coexpression_filename:\n with open(coexpression_filename, 'wb') as f:\n pickle.dump(results_coexp, f)\n \n # write to adata.uns\n adata.uns[uns_key][cluster_covar_key] = results_coexp['cluster_covariances']\n adata.uns[uns_key][cluster_var_ct_key] = results_coexp['cluster_var_count']\n \n out = ()\n if copy:\n out += (adata,)\n if return_results:\n out += (results_coexp,)\n \n return out if len(out) > 1 else out[0] if len(out) == 1 else None " }, { "identifier": "_reconstruct_programs", "path": "sceodesic/sceo_main/reconstruct_programs.py", "snippet": "def _reconstruct_programs(adata, sparse_pca_lambda, embedding_filename=None, \n copy=False, return_results=False, results_coexp=None, \n uns_key=None, embeddings_dict_key=None, \n modules_key=None):\n \n if uns_key is None:\n uns_key = UNS_KEY\n \n if embeddings_dict_key is None:\n embeddings_dict_key = EMBEDDINGS_DICT_KEY \n \n if modules_key is None:\n modules_key = MODULES_KEY\n \n if copy:\n adata = adata.copy()\n \n if results_coexp is None:\n with open(kwargs.get('coexpression_filename'), 'rb') as f:\n results_coexp = pickle.load(f)\n \n covariance_matrices = results_coexp[\"cluster_covariances\"]\n cluster_var_count = results_coexp[\"cluster_var_count\"]\n\n cluster_eigendecomposition = {}\n for cluster_index in covariance_matrices:\n current_covariance = covariance_matrices[cluster_index]\n S,U = np.linalg.eigh(current_covariance)\n cluster_eigendecomposition[cluster_index] = [S,U]\n\n all_eigenvectors_horizontal = np.vstack([cluster_eigendecomposition[cluster_index][1][:, :cluster_var_count[cluster_index]].T for cluster_index in cluster_eigendecomposition])\n\n print(\"Concatenated eigenvectors matrix: 
\", all_eigenvectors_horizontal.shape)\n print(np.allclose(all_eigenvectors_horizontal, all_eigenvectors_horizontal.real))\n\n # set to regular pca if sparse pca is zero\n sparse_pca = None \n if sparse_pca_lambda > 0.0: \n sparse_pca = MiniBatchSparsePCA(alpha=sparse_pca_lambda)\n else:\n sparse_pca = PCA()\n\n # all imaginary parts are already zero per check (redundant)\n sparse_pca.fit(all_eigenvectors_horizontal.real) \n sparse_pca_eigenvectors = sparse_pca.components_\n print(\"sparse_pca_eigenvectors dim: \", sparse_pca_eigenvectors.shape)\n print(\"Done training\")\n embeddings = {}\n for cluster_index in cluster_eigendecomposition:\n sigma_i = covariance_matrices[cluster_index]\n M_star = sparse_pca_eigenvectors.T @ scipy.linalg.logm(sigma_i) @ sparse_pca_eigenvectors\n diagonal = np.diagonal(M_star)\n embedding = diagonal \n embeddings[cluster_index] = embedding\n\n results_embedding = {\"embedding_dictionary\":embeddings, \"cluster_svd\": cluster_eigendecomposition, \"modules\": sparse_pca_eigenvectors}\n\n if embedding_filename:\n with open(embedding_filename, 'wb') as f:\n pickle.dump(results_embedding, f)\n \n # write to adata.uns\n adata.uns[uns_key][embeddings_dict_key] = results_embedding[\"embedding_dictionary\"]\n adata.uns[uns_key][modules_key] = results_embedding[\"modules\"]\n \n out = ()\n if copy:\n out += (adata,)\n if return_results:\n out += (results_embedding,)\n \n return out if len(out) > 1 else out[0] if len(out) == 1 else None" }, { "identifier": "_write_embedding", "path": "sceodesic/sceo_main/write_embedding.py", "snippet": "def _write_embedding(adata, num_hvg, config=None, \n results_clustering=None,\n results_hvg=None, \n results_embedding=None,\n sceodesic_adata_filename=None,\n key_added=None,\n config_key=SCEO_CONFIG_KEY,\n modules_key=MODULES_KEY,\n uns_key=UNS_KEY,\n copy=False):\n \n if key_added is None:\n obsm_key_added = SCEO_EMBEDDINGS_KEY\n varm_key_added = MODULES_KEY\n else:\n obsm_key_added = key_added\n varm_key_added = key_added\n \n if copy:\n adata = adata.copy() \n \n num_hvg = num_hvg\n \n top_gene_idxs, top_gene_names = results_hvg\n \n cell2cluster = results_clustering[\"cell2cluster\"]\n \n embeddings = results_embedding[\"embedding_dictionary\"]\n modules = results_embedding[\"modules\"]\n _, top_genes = results_hvg\n \n # making the .obsm object \n observation_count = adata.n_obs \n data_embedding = np.zeros((observation_count, num_hvg))\n for i, embed in embeddings.items():\n cluster_indices = cell2cluster[i]\n for cell in cluster_indices:\n data_embedding[cell, :] = embed\n \n data_embedding, modules = order_by_second_moment(data_embedding, modules)\n \n # make the .varm data matrix \n not_top_genes = adata.var_names[~np.isin(adata.var_names, top_genes)]\n tdf = pd.DataFrame(modules, index=top_genes)\n ntdf = pd.DataFrame(np.zeros((adata.shape[1]-len(top_genes), num_hvg)), index=not_top_genes)\n edf = pd.DataFrame(index=adata.var_names)\n varm = edf.join(pd.concat([tdf, ntdf]))\n\n adata.varm[varm_key_added] = varm.values\n adata.obsm[obsm_key_added] = data_embedding\n adata.uns[uns_key][modules_key] = modules\n results_embedding[\"modules\"] = modules\n \n # save config settings to anndata object \n if config:\n adata.uns[config_key] = config\n\n if sceodesic_adata_filename:\n adata.write_h5ad(sceodesic_adata_filename)\n \n return adata" } ]
import numpy as np import pandas as pd import scipy import scanpy as sc import anndata import fbpca import sklearn import os, sys, yaml, pickle import functools, random import time from numpy.linalg import eig, eigh from scipy.linalg import logm, svd, expm from scipy.stats import mannwhitneyu, pearsonr, spearmanr, kendalltau, rankdata from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA, PCA from sklearn.neural_network import MLPClassifier from sklearn.linear_model import LogisticRegression from sklearn.cluster import KMeans from .utils import fn_timer from .sceo_io.sceo_command_line_parser import parse_sceo_command_line_args from .sceo_io.sceo_load_input import load_input from .helper import compute_covariance_and_ncomps_pct_variance from .sceo_main.get_cell_cohorts import _get_cell_cohorts from .sceo_main.get_locally_variable_genes import _get_locally_variable_genes from .sceo_main.estimate_covariances import _estimate_covariances from .sceo_main.reconstruct_programs import _reconstruct_programs from .sceo_main.write_embedding import _write_embedding
5,936
## package-specific modules # Default configuration DEFAULT_CONFIG = { 'num_clusters': 500, 'num_hvg': 300, 'max_condition_number': 50, 'sparse_pca_lambda': 0.03, 'stratify_clustering_by_columns': 'none', 'filepath': '', 'num_hvg_per_cluster': 100, 'pvd_pct': 0.90, 'do_global_hvg': False, # for very advanced users 'n_init': 1 # K-Means } def main(): args = parse_sceo_command_line_args(DEFAULT_CONFIG) ### TESTING ### print("config:", args.config) ### TESTING ### output_identifier = "%s_%d_hvgs_%s_clusters_%g_sparsity" % (args.output_prefix, args.num_hvg, str(args.config.get('num_clusters')), args.config.get('sparse_pca_lambda')) args.config['output_identifier'] = output_identifier filepath = args.config.get('filepath', DEFAULT_CONFIG['filepath']) + '/' #if the backslash is extra, it won't hurt args.config['clustering_filename'] = f"{filepath}clustering_results_{output_identifier}.pkl" args.config['hvg_filename'] = f"{filepath}hvg_results_{output_identifier}.pkl" args.config['coexpression_filename'] = f"{filepath}coexpression_results_{output_identifier}.pkl" args.config['embedding_filename'] = f"{filepath}embedding_results_{output_identifier}.pkl" # in case we want a custom output name for our output file if args.adata_output_name: args.config['sceodesic_adata_filename'] = args.adata_output_name else: args.config['sceodesic_adata_filename'] = f"{filepath}sceodesic_adata_results_{output_identifier}.h5ad" # run info file output run_info_file_fname = f"{filepath}run_info_{output_identifier}.yaml" results_coexp = None results_embedding = None # Data preprocessing. adata = load_input(args.inp_data) # Flag 1: Clustering if args.action <= 1: print("At FLAG 1: clustering") num_clusters = args.config['num_clusters'] stratify_cols = args.config['stratify_clustering_by_columns'] num_hvg = args.config['num_hvg'] n_init = args.config['n_init'] clustering_filename = args.config['clustering_filename'] clustering_results_dict = _get_cell_cohorts( adata, num_clusters, stratify_cols=stratify_cols, num_hvg=num_hvg, n_init=n_init, clustering_filename=clustering_filename, copy=False, return_results=True ) # Flag 2: Compute Covariances if args.action <= 2: print("At FLAG 2: compute covariances") # compute hvg num_hvg = args.config['num_hvg'] do_global_hvg = args.config['do_global_hvg'], num_hvg_per_cluster = args.config['num_hvg_per_cluster'] hvg_filename = args.config['hvg_filename'] top_genes, top_gene_names = _get_locally_variable_genes( adata, num_hvg, num_hvg_per_cluster=num_hvg_per_cluster, global_hvg=do_global_hvg, hvg_filename=hvg_filename, copy=False, return_results=True, clustering_results=clustering_results_dict ) # compute coexpression results max_condition_number = args.config['max_condition_number'] pvd_pct = args.config['pvd_pct'] coexpression_filename = args.config['coexpression_filename'] results_coexp = _estimate_covariances( adata, max_condition_number, pvd_pct=pvd_pct, coexpression_filename=coexpression_filename, copy=False, return_results=True, top_genes=top_gene_names, results_clustering=clustering_results_dict ) # Flag 3: Embeddings/Modules if args.action <= 3: print("At FLAG 3: common PCA") sparse_pca_lambda = args.config['sparse_pca_lambda'] embedding_filename = args.config['embedding_filename']
#!/usr/bin/env python ## package-specific modules # Default configuration DEFAULT_CONFIG = { 'num_clusters': 500, 'num_hvg': 300, 'max_condition_number': 50, 'sparse_pca_lambda': 0.03, 'stratify_clustering_by_columns': 'none', 'filepath': '', 'num_hvg_per_cluster': 100, 'pvd_pct': 0.90, 'do_global_hvg': False, # for very advanced users 'n_init': 1 # K-Means } def main(): args = parse_sceo_command_line_args(DEFAULT_CONFIG) ### TESTING ### print("config:", args.config) ### TESTING ### output_identifier = "%s_%d_hvgs_%s_clusters_%g_sparsity" % (args.output_prefix, args.num_hvg, str(args.config.get('num_clusters')), args.config.get('sparse_pca_lambda')) args.config['output_identifier'] = output_identifier filepath = args.config.get('filepath', DEFAULT_CONFIG['filepath']) + '/' #if the backslash is extra, it won't hurt args.config['clustering_filename'] = f"{filepath}clustering_results_{output_identifier}.pkl" args.config['hvg_filename'] = f"{filepath}hvg_results_{output_identifier}.pkl" args.config['coexpression_filename'] = f"{filepath}coexpression_results_{output_identifier}.pkl" args.config['embedding_filename'] = f"{filepath}embedding_results_{output_identifier}.pkl" # in case we want a custom output name for our output file if args.adata_output_name: args.config['sceodesic_adata_filename'] = args.adata_output_name else: args.config['sceodesic_adata_filename'] = f"{filepath}sceodesic_adata_results_{output_identifier}.h5ad" # run info file output run_info_file_fname = f"{filepath}run_info_{output_identifier}.yaml" results_coexp = None results_embedding = None # Data preprocessing. adata = load_input(args.inp_data) # Flag 1: Clustering if args.action <= 1: print("At FLAG 1: clustering") num_clusters = args.config['num_clusters'] stratify_cols = args.config['stratify_clustering_by_columns'] num_hvg = args.config['num_hvg'] n_init = args.config['n_init'] clustering_filename = args.config['clustering_filename'] clustering_results_dict = _get_cell_cohorts( adata, num_clusters, stratify_cols=stratify_cols, num_hvg=num_hvg, n_init=n_init, clustering_filename=clustering_filename, copy=False, return_results=True ) # Flag 2: Compute Covariances if args.action <= 2: print("At FLAG 2: compute covariances") # compute hvg num_hvg = args.config['num_hvg'] do_global_hvg = args.config['do_global_hvg'], num_hvg_per_cluster = args.config['num_hvg_per_cluster'] hvg_filename = args.config['hvg_filename'] top_genes, top_gene_names = _get_locally_variable_genes( adata, num_hvg, num_hvg_per_cluster=num_hvg_per_cluster, global_hvg=do_global_hvg, hvg_filename=hvg_filename, copy=False, return_results=True, clustering_results=clustering_results_dict ) # compute coexpression results max_condition_number = args.config['max_condition_number'] pvd_pct = args.config['pvd_pct'] coexpression_filename = args.config['coexpression_filename'] results_coexp = _estimate_covariances( adata, max_condition_number, pvd_pct=pvd_pct, coexpression_filename=coexpression_filename, copy=False, return_results=True, top_genes=top_gene_names, results_clustering=clustering_results_dict ) # Flag 3: Embeddings/Modules if args.action <= 3: print("At FLAG 3: common PCA") sparse_pca_lambda = args.config['sparse_pca_lambda'] embedding_filename = args.config['embedding_filename']
results_embedding = _reconstruct_programs(
7
2023-11-10 12:28:33+00:00
8k
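The compute_covariance_and_ncomps_pct_variance helper in this record picks, per cluster, the number of principal components needed to explain pvd_pct of the variance in the sample covariance. A minimal sketch of that eigenvalue count follows; the condition-number normalization step is omitted here, and the function name and random input are illustrative only.

import numpy as np

def ncomps_for_pct_variance(data: np.ndarray, pvd_pct: float = 0.9) -> int:
    # Eigenvalues of the cell x gene sample covariance, sorted descending;
    # keep the smallest count whose cumulative share reaches pvd_pct.
    cov = np.cov(data, rowvar=False)
    eigvals = np.linalg.eigh(cov)[0]  # ascending order
    cum_share = np.cumsum(eigvals[::-1]) / eigvals.sum()
    return int(np.argmax(cum_share >= pvd_pct)) + 1

rng = np.random.default_rng(0)
print(ncomps_for_pct_variance(rng.normal(size=(200, 10))))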
iramluism/basel
tests/unit_tests/reports/reports_test.py
[ { "identifier": "Component", "path": "basel/components/components.py", "snippet": "class Component(metaclass=abc.ABCMeta):\n def __init__(\n self,\n name: str,\n nodes: List[Node] = None,\n instability: Optional[float] = 1,\n abstraction: Optional[float] = 1,\n error: Optional[float] = 1,\n ) -> None:\n self.name = name\n self.nodes = {}\n self.instability = instability\n self.abstraction = abstraction\n self.error = error\n\n for node in nodes or []:\n self.add_node(node)\n\n def set_error(self, error):\n self.error = error\n\n def set_instability(self, instability):\n self.instability = instability\n\n def set_abstraction(self, abstraction):\n self.abstraction = abstraction\n\n def get_classes(self):\n classes = []\n nodes = list(self.nodes.values())\n\n while nodes:\n node = nodes.pop(0)\n\n if not node:\n break\n\n children = node.get_children()\n nodes.extend(children)\n\n if isinstance(node, ClassNode):\n classes.append(node)\n\n return classes\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}:{self.name}>\"\n\n def has_node(self, node_name):\n return node_name in self.nodes\n\n def add_node(self, node: Node):\n self.nodes[node.name] = node\n\n def get_node(self, node_name):\n return self.nodes.get(node_name)\n\n def __iter__(self):\n for node in self.nodes.values():\n yield node\n\n def __eq__(self, component):\n if not component:\n return False\n\n equal_names = self.name == component.name\n\n for other_node in component:\n self_node = self.get_node(other_node.name)\n if other_node != self_node:\n return False\n\n return equal_names\n\n def __ne__(self, component):\n return not self.__eq__(component)" }, { "identifier": "Link", "path": "basel/components/links.py", "snippet": "class Link(metaclass=abc.ABCMeta):\n def __init__(self, source: Node, target: Node) -> None:\n self.source = source\n self.target = target\n\n def __eq__(self, other_link):\n return self.source == other_link.source and self.target == other_link.target" }, { "identifier": "Loader", "path": "basel/loaders/loaders.py", "snippet": "class Loader(metaclass=abc.ABCMeta):\n components: Dict[str, Component]\n\n def __init__(\n self,\n parser: Parser,\n components: Optional[List[Component]] = None,\n links: List[Link] = None,\n ) -> None:\n self.links = links or []\n self.parser = parser\n self.components = {}\n\n for comp in components or []:\n self.add_component(comp)\n\n @abc.abstractmethod\n def load_components(self, *args, **kwargs):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def load_links(self, *args, **kwargs):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def calculate_error(self):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def calculate_instability(self):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def calculate_abstraction(self):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def calculate_mean_error(self):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def calculate_mean_abstraction(self):\n raise NotImplementedError()\n\n @abc.abstractmethod\n def calculate_mean_instability(self):\n raise NotImplementedError()\n\n def get_component(self, component_name):\n return self.components.get(component_name)\n\n def get_components(self):\n return list(self.components.values())\n\n def add_component(self, component: Component):\n self.components[component.name] = component\n\n def link_component(self, source, target):\n link = Link(source, target)\n self.links.append(link)\n\n def get_links(self):\n return self.links" }, { "identifier": 
"ASReport", "path": "basel/reports/as_plane.py", "snippet": "class ASReport(Report):\n name: str = \"AS plane\"" }, { "identifier": "LinkReport", "path": "basel/reports/reports.py", "snippet": "class LinkReport(Report):\n name: str = \"Link Report\"" }, { "identifier": "Reporter", "path": "basel/reports/reporter.py", "snippet": "class Reporter:\n def __init__(self, loader: Optional[Loader] = None):\n self.set_loader(loader)\n\n def set_loader(self, loader):\n self._loader = loader\n\n def _filter(self, obj: dict, filters: ReportFilter):\n conditions = dict.fromkeys(filters)\n\n operations = {\n \"eq\": lambda a, b: a == b,\n \"not eq\": lambda a, b: a != b,\n \"match\": lambda a, b: pathlib.Path(a).match(b),\n \"match in\": lambda a, b: any(pathlib.Path(a).match(r) for r in b),\n \"gte\": lambda a, b: a >= b,\n \"lte\": lambda a, b: a <= b,\n \"lt\": lambda a, b: a < b,\n \"gt\": lambda a, b: a > b,\n }\n\n for filter_field, filter_value in filters.items():\n obj_value = getattr(obj, filter_field, None)\n\n op = \"eq\"\n if isinstance(filter_value, list):\n op, filter_value = filter_value\n\n operation = operations.get(op)\n if not operation:\n raise ValueError(\n f\"Not exists the operator {op},\"\n \"posibles value {list(operations.keys())}\"\n )\n\n conditions[filter_field] = operation(obj_value, filter_value)\n\n match_all_conditions = all(conditions.values())\n\n return match_all_conditions\n\n def get_component_links_report(\n self, filters: Optional[ReportFilter] = None\n ) -> LinkReport:\n self._loader.load_links()\n\n out_deps = {}\n for link in self._loader.get_links():\n deps = out_deps.get(link.source.name, [])\n deps.append(link.target.name)\n out_deps[link.source.name] = deps\n\n data = []\n\n labels = {}\n for idx, comp in enumerate(self._loader.get_components()):\n if filters and not self._filter(comp, filters):\n continue\n\n label = str(idx + 1)\n labels[comp.name] = label\n\n for eval_comp, idx in labels.items():\n row = [idx]\n for comp in labels.keys():\n value = 0\n deps = out_deps.get(comp, [])\n if eval_comp in deps:\n value = 1\n row.append(value)\n\n data.append(tuple(row))\n\n columns = [\"Components\"] + list(labels.values())\n\n footer = \"\\nLabels:\\n\"\n for comp_name, label in labels.items():\n footer += f\"{label}: {comp_name}\\n\"\n\n report = LinkReport(columns=columns, data=data, footer=footer)\n\n return report\n\n def get_as_report(self, filters: Optional[ReportFilter] = None) -> ASReport:\n data = []\n columns = [\"Component\", \"I\", \"A\", \"E\"]\n\n self._loader.calculate_abstraction()\n self._loader.calculate_instability()\n self._loader.calculate_error()\n\n components = self._loader.get_components()\n\n for component in components:\n if filters and not self._filter(component, filters):\n continue\n\n name = component.name\n instability = component.instability\n abstraction = component.abstraction\n error = component.error\n\n row = (name, instability, abstraction, error)\n\n data.append(row)\n\n mean_error = self._loader.calculate_mean_error()\n mean_abstraction = self._loader.calculate_mean_abstraction()\n mean_instability = self._loader.calculate_mean_instability()\n\n data.append(None)\n data.append((\"Mean\", mean_instability, mean_abstraction, mean_error))\n\n report = ASReport(\n columns=columns,\n data=data,\n )\n\n return report\n\n def format_report(\n self, report: Report, report_format: Optional[ReportFormat] = None\n ):\n _report_formats = {\n ReportFormat.BASIC: (self._format_basic, \"*\"),\n ReportFormat.HTML: (self._format_html, 
\"*\"),\n ReportFormat.MEAN_I: (self._format_instability_mean, [ASReport.name]),\n ReportFormat.MEAN_A: (self._format_abstraction_mean, [ASReport.name]),\n ReportFormat.MEAN_E: (self._format_error_mean, [ASReport.name]),\n ReportFormat.MEAN: (self._format_error_mean, [ASReport.name]),\n ReportFormat.UML: (self._format_uml, [LinkReport.name]),\n ReportFormat.UML_IMG: (self._format_uml_img, [LinkReport.name]),\n }\n\n if not report_format:\n report_format = ReportFormat.BASIC\n\n format_fn, available_reports = _report_formats.get(report_format)\n\n if available_reports != \"*\" and report.name not in available_reports:\n raise ValueError(\n f\"The format {report_format} is not valid for {report.name}\"\n )\n\n return format_fn(report)\n\n def _format_basic(self, report: Report) -> str:\n table = self._get_table_from_report(report, report_format=ReportFormat.BASIC)\n return table\n\n def _format_html(self, report: Report) -> str:\n table = self._get_table_from_report(report, report_format=ReportFormat.HTML)\n return table\n\n def _format_instability_mean(self, report: Report) -> str:\n totals = report.data[-1]\n i_total = totals[1]\n return i_total\n\n def _format_abstraction_mean(self, report: Report) -> str:\n totals = report.data[-1]\n i_total = totals[2]\n return i_total\n\n def _format_error_mean(self, report: Report) -> str:\n totals = report.data[-1]\n i_total = totals[3]\n return i_total\n\n def _format_uml(self, report: LinkReport) -> str:\n labels = {}\n if not report.footer:\n raise TypeError(\"Missing Footer\")\n\n for text in report.footer.split(\"\\n\"):\n if re.match(r\"^\\d+:.*$\", text):\n idx, comp_name = text.split(\": \")\n labels[idx] = comp_name\n\n source_depx_comp_idx = report.columns[1:]\n uml_staments = []\n\n for label, component in labels.items():\n uml_staments.append(f\"component [{component}]\")\n\n for deps in report.data:\n comp_name = deps[0]\n for idx, dep in enumerate(deps[1:]):\n if not dep:\n continue\n\n from_comp_idx = source_depx_comp_idx[idx]\n to_comp_idx = comp_name\n\n from_comp_name = labels.get(from_comp_idx)\n to_comp_name = labels.get(to_comp_idx)\n uml_staments.append(f\"[{from_comp_name}] --> [{to_comp_name}]\")\n\n uml_staments = [\"@startuml\"] + uml_staments + [\"@enduml\"]\n\n uml_text = \"\\n\".join(uml_staments)\n return uml_text\n\n def _format_uml_img(self, report):\n uml = self._format_uml(report)\n plant_uml = PlantUML(url=config.PLANTUML_URL)\n uml_img = plant_uml.processes(uml)\n return uml_img\n\n def _get_table_from_report(\n self, report: Report, report_format: ReportFormat = ReportFormat.BASIC\n ):\n _tabulate_formats = {\n ReportFormat.BASIC: \"simple\",\n ReportFormat.HTML: \"html\",\n }\n\n data = []\n report_data_len = len(report.data)\n for i, row in enumerate(report.data):\n if not row and (i + 1) != report_data_len:\n data.append(SEPARATING_LINE)\n else:\n data.append(list(row))\n\n tabulate_format = _tabulate_formats.get(report_format, ReportFormat.BASIC)\n table = tabulate(data, headers=report.columns, tablefmt=tabulate_format)\n\n if report.footer:\n table += report.footer\n\n return table" }, { "identifier": "ReportFormat", "path": "basel/reports/formats.py", "snippet": "class ReportFormat(str, Enum):\n BASIC = \"basic\"\n HTML = \"html\"\n MEAN_I = \"mean_i\"\n MEAN_A = \"mean_a\"\n MEAN_E = \"mean_e\"\n MEAN = \"mean\"\n UML = \"uml\"\n UML_IMG = \"img\"" } ]
from unittest.mock import Mock from basel.components import Component from basel.components import Link from basel.loaders import Loader from basel.reports import ASReport from basel.reports import LinkReport from basel.reports import Reporter from basel.reports import ReportFormat import pytest
4,401
(0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_B", 0, 1, 0), ("Component_C", 0.25, 0.5, 0.25), ("Component_D", 0.7, 0, 0.7), ("Component_E", 0, 0, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), None, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"name": ["match", "Component_A"]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_B", 0, 1, 0), ("Component_C", 0.25, 0.5, 0.25), ("Component_D", 0.7, 0, 0.7), ("Component_E", 0, 0, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"name": ["match in", ["Component_*", "Component_E"]]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_B", 0, 1, 0), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"abstraction": 1}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_D", 0.7, 0, 0.7), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"instability": ["gte", 0.7]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_B", 0, 1, 0), ("Component_C", 0.25, 0.5, 0.25), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"error": ["lte", 0.5]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_C", 0.25, 0.5, 0.25), ("Component_D", 0.7, 0, 0.7), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"instability": ["not eq", 0]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_D", 0.7, 0, 0.7), ("Component_E", 0, 0, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"error": ["gt", 0.5]}, ), ], ) def test_get_as_report(components, means, expected_report, filters):
MockComponent = Mock(spec=Component) MOCK_COMPONENTS_LIST = [ Component(name="Component_A", instability=1, abstraction=1, error=1), Component(name="Component_B", instability=0, abstraction=1, error=0), Component(name="Component_C", instability=0.25, abstraction=0.5, error=0.25), Component(name="Component_D", instability=0.7, abstraction=0, error=0.7), Component(name="Component_E", instability=0, abstraction=0, error=1), ] @pytest.mark.parametrize( "components,means,expected_report,filters", [ ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_B", 0, 1, 0), ("Component_C", 0.25, 0.5, 0.25), ("Component_D", 0.7, 0, 0.7), ("Component_E", 0, 0, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), None, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"name": ["match", "Component_A"]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_B", 0, 1, 0), ("Component_C", 0.25, 0.5, 0.25), ("Component_D", 0.7, 0, 0.7), ("Component_E", 0, 0, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"name": ["match in", ["Component_*", "Component_E"]]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_B", 0, 1, 0), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"abstraction": 1}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_D", 0.7, 0, 0.7), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"instability": ["gte", 0.7]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_B", 0, 1, 0), ("Component_C", 0.25, 0.5, 0.25), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"error": ["lte", 0.5]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_C", 0.25, 0.5, 0.25), ("Component_D", 0.7, 0, 0.7), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"instability": ["not eq", 0]}, ), ( MOCK_COMPONENTS_LIST, (0.39, 0.51, 0.59), ASReport( columns=["Component", "I", "A", "E"], data=[ ("Component_A", 1, 1, 1), ("Component_D", 0.7, 0, 0.7), ("Component_E", 0, 0, 1), None, ("Mean", 0.39, 0.51, 0.59), ], ), {"error": ["gt", 0.5]}, ), ], ) def test_get_as_report(components, means, expected_report, filters):
mock_loader = Mock(spec=Loader)
2
2023-11-18 13:47:55+00:00
8k
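Annotation: the `Reporter._filter` method quoted in this record's context dispatches comparison operators ("eq", "match", "gte", ...) through a dict of lambdas. Below is a minimal standalone sketch of that pattern; the `Item` class and the sample data are hypothetical stand-ins, not basel's own API.

# Sketch of the operator-dispatch filtering used by Reporter._filter (assumed simplification).
import pathlib
from dataclasses import dataclass

@dataclass
class Item:
    name: str
    instability: float

OPERATIONS = {
    "eq":       lambda a, b: a == b,
    "not eq":   lambda a, b: a != b,
    "match":    lambda a, b: pathlib.Path(a).match(b),
    "match in": lambda a, b: any(pathlib.Path(a).match(r) for r in b),
    "gte":      lambda a, b: a >= b,
    "lte":      lambda a, b: a <= b,
}

def matches(obj, filters):
    """Return True when obj satisfies every filter condition."""
    for field, spec in filters.items():
        op, value = "eq", spec
        if isinstance(spec, list):          # e.g. {"instability": ["gte", 0.7]}
            op, value = spec
        if not OPERATIONS[op](getattr(obj, field, None), value):
            return False
    return True

items = [Item("Component_A", 1.0), Item("Component_D", 0.7), Item("Component_E", 0.0)]
print([i.name for i in items if matches(i, {"instability": ["gte", 0.7]})])
# ['Component_A', 'Component_D']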
KevinXu02/ControlledDreamGaussian
frankmocap/renderer/visualizer.py
[ { "identifier": "viewer2D", "path": "frankmocap/renderer/viewer2D.py", "snippet": "def __ValidateNumpyImg(inputImg):\ndef ImShow(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):\ndef ImgSC(inputImg, waitTime=1, bConvRGB2BGR=False,name='image', scale=1.0):\ndef Vis_Bbox_minmaxPt(inputImg, min_pt, max_pt, color=None):\ndef Vis_Bbox_XYXY(inputImg, bbox_xyxy, color=None):\ndef Vis_Bbox(inputImg, bbox_xyhw, color= None):\ndef Vis_Bbox_XYWH(inputImg, bbox_xyhw, color= None):\ndef Vis_CocoBbox(inputImg, coco_annot):\ndef Vis_CocoSkeleton(keypoints, image=None):\ndef Vis_Densepose(inputImg, coco_annot):\ndef Vis_Skeleton_2D_H36m(pt2d, image = None, color=None):\ndef Vis_Skeleton_2D_SMC19(pt2d, image = None, color=None):\ndef Vis_Skeleton_2D_SMC19(pt2d, image = None, color=None):\ndef Vis_Skeleton_2D_Hand(pt2d, image = None, color=None):\ndef Vis_Skeleton_2D_smplCOCO(pt2d, pt2d_visibility=None, image = None, blankImSize = 1000, bVis = False, color=None, bBoxWidth=None):\ndef Vis_Skeleton_2D_smpl24(pt2d, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_smpl45(pt2d, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_MPII(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_foot(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_Openpose25(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_Openpose_hand(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_Openpose18(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_SPIN24(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_SPIN49(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None):\ndef Vis_Skeleton_2D_coco(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None , offsetXY =None):\ndef Vis_Skeleton_2D_general(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None , offsetXY =None):\ndef Vis_Skeleton_3Dto2D_general(pt2d, pt2d_visibility = None, image = None, bVis = False, color=None, offsetXY =None):\n DP = dp_utils.DensePoseMethods()" }, { "identifier": "glViewer", "path": "frankmocap/renderer/glViewer.py", "snippet": "ROOT_OFFSET_DIST = 160\nBACKGROUND_IMAGE_PLANE_DEPTH=500\nHOLDEN_DATA_SCALING = 5\n K = np.array([[2000, 0, 960],[0, 2000, 540],[0,0,1]]) #MTC default camera. 
for 1920 x 1080 input image\n K = g_camView_K.copy()\n K = K/Kscale\n P = np.array([-texWidth, -texHeight, d])\n P = [texWidth, -texHeight, d]\n P = [texWidth, texHeight, d]\n P = [-texWidth, texHeight, d]\n P = np.array([0, 0, 1])\n P = np.matmul(K_inv,P)\n P = P / P[2]\n P = [texWidth, 0, 1]\n P = np.matmul(K_inv,P)\n P = P / P[2]\n P = [texWidth, texHeight, 1]\n P = np.matmul(K_inv,P)\n P = P / P[2]\n P = [0, texHeight, 1]\n P = np.matmul(K_inv,P)\n P = P / P[2]\n ROT_PIVOT = np.array([0.003501, 0.475611, 0.115576])\n X = np.load('/ssd/codes/pytorch_motionSynth/motionsynth_data/data/processed/panoptic_npz/' + fileName)['clips'] #(17944, 240, 73)\n X = np.swapaxes(X, 1, 2).astype(np.float32) #(17944, 73, 240)\ndef init():\ndef init_minimum():\ndef RenderText(testStr):\ndef RenderDomeFloor():\ndef setFree3DView():\ndef setCamView_K(K):\ndef setCamView_K_DefaultForMTC():\ndef setCameraView():\ndef SetOrthoCamera(bOrtho=True):\ndef setCameraViewOrth():\ndef setRenderOutputSize(imWidth, imHeight):\ndef setWindowSize(new_width, new_height):\ndef reshape(width, height):\ndef SaveScenesToFile():\ndef SaveCamViewInfo():\ndef LoadCamViewInfo():\ndef PuttingObjectCenter():\ndef keyboard(key, x, y):\ndef mouse(button, state, x, y):\ndef motion(x, y):\ndef setBackgroundTexture(img):\ndef SetCameraPoses(camRots, camPoses):\ndef SetPtCloud(ptCloud, ptCloudColor = None):\ndef DrawBackgroundOrth():\ndef DrawBackground():\ndef specialkeys(key, x, y):\ndef init_gl_util():\ndef init_gl(maxIter=-10):\ndef DrawFaces():\ndef DrawHands():\ndef drawface_70(joints, color):\ndef SetMeshColor(colorName='blue'):\ndef DrawMeshes():\ndef drawNormal(normal3D, rootPt, color, normalLength=40):\ndef drawhand_21(joints, color, normal=None):\ndef DrawPosOnly():\ndef DrawTrajectory():\ndef DrawSkeletons():\ndef DrawSkeletonsGT():\ndef drawbody_LSP14(joints, color):\ndef drawbody_SMPLCOCO19(joints, color, normal=None):\ndef drawbody_SMC19(joints, color, normal=None):\ndef drawbody_SMPLCOCO_TotalCap26(joints, color, normal=None):\ndef drawbody_joint14(joints, color, normal=None):\ndef RenderString(str):\ndef draw_speaking_joint19(joints, bSpeak, word, color, normal=None):\ndef draw_speaking_joint22(joints, bSpeak, word, color, offset=20, normal=None):\ndef draw_speaking_general(facePt, bSpeak, word, color, offsetLength=20, offset=None):\ndef drawbody_joint_ptOnly(joints, color, normal=None):\ndef drawbody_joint32_human36m(joints, color, normal=None):\ndef drawbody_joint17_human36m(joints, color, normal=None):\ndef drawbody_joint24_smplLBS(joints, color, normal=None):\ndef drawbody_joint31(joints, color, normal=None):\ndef drawbody_jointAdam(joints, color, normal=None, ignore_root=False):\ndef drawbody_jointMTC86(joints, color, normal=None, ignore_root=False):\ndef drawbody_jointSpin49(joints, color, normal=None, ignore_root=False):\ndef drawbody_jointOpenPose18(joints, color, normal=None, ignore_root=False):\ndef drawbody_jointSpin24(joints, color, normal=None, ignore_root=False):\ndef drawhand_joint21(joints, color, normal=None, ignore_root=False, type = 'hand_smplx'):\ndef drawbody_joint22(joints, color, normal=None, ignore_root=False):\ndef set_Holden_Trajectory_3(traj_list, initRot = None, initTrans=None ):\ndef setSpeech_binary(speech_list):\ndef setSpeechGT_binary(speech_list):\ndef setSpeech(speech_list):\ndef setSpeech_withRoot(speech_list, root_list):\ndef setSpeechGT(speech_list):\ndef showFace(face_list):\ndef setFace(face_list):\ndef setFaceNormal(faceNormal_list):\ndef setBodyNormal(bodyNormal_list):\ndef 
setPosOnly(pos_list):\ndef setHand_left(hand_list):\ndef setHand_right(hand_list):\ndef setTrajectory(traj_list):\ndef addSkeleton(skel_list, jointType=None, colorRGB=None): \ndef resetSkeleton(): \ndef setSkeleton(skel_list, jointType=None, colorRGB=None, bReset= True):\ndef showSkeleton(skel_list):\ndef setMeshData(mesh_list, bComputeNormal = False):\ndef setFrameLimit():\ndef resetFrameLimit():\ndef resetMeshData(): \ndef addMeshData(mesh_list, bComputeNormal = False):\ndef setFaceParmData(faceParam_list, bComputeNormal = True):\ndef setFaceParmDataWithTrans(faceParam_list, bComputeNormal = True, trans= None, rot = None):\ndef ComputeFaceNormal(face_list):\ndef ComputeBodyNormal_panoptic(body_list):\ndef getFaceRootCenter():\ndef GetFaceMesh(faceModel, faceParam_list, bComputeNormal = True, bApplyRot = False, bApplyTrans = False, bShowFaceId = False, bApplyRotFlip=False):\ndef LoadHagglingDataKeypoints(fileName):\ndef LoadHagglingData(fileName):\ndef LoadHagglingData_Caller():\ndef setSaveOnlyMode(mode):\ndef setSave(mode):\ndef setSaveFolderName(folderName):\ndef setSaveImgName(imgName):\ndef LoadObjMesh(filename):\ndef setupRotationView():\ndef DrawPyramid(camWidth,camHeight,camDepth,lineWith=1):\ndef DrawPtCloud():\ndef DrawCameras():\ndef renderscene():\ndef show_SMPL_sideView(bSaveToFile = False, bResetSaveImgCnt=True, countImg = True, bReturnRendered= True):\ndef show_SMPL_youtubeView(bSaveToFile = False, bResetSaveImgCnt=True, countImg = True, zoom = 230, bReturnRendered= False):\ndef show_SMPL_cameraView(bSaveToFile = False, bResetSaveImgCnt=True, countImg = True, bShowBG = True, bReturnRendered= False):\ndef show_SMPL(bSaveToFile = False, bResetSaveImgCnt = True, countImg = True, bShowBG = True, zoom = 230, mode = 'camera'):\ndef render_on_image(saveDir, saveFileName, inputImage, scaleFactor=1, is_showBackground=True):\ndef VisSkeleton_single(skel):\ndef setNearPlane(p):\ndef show(maxIter=-10):" }, { "identifier": "meshRenderer", "path": "frankmocap/renderer/meshRenderer.py", "snippet": "class meshRenderer(glRenderer):\n\n def __init__(self, width=1600, height=1200, name='GL Renderer',\n # program_files=['renderer/shaders/simple140.fs', 'renderer/shaders/simple140.vs'],\n # program_files=['renderer/shaders/normal140.fs', 'renderer/shaders/normal140.vs'],\n # program_files=['renderer/shaders/geo140.fs', 'renderer/shaders/geo140.vs'],\n render_mode =\"normal\", #color, geo, normal\n color_size=1, ms_rate=1):\n\n self.render_mode = render_mode\n self.program_files ={}\n self.program_files['color'] = ['renderer/shaders/simple140.fs', 'renderer/shaders/simple140.vs']\n self.program_files['normal'] = ['renderer/shaders/normal140.fs', 'renderer/shaders/normal140.vs']\n self.program_files['geo'] = ['renderer/shaders/colorgeo140.fs', 'renderer/shaders/colorgeo140.vs']\n\n glRenderer.__init__(self, width, height, name, self.program_files[render_mode], color_size, ms_rate)\n\n def setRenderMode(self, render_mode):\n \"\"\"\n Set render mode among ['color', 'normal', 'geo']\n \"\"\"\n if self.render_mode == render_mode:\n return\n \n self.render_mode = render_mode\n self.initShaderProgram(self.program_files[render_mode])\n\n\n def drawMesh(self):\n\n if self.vertex_dim is None:\n return\n # self.draw_init()\n\n glColor3f(1,1,0)\n glUseProgram(self.program)\n \n mvMat = glGetFloatv(GL_MODELVIEW_MATRIX)\n pMat = glGetFloatv(GL_PROJECTION_MATRIX)\n # mvpMat = pMat*mvMat\n\n self.model_view_matrix = mvMat\n self.projection_matrix = pMat\n\n # glUniformMatrix4fv(self.model_mat_unif, 1, 
GL_FALSE, self.model_view_matrix.transpose())\n # glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix.transpose())\n glUniformMatrix4fv(self.model_mat_unif, 1, GL_FALSE, self.model_view_matrix)\n glUniformMatrix4fv(self.persp_mat_unif, 1, GL_FALSE, self.projection_matrix)\n\n # Handle vertex buffer\n glBindBuffer(GL_ARRAY_BUFFER, self.vertex_buffer)\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, self.vertex_dim, GL_DOUBLE, GL_FALSE, 0, None)\n\n # # Handle normal buffer\n glBindBuffer(GL_ARRAY_BUFFER, self.normal_buffer)\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 3, GL_DOUBLE, GL_FALSE, 0, None)\n\n # # Handle color buffer\n glBindBuffer(GL_ARRAY_BUFFER, self.color_buffer)\n glEnableVertexAttribArray(2)\n glVertexAttribPointer(2, 3, GL_DOUBLE, GL_FALSE, 0, None)\n \n\n if True:#self.meshindex_data:\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.index_buffer) #Note \"GL_ELEMENT_ARRAY_BUFFER\" instead of GL_ARRAY_BUFFER\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.meshindex_data, GL_STATIC_DRAW)\n\n # glDrawArrays(GL_TRIANGLES, 0, self.n_vertices)\n glDrawElements(GL_TRIANGLES, len(self.meshindex_data), GL_UNSIGNED_INT, None) #For index array (mesh face data)\n glDisableVertexAttribArray(0)\n glBindBuffer(GL_ARRAY_BUFFER, 0)\n\n glUseProgram(0)" }, { "identifier": "convert_smpl_to_bbox", "path": "frankmocap/mocap_utils/coordconv.py", "snippet": "def convert_smpl_to_bbox(data3D, scale, trans, bAppTransFirst=False):\n data3D = data3D.copy()\n resnet_input_size_half = 224 *0.5\n if bAppTransFirst: # Hand model\n data3D[:,0:2] += trans\n data3D *= scale # apply scaling\n else:\n data3D *= scale # apply scaling\n data3D[:,0:2] += trans\n \n data3D*= resnet_input_size_half # 112 is originated from hrm's input size (224,24)\n # data3D[:,:2]*= resnet_input_size_half # 112 is originated from hrm's input size (224,24)\n return data3D" }, { "identifier": "convert_bbox_to_oriIm", "path": "frankmocap/mocap_utils/coordconv.py", "snippet": "def convert_bbox_to_oriIm(data3D, boxScale_o2n, bboxTopLeft, imgSizeW, imgSizeH):\n data3D = data3D.copy()\n resnet_input_size_half = 224 *0.5\n imgSize = np.array([imgSizeW,imgSizeH])\n\n data3D /= boxScale_o2n\n\n if not isinstance(bboxTopLeft, np.ndarray):\n assert isinstance(bboxTopLeft, tuple)\n assert len(bboxTopLeft) == 2\n bboxTopLeft = np.array(bboxTopLeft)\n\n data3D[:,:2] += (bboxTopLeft + resnet_input_size_half/boxScale_o2n)\n\n return data3D" }, { "identifier": "draw_raw_bbox", "path": "frankmocap/renderer/image_utils.py", "snippet": "def draw_raw_bbox(img, bboxes):\n img = img.copy()\n for bbox in bboxes:\n x0, y0, w, h = bbox\n bbox_xyxy = (x0, y0, x0+w, y0+h)\n img = draw_bbox(img, bbox_xyxy)\n return img" }, { "identifier": "draw_hand_bbox", "path": "frankmocap/renderer/image_utils.py", "snippet": "def draw_hand_bbox(img, hand_bbox_list):\n img = img.copy()\n for hand_bboxes in hand_bbox_list:\n if hand_bboxes is not None:\n for key in hand_bboxes:\n bbox = hand_bboxes[key]\n if bbox is not None:\n x0, y0, w, h = bbox\n bbox_new = (x0, y0, x0+w, y0+h)\n color = (255, 0, 0) if key == 'left_hand' else (0, 255, 0)\n img = draw_bbox(img, bbox_new, color=color)\n return img" }, { "identifier": "draw_body_bbox", "path": "frankmocap/renderer/image_utils.py", "snippet": "def draw_body_bbox(img, body_bbox_list):\n img = img.copy()\n for body_bbox in body_bbox_list:\n if body_bbox is not None:\n x0, y0, w, h = body_bbox\n img = draw_bbox(img, (x0, y0, x0+w, y0+h))\n return img" }, { "identifier": "draw_arm_pose", 
"path": "frankmocap/renderer/image_utils.py", "snippet": "def draw_arm_pose(img, body_pose_list):\n img = img.copy()\n for body_pose in body_pose_list:\n # left & right arm\n img = draw_keypoints(\n img, body_pose[6:8, :], radius=10, color=(255, 0, 0))\n img = draw_keypoints(\n img, body_pose[3:5, :], radius=10, color=(0, 0, 255))\n return img" } ]
import numpy as np import cv2 import torch from frankmocap.renderer import viewer2D#, glViewer, glRenderer from frankmocap.renderer import glViewer from frankmocap.renderer import meshRenderer #glRenderer from frankmocap.mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm from frankmocap.renderer.image_utils import draw_raw_bbox, draw_hand_bbox, draw_body_bbox, draw_arm_pose from renderer import glViewer #glRenderer
4,856
# Copyright (c) Facebook, Inc. and its affiliates. """ Visualizing 3D humans via Opengl - Options: GUI mode: a screen is required Scnreenless mode: xvfb-run can be used to avoid screen requirement """ class Visualizer(object): """ Visualizer to visuzlie SMPL reconstruction output from HMR family (HMR, SPIN, EFT) Args: reconstruction output rawImg, bbox, smpl_params (shape, pose, cams ) """ def __init__( self, rendererType ='opengl_gui' #nongui or gui ): self.rendererType = rendererType if rendererType != "opengl_gui" and rendererType!= "opengl": print("Wrong rendererType: {rendererType}") assert False self.cam_all = [] self.vert_all = [] self.bboxXYXY_all = [] self.bg_image = None #Screenless rendering if rendererType =='opengl': self.renderer = meshRenderer.meshRenderer() self.renderer.setRenderMode('geo') self.renderer.offscreenMode(True) else: self.renderer = None #Output rendering self.renderout = None # def setSMPLParam(self, smpl_vertices, cam, bbox_xyxy): # """ # smpl_vertices: (6890,3) # cam: (3,) # bbox_xyxy: (3,) # """ # self.cam_all.append(smpl_vertices) # self.vert_all.append(cam) # self.bboxXYXY_all.append(bbox_xyxy) # def setImg(self, image): # self.bg_image = image # def setWindowSize(width_, height_): # if self.rendererType=="gui": # glViewer.setWindowSize(width_, height_) # else: # assert False def visualize(self, input_img, hand_bbox_list = None, body_bbox_list = None, body_pose_list = None, raw_hand_bboxes = None, pred_mesh_list = None, vis_raw_hand_bbox = True, vis_body_pose = True, vis_hand_bbox = True, ): # init res_img = input_img.copy() # draw raw hand bboxes if raw_hand_bboxes is not None and vis_raw_hand_bbox:
# Copyright (c) Facebook, Inc. and its affiliates. """ Visualizing 3D humans via Opengl - Options: GUI mode: a screen is required Scnreenless mode: xvfb-run can be used to avoid screen requirement """ class Visualizer(object): """ Visualizer to visuzlie SMPL reconstruction output from HMR family (HMR, SPIN, EFT) Args: reconstruction output rawImg, bbox, smpl_params (shape, pose, cams ) """ def __init__( self, rendererType ='opengl_gui' #nongui or gui ): self.rendererType = rendererType if rendererType != "opengl_gui" and rendererType!= "opengl": print("Wrong rendererType: {rendererType}") assert False self.cam_all = [] self.vert_all = [] self.bboxXYXY_all = [] self.bg_image = None #Screenless rendering if rendererType =='opengl': self.renderer = meshRenderer.meshRenderer() self.renderer.setRenderMode('geo') self.renderer.offscreenMode(True) else: self.renderer = None #Output rendering self.renderout = None # def setSMPLParam(self, smpl_vertices, cam, bbox_xyxy): # """ # smpl_vertices: (6890,3) # cam: (3,) # bbox_xyxy: (3,) # """ # self.cam_all.append(smpl_vertices) # self.vert_all.append(cam) # self.bboxXYXY_all.append(bbox_xyxy) # def setImg(self, image): # self.bg_image = image # def setWindowSize(width_, height_): # if self.rendererType=="gui": # glViewer.setWindowSize(width_, height_) # else: # assert False def visualize(self, input_img, hand_bbox_list = None, body_bbox_list = None, body_pose_list = None, raw_hand_bboxes = None, pred_mesh_list = None, vis_raw_hand_bbox = True, vis_body_pose = True, vis_hand_bbox = True, ): # init res_img = input_img.copy() # draw raw hand bboxes if raw_hand_bboxes is not None and vis_raw_hand_bbox:
res_img = draw_raw_bbox(input_img, raw_hand_bboxes)
5
2023-11-17 05:21:26+00:00
8k
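Annotation: the frankmocap record above imports `convert_smpl_to_bbox` and `convert_bbox_to_oriIm` from `mocap_utils/coordconv.py`. The sketch below reimplements the coordinate round-trip those functions describe — SMPL output scaled into the 224x224 bbox crop, then mapped back to original-image pixels. The camera scale/translation and bbox values here are made-up placeholders, and the `bAppTransFirst` hand-model branch is omitted.

import numpy as np

RES_HALF = 224 * 0.5  # half of the 224x224 HMR input crop, as in coordconv.py

def smpl_to_bbox(pts, scale, trans):
    """Map SMPL-space points into the 224x224 bbox crop (body-model path)."""
    pts = pts.copy() * scale
    pts[:, 0:2] += trans
    return pts * RES_HALF

def bbox_to_ori_im(pts, box_scale_o2n, bbox_top_left):
    """Undo the crop: map bbox-space points back to original-image pixels."""
    pts = pts.copy() / box_scale_o2n
    pts[:, :2] += np.asarray(bbox_top_left) + RES_HALF / box_scale_o2n
    return pts

# Placeholder values standing in for a real HMR/SPIN prediction.
verts = np.random.randn(6890, 3).astype(np.float32)
cam_scale, cam_trans = 0.9, np.array([0.05, -0.02])
bbox_pts = smpl_to_bbox(verts, cam_scale, cam_trans)
img_pts = bbox_to_ori_im(bbox_pts, box_scale_o2n=0.5, bbox_top_left=(120, 80))
print(img_pts.shape)  # (6890, 3)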
dazhangyu123/OCL
train_source.py
[ { "identifier": "Eval", "path": "utils/eval.py", "snippet": "class Eval():\n def __init__(self, num_class):\n self.num_class = num_class\n self.confusion_matrix = np.zeros((self.num_class,)*2)\n self.ignore_index = None\n self.synthia = True if num_class == 16 else False\n\n\n def Pixel_Accuracy(self):\n if np.sum(self.confusion_matrix) == 0:\n print(\"Attention: pixel_total is zero!!!\")\n PA = 0\n else:\n PA = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()\n\n return PA\n\n def Mean_Pixel_Accuracy(self, out_16_13=False):\n MPA = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n if self.synthia:\n MPA_16 = np.nanmean(MPA[:self.ignore_index])\n MPA_13 = np.nanmean(MPA[synthia_set_16_to_13])\n return MPA_16, MPA_13\n if out_16_13:\n MPA_16 = np.nanmean(MPA[synthia_set_16])\n MPA_13 = np.nanmean(MPA[synthia_set_13])\n return MPA_16, MPA_13\n MPA = np.nanmean(MPA[:self.ignore_index])\n\n return MPA\n\n def Mean_Intersection_over_Union(self, out_16_13=False):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n if self.synthia:\n MIoU_16 = np.nanmean(MIoU[:self.ignore_index])\n MIoU_13 = np.nanmean(MIoU[synthia_set_16_to_13])\n return MIoU_16, MIoU_13\n if out_16_13:\n MIoU_16 = np.nanmean(MIoU[synthia_set_16])\n MIoU_13 = np.nanmean(MIoU[synthia_set_13])\n return MIoU_16, MIoU_13\n MIoU = np.nanmean(MIoU[:self.ignore_index])\n\n return MIoU\n\n def Frequency_Weighted_Intersection_over_Union(self, out_16_13=False):\n FWIoU = np.multiply(np.sum(self.confusion_matrix, axis=1), np.diag(self.confusion_matrix))\n FWIoU = FWIoU / (np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n if self.synthia:\n FWIoU_16 = np.sum(i for i in FWIoU if not np.isnan(i)) / np.sum(self.confusion_matrix)\n FWIoU_13 = np.sum(i for i in FWIoU[synthia_set_16_to_13] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n return FWIoU_16, FWIoU_13\n if out_16_13:\n FWIoU_16 = np.sum(i for i in FWIoU[synthia_set_16] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n FWIoU_13 = np.sum(i for i in FWIoU[synthia_set_13] if not np.isnan(i)) / np.sum(self.confusion_matrix)\n return FWIoU_16, FWIoU_13\n FWIoU = sum(i for i in FWIoU if not np.isnan(i)) / np.sum(self.confusion_matrix)\n\n return FWIoU\n\n def Mean_Precision(self, out_16_13=False):\n Precision = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=0)\n if self.synthia:\n Precision_16 = np.nanmean(Precision[:self.ignore_index])\n Precision_13 = np.nanmean(Precision[synthia_set_16_to_13])\n return Precision_16, Precision_13\n if out_16_13:\n Precision_16 = np.nanmean(Precision[synthia_set_16])\n Precision_13 = np.nanmean(Precision[synthia_set_13])\n return Precision_16, Precision_13\n Precision = np.nanmean(Precision[:self.ignore_index])\n return Precision\n \n def Print_Every_class_Eval(self, out_16_13=False):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n MPA = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)\n Precision = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=0)\n Class_ratio = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n Pred_retio = np.sum(self.confusion_matrix, axis=0) / np.sum(self.confusion_matrix)\n print('===>Everyclass:\\t' + 'MPA\\t' + 'MIoU\\t' + 
'PC\\t' + 'Ratio\\t' + 'Pred_Retio')\n if out_16_13: MIoU = MIoU[synthia_set_16]\n for ind_class in range(len(MIoU)):\n pa = str(round(MPA[ind_class] * 100, 2)) if not np.isnan(MPA[ind_class]) else 'nan'\n iou = str(round(MIoU[ind_class] * 100, 2)) if not np.isnan(MIoU[ind_class]) else 'nan'\n pc = str(round(Precision[ind_class] * 100, 2)) if not np.isnan(Precision[ind_class]) else 'nan'\n cr = str(round(Class_ratio[ind_class] * 100, 2)) if not np.isnan(Class_ratio[ind_class]) else 'nan'\n pr = str(round(Pred_retio[ind_class] * 100, 2)) if not np.isnan(Pred_retio[ind_class]) else 'nan'\n print('===>' + name_classes[ind_class] + ':\\t' + pa + '\\t' + iou + '\\t' + pc + '\\t' + cr + '\\t' + pr)\n\n def Get_class_ratio(self):\n MIoU = np.diag(self.confusion_matrix) / (\n np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -\n np.diag(self.confusion_matrix))\n Class_ratio = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)\n Pred_retio = np.sum(self.confusion_matrix, axis=0) / np.sum(self.confusion_matrix)\n return MIoU, Class_ratio, Pred_retio\n\n # generate confusion matrix\n def __generate_matrix(self, gt_image, pre_image):\n\n mask = (gt_image >= 0) & (gt_image < self.num_class)\n label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]\n count = np.bincount(label, minlength=self.num_class**2)\n confusion_matrix = count.reshape(self.num_class, self.num_class)\n return confusion_matrix\n\n def add_batch(self, gt_image, pre_image):\n # assert the size of two images are same\n assert gt_image.shape == pre_image.shape\n\n self.confusion_matrix += self.__generate_matrix(gt_image, pre_image)\n\n def reset(self):\n self.confusion_matrix = np.zeros((self.num_class,) * 2)" }, { "identifier": "get_model", "path": "utils/train_helper.py", "snippet": "def get_model(args):\n if args.backbone == \"deeplabv2_multi\":\n model = DeeplabMulti(num_classes=args.num_classes,\n pretrained=args.imagenet_pretrained)\n params = model.optim_parameters(args)\n args.numpy_transform = True\n return model, params" }, { "identifier": "City_Dataset", "path": "datasets/cityscapes_Dataset.py", "snippet": "class City_Dataset(data.Dataset):\n def __init__(self,\n args,\n data_root_path='/data/zyl/dataset/cityscapes',\n list_path=os.path.abspath('./datasets/city_list'),\n split='train',\n base_size=769,\n crop_size=769,\n training=True,\n class_16=False,\n class_13=False):\n \"\"\"\n\n :param root_path:\n :param dataset:\n :param base_size:\n :param is_trainging:\n :param transforms:\n \"\"\"\n self.args = args\n self.data_path=data_root_path\n self.list_path=list_path\n self.split=split\n self.base_size=base_size\n self.crop_size=crop_size\n\n self.base_size = self.base_size if isinstance(self.base_size, tuple) else (self.base_size, self.base_size)\n self.crop_size = self.crop_size if isinstance(self.crop_size, tuple) else (self.crop_size, self.crop_size)\n self.training = training\n\n self.random_mirror = args.random_mirror\n self.random_crop = args.random_crop\n self.resize = args.resize\n self.gaussian_blur = args.gaussian_blur\n\n item_list_filepath = os.path.join(self.list_path, self.split+\".txt\")\n if not os.path.exists(item_list_filepath):\n raise Warning(\"split must be train/val/trainval\")\n\n self.image_filepath = os.path.join(self.data_path, \"leftImg8bit\")\n\n self.gt_filepath = os.path.join(self.data_path, \"gtFine\")\n\n self.items = [id.strip() for id in open(item_list_filepath)]\n\n ignore_label = -1\n self.id_to_trainid = {-1: ignore_label, 0: 
ignore_label, 1: ignore_label, 2: ignore_label,\n 3: ignore_label, 4: ignore_label, 5: ignore_label, 6: ignore_label,\n 7: 0, 8: 1, 9: ignore_label, 10: ignore_label, 11: 2, 12: 3, 13: 4,\n 14: ignore_label, 15: ignore_label, 16: ignore_label, 17: 5,\n 18: ignore_label, 19: 6, 20: 7, 21: 8, 22: 9, 23: 10, 24: 11, 25: 12, 26: 13, 27: 14,\n 28: 15, 29: ignore_label, 30: ignore_label, 31: 16, 32: 17, 33: 18}\n # In SYNTHIA-to-Cityscapes case, only consider 16 shared classes\n self.class_16 = class_16\n synthia_set_16 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18]\n self.trainid_to_16id = {id:i for i,id in enumerate(synthia_set_16)}\n # In Cityscapes-to-NTHU case, only consider 13 shared classes\n self.class_13 = class_13\n synthia_set_13 = [0, 1, 2, 6, 7, 8, 10, 11, 12, 13, 15, 17, 18]\n self.trainid_to_13id = {id:i for i,id in enumerate(synthia_set_13)}\n \n print(\"{} num images in Cityscapes {} set have been loaded.\".format(len(self.items), self.split))\n if self.args.numpy_transform:\n print(\"use numpy_transform, instead of tensor transform!\")\n\n def id2trainId(self, label, reverse=False, ignore_label=-1):\n label_copy = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.id_to_trainid.items():\n label_copy[label == k] = v\n if self.class_16:\n label_copy_16 = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.trainid_to_16id.items():\n label_copy_16[label_copy == k] = v\n label_copy = label_copy_16\n if self.class_13:\n label_copy_13 = ignore_label * np.ones(label.shape, dtype=np.float32)\n for k, v in self.trainid_to_13id.items():\n label_copy_13[label_copy == k] = v\n label_copy = label_copy_13\n return label_copy\n\n def __getitem__(self, item):\n id = self.items[item]\n filename = id.split(\"train_\")[-1].split(\"val_\")[-1].split(\"test_\")[-1]\n image_filepath = os.path.join(self.image_filepath, id.split(\"_\")[0], id.split(\"_\")[1])\n image_filename = filename + \"_leftImg8bit.png\"\n image_path = os.path.join(image_filepath, image_filename)\n image = Image.open(image_path).convert(\"RGB\")\n\n gt_filepath = os.path.join(self.gt_filepath, id.split(\"_\")[0], id.split(\"_\")[1])\n gt_filename = filename + \"_gtFine_labelIds.png\"\n gt_image_path = os.path.join(gt_filepath, gt_filename)\n gt_image = Image.open(gt_image_path)\n\n if (self.split == \"train\" or self.split == \"trainval\") and self.training:\n image, gt_image = self._train_sync_transform(image, gt_image)\n else:\n image, gt_image = self._val_sync_transform(image, gt_image)\n\n return image, gt_image, item\n\n def _train_sync_transform(self, img, mask):\n '''\n :param image: PIL input image\n :param gt_image: PIL input gt_image\n :return:\n '''\n if self.random_mirror:\n # random mirror\n if random.random() < 0.5:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if mask: mask = mask.transpose(Image.FLIP_LEFT_RIGHT)\n crop_w, crop_h = self.crop_size\n\n if self.random_crop:\n # random scale\n base_w , base_h = self.base_size\n w, h = img.size\n assert w >= h\n if (base_w / w) > (base_h / h):\n base_size = base_w \n short_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n ow = short_size\n oh = int(1.0 * h * ow / w)\n else:\n base_size = base_h\n short_size = random.randint(int(base_size * 0.5), int(base_size * 2.0))\n oh = short_size\n ow = int(1.0 * w * oh / h)\n\n img = img.resize((ow, oh), Image.BICUBIC)\n if mask: mask = mask.resize((ow, oh), Image.NEAREST)\n # pad crop\n if ow < crop_w or oh < crop_h:\n padh = crop_h - oh if oh < crop_h else 
0\n padw = crop_w - ow if ow < crop_w else 0\n img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)\n if mask: mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)\n # random crop crop_size\n w, h = img.size\n x1 = random.randint(0, w - crop_w)\n y1 = random.randint(0, h - crop_h)\n img = img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n if mask: mask = mask.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n elif self.resize:\n img = img.resize(self.crop_size, Image.BICUBIC)\n if mask: mask = mask.resize(self.crop_size, Image.NEAREST)\n \n if self.gaussian_blur:\n # gaussian blur as in PSP\n if random.random() < 0.5:\n img = img.filter(ImageFilter.GaussianBlur(\n radius=random.random()))\n # final transform\n if mask: \n img, mask = self._img_transform(img), self._mask_transform(mask)\n return img, mask\n else:\n img = self._img_transform(img)\n return img\n\n def _val_sync_transform(self, img, mask):\n if self.random_crop:\n crop_w, crop_h = self.crop_size\n w, h = img.size\n if crop_w / w < crop_h / h:\n oh = crop_h\n ow = int(1.0 * w * oh / h)\n else:\n ow = crop_w\n oh = int(1.0 * h * ow / w)\n img = img.resize((ow, oh), Image.BICUBIC)\n mask = mask.resize((ow, oh), Image.NEAREST)\n # center crop\n w, h = img.size\n x1 = int(round((w - crop_w) / 2.))\n y1 = int(round((h - crop_h) / 2.))\n img = img.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n mask = mask.crop((x1, y1, x1 + crop_w, y1 + crop_h))\n elif self.resize:\n img = img.resize(self.crop_size, Image.BICUBIC)\n mask = mask.resize(self.crop_size, Image.NEAREST)\n\n # final transform\n img, mask = self._img_transform(img), self._mask_transform(mask)\n return img, mask\n\n def _img_transform(self, image):\n if self.args.numpy_transform:\n image = np.asarray(image, np.float32)\n image = image[:, :, ::-1] # change to BGR\n image -= IMG_MEAN\n image = image.transpose((2, 0, 1)).copy() # (C x H x W)\n new_image = torch.from_numpy(image)\n else:\n image_transforms = ttransforms.Compose([\n ttransforms.ToTensor(),\n ttransforms.Normalize([.485, .456, .406], [.229, .224, .225]),\n ])\n new_image = image_transforms(image)\n return new_image\n\n def _mask_transform(self, gt_image):\n target = np.asarray(gt_image, np.float32)\n target = self.id2trainId(target).copy()\n target = torch.from_numpy(target)\n\n return target\n\n def __len__(self):\n return len(self.items)" }, { "identifier": "City_DataLoader", "path": "datasets/cityscapes_Dataset.py", "snippet": "class City_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = City_Dataset(args, \n data_root_path='/mnt/Xsky/zyl/dataset/cityscapes',\n list_path='./datasets/city_list',\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training,\n class_16=args.class_16,\n class_13=args.class_13)\n\n if (self.args.split == \"train\" or self.args.split == \"trainval\") and training:\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n\n val_set = City_Dataset(args, \n data_root_path='./datasets/Cityscapes',\n list_path='./datasets/city_list',\n split='val',\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False,\n class_16=args.class_16,\n 
class_13=args.class_13)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size" }, { "identifier": "inv_preprocess", "path": "datasets/cityscapes_Dataset.py", "snippet": "def inv_preprocess(imgs, num_images=1, img_mean=IMG_MEAN, numpy_transform=False):\n \"\"\"Inverse preprocessing of the batch of images.\n \n Args:\n imgs: batch of input images.\n num_images: number of images to apply the inverse transformations on.\n img_mean: vector of mean colour values.\n numpy_transform: whether change RGB to BGR during img_transform.\n \n Returns:\n The batch of the size num_images with the same spatial dimensions as the input.\n \"\"\"\n if numpy_transform:\n imgs = flip(imgs, 1)\n def norm_ip(img, min, max):\n img.clamp_(min=min, max=max)\n img.add_(-min).div_(max - min + 1e-5)\n norm_ip(imgs, float(imgs.min()), float(imgs.max()))\n return imgs" }, { "identifier": "decode_labels", "path": "datasets/cityscapes_Dataset.py", "snippet": "def decode_labels(mask, num_images=1, num_classes=NUM_CLASSES):\n \"\"\"Decode batch of segmentation masks.\n \n Args:\n mask: result of inference after taking argmax.\n num_images: number of images to decode from the batch.\n num_classes: number of classes to predict.\n \n Returns:\n A batch with num_images RGB images of the same size as the input. \n \"\"\"\n if isinstance(mask, torch.Tensor):\n mask = mask.data.cpu().numpy()\n n, h, w = mask.shape\n if n < num_images:\n num_images = n\n outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)\n for i in range(num_images):\n img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))\n pixels = img.load()\n for j_, j in enumerate(mask[i, :, :]):\n for k_, k in enumerate(j):\n if k < num_classes:\n pixels[k_,j_] = label_colours[k]\n outputs[i] = np.array(img)\n return torch.from_numpy(outputs.transpose([0, 3, 1, 2]).astype('float32')).div_(255.0)" }, { "identifier": "GTA5_DataLoader", "path": "datasets/gta5_Dataset.py", "snippet": "class GTA5_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = GTA5_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training)\n\n if self.args.split == \"train\" or self.args.split == \"trainval\" or self.args.split ==\"all\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n elif self.args.split ==\"val\" or self.args.split == \"test\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n raise Warning(\"split must be train/val/trainavl/test/all\")\n\n val_split = 'val' if self.args.split == \"train\" else 'test'\n val_set = GTA5_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=val_split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n 
num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size" }, { "identifier": "SYNTHIA_DataLoader", "path": "datasets/synthia_Dataset.py", "snippet": "class SYNTHIA_DataLoader():\n def __init__(self, args, training=True):\n\n self.args = args\n\n data_set = SYNTHIA_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=args.split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=training,\n class_16=args.class_16)\n\n if self.args.split == \"train\" or self.args.split == \"trainval\" or self.args.split ==\"all\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=True,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n elif self.args.split ==\"val\" or self.args.split == \"test\":\n self.data_loader = data.DataLoader(data_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n else:\n raise Warning(\"split must be train/val/trainavl/test/all\")\n\n val_split = 'val' if self.args.split == \"train\" else 'test'\n val_set = SYNTHIA_Dataset(args, \n data_root_path=args.data_root_path,\n list_path=args.list_path,\n split=val_split,\n base_size=args.base_size,\n crop_size=args.crop_size,\n training=False,\n class_16=args.class_16)\n self.val_loader = data.DataLoader(val_set,\n batch_size=self.args.batch_size,\n shuffle=False,\n num_workers=self.args.data_loader_workers,\n pin_memory=self.args.pin_memory,\n drop_last=True)\n self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size\n\n self.num_iterations = (len(data_set) + self.args.batch_size) // self.args.batch_size" } ]
import os import random import logging import argparse import torch import torch.nn as nn import torch.utils.data as data import torch.nn.functional as F import numpy as np import sys import shutil from tqdm import tqdm from math import ceil from distutils.version import LooseVersion from tensorboardX import SummaryWriter from torchvision.utils import make_grid from utils.eval import Eval from utils.train_helper import get_model from datasets.cityscapes_Dataset import City_Dataset, City_DataLoader, inv_preprocess, decode_labels from datasets.gta5_Dataset import GTA5_DataLoader from datasets.synthia_Dataset import SYNTHIA_DataLoader
7,139
sys.path.append(os.path.abspath('tools')) datasets_path={ 'cityscapes': {'data_root_path': '/mnt/Xsky/zyl/dataset/dataset/Cityscapes', 'list_path': './datasets/city_list', 'image_path':'/mnt/Xsky/zyl/dataset/Cityscapes/leftImg8bit', 'gt_path': './datasets/Cityscapes/gtFine'}, 'gta5': {'data_root_path': '/mnt/Xsky/zyl/dataset/GTA5', 'list_path': './datasets/gta5_list', 'image_path':'/mnt/Xsky/zyl/dataset/GTA5/images', 'gt_path': './datasets/GTA5/labels'}, 'synthia': {'data_root_path': '/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES', 'list_path': './datasets/synthia_list', 'image_path':'/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES/RGB', 'gt_path': './datasets/SYNTHIA/GT/LABELS'}, 'NTHU': {'data_root_path': './datasets/NTHU_Datasets', 'list_path': './datasets/NTHU_list'} } def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Unsupported value encountered.') ITER_MAX = 5000 class Trainer(): def __init__(self, args, cuda=None, train_id="None", logger=None): self.args = args os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu self.cuda = cuda and torch.cuda.is_available() self.device = torch.device('cuda' if self.cuda else 'cpu') self.train_id = train_id self.logger = logger self.current_MIoU = 0 self.best_MIou = 0 self.best_source_MIou = 0 self.current_epoch = 0 self.current_iter = 0 self.second_best_MIou = 0 # set TensorboardX self.writer = SummaryWriter(self.args.checkpoint_dir) # Metric definition self.Eval = Eval(self.args.num_classes) # loss definition self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1) self.loss.to(self.device) # model self.model, params = get_model(self.args) self.model = nn.DataParallel(self.model, device_ids=[0]) self.model.to(self.device) if self.args.optim == "SGD": self.optimizer = torch.optim.SGD( params=params, momentum=self.args.momentum, weight_decay=self.args.weight_decay ) elif self.args.optim == "Adam": self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay) # dataloader if self.args.dataset=="cityscapes":
sys.path.append(os.path.abspath('tools')) datasets_path={ 'cityscapes': {'data_root_path': '/mnt/Xsky/zyl/dataset/dataset/Cityscapes', 'list_path': './datasets/city_list', 'image_path':'/mnt/Xsky/zyl/dataset/Cityscapes/leftImg8bit', 'gt_path': './datasets/Cityscapes/gtFine'}, 'gta5': {'data_root_path': '/mnt/Xsky/zyl/dataset/GTA5', 'list_path': './datasets/gta5_list', 'image_path':'/mnt/Xsky/zyl/dataset/GTA5/images', 'gt_path': './datasets/GTA5/labels'}, 'synthia': {'data_root_path': '/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES', 'list_path': './datasets/synthia_list', 'image_path':'/mnt/Xsky/zyl/dataset/RAND_CITYSCAPES/RGB', 'gt_path': './datasets/SYNTHIA/GT/LABELS'}, 'NTHU': {'data_root_path': './datasets/NTHU_Datasets', 'list_path': './datasets/NTHU_list'} } def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Unsupported value encountered.') ITER_MAX = 5000 class Trainer(): def __init__(self, args, cuda=None, train_id="None", logger=None): self.args = args os.environ["CUDA_VISIBLE_DEVICES"] = self.args.gpu self.cuda = cuda and torch.cuda.is_available() self.device = torch.device('cuda' if self.cuda else 'cpu') self.train_id = train_id self.logger = logger self.current_MIoU = 0 self.best_MIou = 0 self.best_source_MIou = 0 self.current_epoch = 0 self.current_iter = 0 self.second_best_MIou = 0 # set TensorboardX self.writer = SummaryWriter(self.args.checkpoint_dir) # Metric definition self.Eval = Eval(self.args.num_classes) # loss definition self.loss = nn.CrossEntropyLoss(weight=None, ignore_index= -1) self.loss.to(self.device) # model self.model, params = get_model(self.args) self.model = nn.DataParallel(self.model, device_ids=[0]) self.model.to(self.device) if self.args.optim == "SGD": self.optimizer = torch.optim.SGD( params=params, momentum=self.args.momentum, weight_decay=self.args.weight_decay ) elif self.args.optim == "Adam": self.optimizer = torch.optim.Adam(params, betas=(0.9, 0.99), weight_decay=self.args.weight_decay) # dataloader if self.args.dataset=="cityscapes":
self.dataloader = City_DataLoader(self.args)
3
2023-11-14 02:01:11+00:00
8k
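Annotation: the `Eval` class in this record accumulates its per-batch confusion matrix with a single `np.bincount` over `num_class * gt + pred`. A self-contained sketch of that trick and the mIoU that follows from it; the toy labels are invented for illustration.

import numpy as np

def confusion_matrix(gt, pred, num_class):
    """Confusion matrix via one bincount: row = ground truth, col = prediction."""
    mask = (gt >= 0) & (gt < num_class)          # drop ignore-label (-1) pixels
    idx = num_class * gt[mask].astype(int) + pred[mask]
    return np.bincount(idx, minlength=num_class ** 2).reshape(num_class, num_class)

def mean_iou(cm):
    """Per-class IoU = diag / (row_sum + col_sum - diag), then nanmean."""
    iou = np.diag(cm) / (cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm))
    return np.nanmean(iou)

gt   = np.array([0, 0, 1, 1, 2, -1])   # -1 is an ignored pixel
pred = np.array([0, 1, 1, 1, 2,  0])
cm = confusion_matrix(gt, pred, num_class=3)
print(cm)
print(mean_iou(cm))  # (1/2 + 2/3 + 1/1) / 3 ≈ 0.722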
zhuhanqing/Lightening-Transformer-AE
software_model/ops/quantize.py
[ { "identifier": "_Conv2dQ", "path": "software_model/ops/_quant_base.py", "snippet": "class _Conv2dQ(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1,\n padding=0, dilation=1, groups=1, bias=True, **kwargs_q):\n super(_Conv2dQ, self).__init__(in_channels, out_channels, kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n self.kwargs_q = get_default_kwargs_q(kwargs_q, layer_type=self)\n self.nbits = kwargs_q['nbits']\n if self.nbits < 0:\n self.register_parameter('alpha', None)\n return\n self.q_mode = kwargs_q['mode']\n if self.q_mode == Qmodes.kernel_wise:\n self.alpha = Parameter(torch.Tensor(out_channels))\n else: # layer-wise quantization\n self.alpha = Parameter(torch.Tensor(1))\n self.register_buffer('init_state', torch.zeros(1))\n\n def add_param(self, param_k, param_v):\n self.kwargs_q[param_k] = param_v\n\n def set_bit(self, nbits):\n self.kwargs_q['nbits'] = nbits\n\n def extra_repr(self):\n s_prefix = super(_Conv2dQ, self).extra_repr()\n if self.alpha is None:\n return '{}, fake'.format(s_prefix)\n return '{}, {}'.format(s_prefix, self.kwargs_q)" }, { "identifier": "Qmodes", "path": "software_model/ops/_quant_base.py", "snippet": "class Qmodes(Enum):\n layer_wise = 1\n kernel_wise = 2" }, { "identifier": "_LinearQ", "path": "software_model/ops/_quant_base.py", "snippet": "class _LinearQ(nn.Linear):\n def __init__(self, in_features, out_features, bias=True, **kwargs_q):\n super(_LinearQ, self).__init__(in_features=in_features, out_features=out_features, bias=bias)\n self.kwargs_q = get_default_kwargs_q(kwargs_q, layer_type=self)\n self.nbits = kwargs_q['nbits']\n if self.nbits < 0:\n self.register_parameter('alpha', None)\n return\n self.q_mode = kwargs_q['mode']\n self.alpha = Parameter(torch.Tensor(1))\n if self.q_mode == Qmodes.kernel_wise:\n self.alpha = Parameter(torch.Tensor(out_features))\n self.register_buffer('init_state', torch.zeros(1))\n\n def add_param(self, param_k, param_v):\n self.kwargs_q[param_k] = param_v\n\n def extra_repr(self):\n s_prefix = super(_LinearQ, self).extra_repr()\n if self.alpha is None:\n return '{}, fake'.format(s_prefix)\n return '{}, {}'.format(s_prefix, self.kwargs_q)" }, { "identifier": "_ActQ", "path": "software_model/ops/_quant_base.py", "snippet": "class _ActQ(nn.Module):\n def __init__(self, in_features, **kwargs_q):\n super(_ActQ, self).__init__()\n self.kwargs_q = get_default_kwargs_q(kwargs_q, layer_type=self)\n self.nbits = kwargs_q['nbits']\n if self.nbits < 0:\n self.register_parameter('alpha', None)\n # self.register_parameter('zero_point', None)\n return\n # self.signed = kwargs_q['signed']\n self.q_mode = kwargs_q['mode']\n # print(kwargs_q)\n self.offset = kwargs_q['offset']\n self.zero_point = None\n if self.q_mode == Qmodes.kernel_wise:\n self.alpha = Parameter(torch.Tensor(in_features))\n if self.offset:\n self.zero_point = Parameter(torch.Tensor(in_features))\n torch.nn.init.zeros_(self.zero_point)\n else:\n self.alpha = Parameter(torch.Tensor(1))\n if self.offset:\n self.zero_point = Parameter(torch.Tensor([0]))\n # self.zero_point = Parameter(torch.Tensor([0]))\n self.register_buffer('init_state', torch.zeros(1))\n self.register_buffer('signed', torch.zeros(1))\n\n def add_param(self, param_k, param_v):\n self.kwargs_q[param_k] = param_v\n\n def set_bit(self, nbits):\n self.kwargs_q['nbits'] = nbits\n\n def extra_repr(self):\n # s_prefix = super(_ActQ, self).extra_repr()\n if self.alpha is None:\n return 'fake'\n return '{}'.format(self.kwargs_q)" 
}, { "identifier": "cal_coupler_wdm_error_list", "path": "software_model/ops/simulator.py", "snippet": "def cal_coupler_wdm_error_list(num_wavelength, channel_spacing):\n channel_spacing = channel_spacing *1e-3\n error_list = [] # 2 * kappa - 1\n \n def coupling_length(w, g=100):\n a = -5.44\n b = 3.53\n c = 0.185\n d = 0.15\n \n L_c = (a * (w - 1.55) + b) * math.exp(g / 1000 / (c * (w - 1.55) + d))\n \n return L_c\n odd_num_wavelength = True if num_wavelength % 2 == 1 else False\n \n for wave_length in range(num_wavelength):\n if odd_num_wavelength:\n wave_length = 1.55 + channel_spacing * (wave_length - (num_wavelength // 2))\n else:\n if wave_length < num_wavelength // 2:\n wave_length = 1.55 + channel_spacing * (wave_length - (num_wavelength // 2))\n else:\n wave_length = 1.55 + channel_spacing * (wave_length - (num_wavelength // 2) + 1)\n kappa = math.sin(math.pi / 4 * coupling_length(1.55) / coupling_length(wave_length)) ** 2\n error_list.append(2 * kappa - 1)\n \n return error_list" } ]
import torch import torch.nn.functional as F import math import numpy as np from ._quant_base import _Conv2dQ, Qmodes, _LinearQ, _ActQ from .simulator import cal_coupler_wdm_error_list
4,536
self.phase_noise_std = phase_noise_std if self.enable_linear_noise else 0 self.kappa_noise = None if not (enable_wdm_noise and enable_linear_noise) else cal_coupler_wdm_error_list( num_wavelength=num_wavelength, channel_spacing=channel_spacing) self.num_wavelength = num_wavelength self.out_features = out_features self.in_features = in_features if self.kappa_noise is not None: self.kappa_noise_term = torch.tensor(self.kappa_noise).unsqueeze(0).expand((in_features // self.num_wavelength) + 1, -1).reshape(-1).contiguous()[:in_features] else: self.kappa_noise_term = None self.act = QuantAct(in_features=in_features, nbits=nbits_a, mode=Qmodes.layer_wise, offset=offset, input_noise_std=self.input_noise_std) def add_input_noise(self, x): # the noise std is 2sigma not 1sigma, so should be devided by 2 if self.input_noise_std > 1e-5: # add mul noise here noise = torch.randn_like(x).mul( (self.input_noise_std)).mul(x.data.abs()) x = x + noise return x def add_output_noise(self, x): # the noise std is 2sigma not 1sigma, so should be devided by 2 if self.output_noise_std > 1e-5: noise = torch.randn_like(x).mul( (self.output_noise_std)).mul(x.data.abs()) x = x + noise return x def add_phase_noise(self, x, noise_std=2): # the noise std is 2sigma not 1sigma, so should be devided by 2 # DATE O2NN use 0.04 -> 0.04 * 360 / 2pi = 2.29 if noise_std > 1e-5: noise = (torch.randn_like(x).mul_((noise_std) / 180 * np.pi)).cos_() x = x * noise return x def forward(self, x): kappa_noise_scale_factor = 2 if self.alpha is None: return F.linear(x, self.weight, self.bias) Qn = -2 ** (self.nbits - 1) Qp = 2 ** (self.nbits - 1) - 1 if self.init_state == 0: print( f"Linear layer (mode: {self.q_mode}): initialize weight scale for int{self.nbits} quantization") self.alpha.data.copy_(2 * self.weight.abs().mean() / math.sqrt(Qp)) self.init_state.fill_(1) # lsq+ init # m, v = self.weight.abs().mean(), self.weight.abs().std() # self.alpha.data.copy_(torch.max(torch.abs(m - 3*v), torch.abs(m + 3*v)) / 2 ** (self.nbits - 1) ) assert self.init_state == 1 with torch.no_grad(): g = 1.0 / math.sqrt(self.weight.numel() * Qp) # g = 1.0 / math.sqrt(self.weight.numel()) / Qp # g = 1.0 / math.sqrt(self.weight.numel()) / 4 self.alpha.data.clamp_(min=1e-4) # Method1: alpha = grad_scale(self.alpha, g) # w_q = round_pass((self.weight / alpha).clamp(Qn, Qp)) * alpha # w_q = clamp(round_pass(self.weight / alpha), Qn, Qp) * alpha w_q = round_pass((self.weight / alpha).clamp(Qn, Qp)) * alpha # Method2: # w_q = FunLSQ.apply(self.weight, self.alpha, g, Qn, Qp) x = self.act(x) # add noise @ w_q if self.enable_linear_noise and self.input_noise_std > 1e-5: w_q = self.add_input_noise(w_q) if not self.training and self.phase_noise_std > 1e-5 and self.enable_linear_noise: noise_w_q_2 = 0 noise_x_2 = 0 if self.kappa_noise is not None: if self.kappa_noise_term.device != x.device: self.kappa_noise_term = self.kappa_noise_term.to(x.device) # obtain the scaling number alpha_x_to_w = self.act.alpha / alpha noise_x_2 = torch.matmul(x.square(), self.kappa_noise_term.unsqueeze(-1)) /(alpha_x_to_w * kappa_noise_scale_factor) # [bs, seq, 1] noise_w_q_2 = torch.matmul(w_q.square(), -self.kappa_noise_term.unsqueeze(-1))* (alpha_x_to_w / kappa_noise_scale_factor) # [output_features, 1] dim_3_flag = False if x.dim() == 3: dim_3_flag = True bs, N, D = x.shape bs = bs * N x = x.reshape(-1, D) else: bs, D = x.shape out = [] k = 2 num_chunks = self.out_features//k for i in range(k): if self.out_features%k != 0: raise RuntimeError noisy_x = 
self.add_phase_noise(x.unsqueeze(-2).expand(-1, num_chunks, -1)) out.append(torch.einsum('ibk, bk->ib', noisy_x, w_q[i * num_chunks: (i+1) * num_chunks, :])) out = torch.cat(out, 1) if self.bias is not None: out += self.bias if dim_3_flag: out = out.reshape(-1, N, self.out_features) out = out + (noise_x_2 + noise_w_q_2.squeeze(-1)) # add [bs, seq, 1] and [1, output_features] else: out = F.linear(x, w_q, self.bias) # add output noise if self.enable_linear_noise and self.output_noise_std > 1e-5: out = self.add_output_noise(out) return out
# -*- coding: utf-8 -*- # @Author: Hanqing Zhu # @Date: 2023-01-02 21:11:56 # @Last Modified by: Hanqing Zhu([email protected]) # @Last Modified time: 2023-11-09 21:57:41 """ @inproceedings{ esser2020learned, title={LEARNED STEP SIZE QUANTIZATION}, author={Steven K. Esser and Jeffrey L. McKinstry and Deepika Bablani and Rathinakumar Appuswamy and Dharmendra S. Modha}, booktitle={International Conference on Learning Representations}, year={2020}, url={https://openreview.net/forum?id=rkgO66VKDS} } https://quanoview.readthedocs.io/en/latest/_raw/LSQ.html """ __all__ = ["QuantLinear", "QuantAct", "QuantConv2d"] class FunLSQ(torch.autograd.Function): @staticmethod def forward(ctx, weight, alpha, g, Qn, Qp): assert alpha > 0, 'alpha = {}'.format(alpha) ctx.save_for_backward(weight, alpha) ctx.other = g, Qn, Qp q_w = (weight / alpha).round().clamp(Qn, Qp) w_q = q_w * alpha return w_q @staticmethod def backward(ctx, grad_weight): weight, alpha = ctx.saved_tensors g, Qn, Qp = ctx.other q_w = weight / alpha indicate_small = (q_w < Qn).float() indicate_big = (q_w > Qp).float() # indicate_middle = torch.ones(indicate_small.shape).to(indicate_small.device) - indicate_small - indicate_big indicate_middle = 1.0 - indicate_small - indicate_big # Thanks to @haolibai grad_alpha = ((indicate_small * Qn + indicate_big * Qp + indicate_middle * (-q_w + q_w.round())) * grad_weight * g).sum().unsqueeze(dim=0) # grad_alpha = ((indicate_small * Qn + indicate_big * Qp + indicate_middle * 0) * grad_weight * g).sum().unsqueeze(dim=0) grad_weight = indicate_middle * grad_weight return grad_weight, grad_alpha, None, None, None def grad_scale(x, scale): y = x y_grad = x * scale return y.detach() - y_grad.detach() + y_grad def round_pass(x): y = x.round() y_grad = x return y.detach() - y_grad.detach() + y_grad def clamp(x, minv, maxv): print(minv.dtype) x = torch.minimum(x, maxv) x = torch.maximum(x, minv) return x class QuantConv2d(_Conv2dQ): def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, nbits=-1, nbits_a=-1, mode=Qmodes.layer_wise, offset=False, input_noise_std=0, output_noise_std=0, phase_noise_std=0, enable_wdm_noise=False, num_wavelength=9, channel_spacing=0.4, enable_linear_noise=False, **kwargs): super(QuantConv2d, self).__init__( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, nbits=nbits, mode=mode,) self.enable_linear_noise = enable_linear_noise self.input_noise_std = input_noise_std if self.enable_linear_noise else 0 self.output_noise_std = output_noise_std if self.enable_linear_noise else 0 self.act = QuantAct(in_features=in_channels, nbits=nbits_a, mode=Qmodes.layer_wise, offset=offset, input_noise_std=self.input_noise_std) def add_output_noise(self, x): # the noise std is 2sigma not 1sigma, so should be devided by 2 if self.output_noise_std > 1e-5: noise = torch.randn_like(x).mul( (self.output_noise_std)).mul(x.data.abs()) x = x + noise return x def add_input_noise(self, x): # the noise std is 2sigma not 1sigma, so should be devided by 2 if self.input_noise_std > 1e-5: noise = torch.randn_like(x).mul( (self.input_noise_std)).mul(x.data.abs()) x = x + noise return x def forward(self, x): if self.alpha is None: return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups) # full range quantization -> -2**(k-1) -> 2**(k-1)-1 Qn = -2 ** (self.nbits - 1) Qp = 2 ** (self.nbits - 1) - 1 if self.init_state == 
0: print( f"Conv layer (mode: {self.q_mode}): initialize weight scale for int{self.nbits} quantization") self.alpha.data.copy_(2 * self.weight.abs().mean() / math.sqrt(Qp)) # self.alpha.data.copy_(quantize_by_mse(self.weight, Qn, Qp)) self.init_state.fill_(1) with torch.no_grad(): g = 1.0 / math.sqrt(self.weight.numel() * Qp) self.alpha.data.clamp_(min=1e-4) # Method1: 31GB GPU memory (AlexNet w4a4 bs 2048) 17min/epoch alpha = grad_scale(self.alpha, g) w_q = round_pass((self.weight / alpha).clamp(Qn, Qp)) * alpha # Method2: 25GB GPU memory (AlexNet w4a4 bs 2048) 32min/epoch # w_q = FunLSQ.apply(self.weight, self.alpha, g, Qn, Qp) x = self.act(x) # add noise at w_q if self.enable_linear_noise and self.input_noise_std > 1e-5: w_q = self.add_input_noise(w_q) out = F.conv2d(x, w_q, self.bias, self.stride, self.padding, self.dilation, self.groups) if self.enable_linear_noise and self.output_noise_std > 1e-5: out = self.add_output_noise(out) return out class QuantLinear(_LinearQ): def __init__(self, in_features, out_features, bias=True, nbits=-1, nbits_a=-1, mode=Qmodes.layer_wise, offset=False, input_noise_std=0, output_noise_std=0, phase_noise_std=0, enable_wdm_noise=False, num_wavelength=9, channel_spacing=0.4, enable_linear_noise=False, **kwargs): super(QuantLinear, self).__init__(in_features=in_features, out_features=out_features, bias=bias, nbits=nbits, mode=mode) print( f"Linear layer (mode: {self.q_mode}): initialize weight scale for int{self.nbits} quantization") self.enable_linear_noise = enable_linear_noise self.input_noise_std = input_noise_std if self.enable_linear_noise else 0 self.output_noise_std = output_noise_std if self.enable_linear_noise else 0 self.phase_noise_std = phase_noise_std if self.enable_linear_noise else 0 self.kappa_noise = None if not (enable_wdm_noise and enable_linear_noise) else cal_coupler_wdm_error_list( num_wavelength=num_wavelength, channel_spacing=channel_spacing) self.num_wavelength = num_wavelength self.out_features = out_features self.in_features = in_features if self.kappa_noise is not None: self.kappa_noise_term = torch.tensor(self.kappa_noise).unsqueeze(0).expand((in_features // self.num_wavelength) + 1, -1).reshape(-1).contiguous()[:in_features] else: self.kappa_noise_term = None self.act = QuantAct(in_features=in_features, nbits=nbits_a, mode=Qmodes.layer_wise, offset=offset, input_noise_std=self.input_noise_std) def add_input_noise(self, x): # the noise std is 2sigma not 1sigma, so should be divided by 2 if self.input_noise_std > 1e-5: # add mul noise here noise = torch.randn_like(x).mul( (self.input_noise_std)).mul(x.data.abs()) x = x + noise return x def add_output_noise(self, x): # the noise std is 2sigma not 1sigma, so should be divided by 2 if self.output_noise_std > 1e-5: noise = torch.randn_like(x).mul( (self.output_noise_std)).mul(x.data.abs()) x = x + noise return x def add_phase_noise(self, x, noise_std=2): # the noise std is 2sigma not 1sigma, so should be divided by 2 # DATE O2NN use 0.04 -> 0.04 * 360 / 2pi = 2.29 if noise_std > 1e-5: noise = (torch.randn_like(x).mul_((noise_std) / 180 * np.pi)).cos_() x = x * noise return x def forward(self, x): kappa_noise_scale_factor = 2 if self.alpha is None: return F.linear(x, self.weight, self.bias) Qn = -2 ** (self.nbits - 1) Qp = 2 ** (self.nbits - 1) - 1 if self.init_state == 0: print( f"Linear layer (mode: {self.q_mode}): initialize weight scale for int{self.nbits} quantization") self.alpha.data.copy_(2 * self.weight.abs().mean() / math.sqrt(Qp)) self.init_state.fill_(1) # lsq+ init # m,
v = self.weight.abs().mean(), self.weight.abs().std() # self.alpha.data.copy_(torch.max(torch.abs(m - 3*v), torch.abs(m + 3*v)) / 2 ** (self.nbits - 1) ) assert self.init_state == 1 with torch.no_grad(): g = 1.0 / math.sqrt(self.weight.numel() * Qp) # g = 1.0 / math.sqrt(self.weight.numel()) / Qp # g = 1.0 / math.sqrt(self.weight.numel()) / 4 self.alpha.data.clamp_(min=1e-4) # Method1: alpha = grad_scale(self.alpha, g) # w_q = round_pass((self.weight / alpha).clamp(Qn, Qp)) * alpha # w_q = clamp(round_pass(self.weight / alpha), Qn, Qp) * alpha w_q = round_pass((self.weight / alpha).clamp(Qn, Qp)) * alpha # Method2: # w_q = FunLSQ.apply(self.weight, self.alpha, g, Qn, Qp) x = self.act(x) # add noise @ w_q if self.enable_linear_noise and self.input_noise_std > 1e-5: w_q = self.add_input_noise(w_q) if not self.training and self.phase_noise_std > 1e-5 and self.enable_linear_noise: noise_w_q_2 = 0 noise_x_2 = 0 if self.kappa_noise is not None: if self.kappa_noise_term.device != x.device: self.kappa_noise_term = self.kappa_noise_term.to(x.device) # obtain the scaling number alpha_x_to_w = self.act.alpha / alpha noise_x_2 = torch.matmul(x.square(), self.kappa_noise_term.unsqueeze(-1)) /(alpha_x_to_w * kappa_noise_scale_factor) # [bs, seq, 1] noise_w_q_2 = torch.matmul(w_q.square(), -self.kappa_noise_term.unsqueeze(-1))* (alpha_x_to_w / kappa_noise_scale_factor) # [output_features, 1] dim_3_flag = False if x.dim() == 3: dim_3_flag = True bs, N, D = x.shape bs = bs * N x = x.reshape(-1, D) else: bs, D = x.shape out = [] k = 2 num_chunks = self.out_features//k for i in range(k): if self.out_features%k != 0: raise RuntimeError noisy_x = self.add_phase_noise(x.unsqueeze(-2).expand(-1, num_chunks, -1)) out.append(torch.einsum('ibk, bk->ib', noisy_x, w_q[i * num_chunks: (i+1) * num_chunks, :])) out = torch.cat(out, 1) if self.bias is not None: out += self.bias if dim_3_flag: out = out.reshape(-1, N, self.out_features) out = out + (noise_x_2 + noise_w_q_2.squeeze(-1)) # add [bs, seq, 1] and [1, output_features] else: out = F.linear(x, w_q, self.bias) # add output noise if self.enable_linear_noise and self.output_noise_std > 1e-5: out = self.add_output_noise(out) return out
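The quantizers in this record all reduce to the same LSQ recipe: scale alpha's gradient by g, fake-quantize with a straight-through round, and dequantize. A self-contained sketch of that step, assuming only torch (grad_scale/round_pass match the record's helpers; the top-level wrapper name is illustrative):

import math
import torch

def grad_scale(x, scale):
    # forward: x, backward: grad * scale (scales alpha's gradient by g)
    return (x - x * scale).detach() + x * scale

def round_pass(x):
    # forward: round(x), backward: identity (straight-through estimator)
    return (x.round() - x).detach() + x

def lsq_fake_quant(w: torch.Tensor, alpha: torch.Tensor, nbits: int = 8) -> torch.Tensor:
    qn, qp = -2 ** (nbits - 1), 2 ** (nbits - 1) - 1
    g = 1.0 / math.sqrt(w.numel() * qp)  # gradient scale from the LSQ paper
    a = grad_scale(alpha, g)
    return round_pass((w / a).clamp(qn, qp)) * a  # quantize, then dequantize

w = torch.randn(64, 64, requires_grad=True)
alpha = torch.tensor(2 * w.abs().mean().item() / math.sqrt(127), requires_grad=True)
lsq_fake_quant(w, alpha).sum().backward()
print(alpha.grad is not None, w.grad is not None)  # True True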
class QuantAct(_ActQ):
3
2023-11-14 05:55:48+00:00
8k
davidhozic/TkClassWizard
tkclasswiz/object_frame/frame_struct.py
[ { "identifier": "Messagebox", "path": "tkclasswiz/messagebox.py", "snippet": "class Messagebox:\r\n \"\"\"\r\n Wrapper for some of Messagebox methods, that offers compatibility between\r\n ttk and ttkbootstrap.\r\n \"\"\"\r\n def _process_kwargs(kwargs):\r\n if \"master\" in kwargs:\r\n kwargs['parent'] = kwargs[\"master\"]\r\n del kwargs[\"master\"]\r\n\r\n if TTKBOOT_INSTALLED:\r\n @classmethod\r\n def yesnocancel(cls, title: str, message: str, **kwargs):\r\n cls._process_kwargs(kwargs)\r\n r = BootMb.yesnocancel(message, title, **kwargs)\r\n if r is not None and r != 'Cancel':\r\n return r == 'Yes'\r\n \r\n @classmethod\r\n def show_error(cls, title: str, message: str, **kwargs):\r\n cls._process_kwargs(kwargs)\r\n BootMb.show_error(message, title, **kwargs)\r\n\r\n @classmethod\r\n def show_info(cls, title: str, message: str, **kwargs):\r\n cls._process_kwargs(kwargs)\r\n BootMb.show_info(message, title, **kwargs)\r\n else:\r\n @classmethod\r\n def yesnocancel(cls, title: str, message: str, **kwargs):\r\n cls._process_kwargs(kwargs)\r\n return mb.askyesnocancel(title, message, **kwargs)\r\n\r\n @classmethod\r\n def show_error(cls, title: str, message: str, **kwargs):\r\n cls._process_kwargs(kwargs)\r\n mb.showerror(title, message, **kwargs)\r\n\r\n @classmethod\r\n def show_info(cls, title: str, message: str, **kwargs):\r\n cls._process_kwargs(kwargs)\r\n mb.showinfo(title, message, **kwargs)" }, { "identifier": "extendable", "path": "tkclasswiz/extensions.py", "snippet": "@doc_category(\"Extensions\")\r\ndef extendable(obj: Union[T, list]) -> T:\r\n \"\"\"\r\n Decorator that makes the obj extendable.\r\n\r\n It wraps the ``obj``, which is a class or a function, into an extension object.\r\n The extension object will adds 3 methods to the original class or function:\r\n\r\n - ``register_pre_extension``\r\n - ``register_post_extension``\r\n - ``get_extensions``\r\n \r\n The ``get_extensions`` method just returns the list of registered \r\n extensions (:class:`tkclasswiz.extensions.Extension`).\r\n\r\n The ``register_pre_extension`` and ``register_post_extension`` methods allow users to extend\r\n the functionality of original tkclass wiz classes or functions.\r\n They accept the extension (:class:`tkclasswiz.extensions.Extension`) parameter.\r\n\r\n Pre-extensions (``register_pre_extension``) get activated / called before the original ``__init__`` method / \r\n before the original function and accept the ``loader`` of the extension must accept the same arguments\r\n as the original ``__init__`` method / original function.\r\n\r\n Post-extensions differ a bit if the thing being extended is a class or a function.\r\n They both have in common that they get activated after the original ``__init__`` method call / original function\r\n call, but they differ in the arguments they receive:\r\n\r\n - In the case of the extended is a class,\r\n the extension ``loader`` accepts the same arguments as the ``__init__`` method receives.\r\n - In the case of the extended is a function,\r\n the extension ``loader`` accepts the same arguments as the original function and an additional parameter,\r\n which is the result of the original function call. 
The result parameter is passed to the ``loader`` as the\r\n last positional argument.\r\n\r\n\r\n Parameters\r\n ---------------\r\n obj: T\r\n Function or a class that can be extended.\r\n \"\"\"\r\n\r\n if DOCUMENTATION_MODE:\r\n return obj\r\n\r\n if isclass(obj):\r\n @wraps(obj, updated=[])\r\n class ExtendableClass(obj):\r\n __reg_post_ext__ = []\r\n __reg_pre_ext__ = []\r\n\r\n def __init__(self, *args, **kwargs):\r\n for extension in ExtendableClass.__reg_pre_ext__:\r\n extension(self, *args, **kwargs)\r\n\r\n super().__init__(*args, **kwargs)\r\n\r\n extension: Extension\r\n for extension in ExtendableClass.__reg_post_ext__:\r\n extension(self, *args, **kwargs)\r\n\r\n @classmethod\r\n def register_pre_extension(cls, extension: Extension):\r\n cls.__reg_pre_ext__.append(extension)\r\n\r\n @classmethod\r\n def register_post_extension(obj, extension: Extension):\r\n obj.__reg_post_ext__.append(extension)\r\n\r\n @classmethod\r\n def get_extensions(obj):\r\n return obj.__reg_pre_ext__, obj.__reg_post_ext__[:]\r\n\r\n return ExtendableClass\r\n else:\r\n class ExtendableFunction:\r\n __reg_post_ext__ = []\r\n __reg_pre_ext__ = []\r\n\r\n def __init__(self, bind: object = None) -> None:\r\n self.bind = bind\r\n\r\n def __call__(self, *args, **kwargs):\r\n if self.bind is not None:\r\n extra_args = (self.bind,) # self reference\r\n else:\r\n extra_args = ()\r\n\r\n for ext in ExtendableFunction.__reg_pre_ext__:\r\n ext(*extra_args, *args, **kwargs)\r\n\r\n r = obj(*extra_args, *args, **kwargs)\r\n \r\n for ext in ExtendableFunction.__reg_post_ext__:\r\n r = ext(*extra_args, *args, r, **kwargs)\r\n\r\n return r\r\n\r\n def __get__(self, instance, cls):\r\n # Bind the wrapper callable object into a callable object \"instance\"\r\n return ExtendableFunction(instance)\r\n\r\n @classmethod\r\n def register_pre_extension(cls, extension: Extension):\r\n cls.__reg_pre_ext__.append(extension)\r\n\r\n @classmethod\r\n def register_post_extension(cls, extension: Extension):\r\n cls.__reg_post_ext__.append(extension)\r\n\r\n @classmethod\r\n def get_extensions(obj):\r\n return obj.__reg_pre_ext__, obj.__reg_post_ext__[:]\r\n\r\n return ExtendableFunction()\r" }, { "identifier": "get_annotations", "path": "tkclasswiz/annotations.py", "snippet": "@doc_category(\"Annotations\")\r\ndef get_annotations(class_) -> dict:\r\n \"\"\"\r\n Returns class / function annotations including the ones extended with ``register_annotations``.\r\n It does not return the return annotation.\r\n\r\n Additionally, this function resolves any generic types to their parameterized types, but\r\n only for classes, functions don't support this yet as support for generics on functions was added\r\n in Python 3.12.\r\n \"\"\"\r\n annotations = {}\r\n with suppress(AttributeError):\r\n if isclass(class_):\r\n annotations = class_.__init__.__annotations__.copy()\r\n elif isclass(origin_class := get_origin(class_)) and issubclass(origin_class, Generic):\r\n # Resolve generics\r\n annotations = origin_class.__init__.__annotations__.copy()\r\n generic_types = get_args(origin_class.__orig_bases__[0])\r\n generic_values = get_args(class_)\r\n generic_name_value = {generic_types[i]: generic_values[i] for i in range(len(generic_types))}\r\n for k, v in annotations.items():\r\n annotations[k] = generic_name_value.get(v, v)\r\n else:\r\n annotations = class_.__annotations__.copy()\r\n\r\n additional_annotations = ADDITIONAL_ANNOTATIONS.get(class_, {})\r\n annotations = {**annotations, **additional_annotations}\r\n\r\n if \"return\" in 
annotations:\r\n del annotations[\"return\"]\r\n\r\n return annotations\r" }, { "identifier": "doc_category", "path": "tkclasswiz/doc.py", "snippet": "def doc_category(\n cat: str,\n manual: Optional[bool] = False,\n path: Optional[str] = None,\n api_type: Literal[\"Program\", \"HTTP\"] = \"Program\"\n):\n \"\"\"\n Used to mark the object for documentation.\n Objects marked with this decorator function will\n have :mod:`sphinx.ext.autodoc` directives generated automatically.\n\n Parameters\n ------------\n cat: str\n The name of the category to put this in.\n manual: Optional[bool]\n Generate ``function`` directives instead of ``autofunction``.\n Should be used when dealing with overloads.\n path: Optional[str]\n Custom path to the object.\n api_type: Literal[\"Program\", \"HTTP\"]\n The type of API, the documented item belongs to.\n Defaults to 'Program'\n \"\"\"\n def _category(item): # item == class or function\n if DOCUMENTATION_MODE:\n cat_map[api_type][cat].append((item, manual, path))\n return item\n\n if DOCUMENTATION_MODE:\n if cat not in cat_map[api_type]:\n cat_map[api_type][cat] = []\n\n return _category" }, { "identifier": "ComboboxTooltip", "path": "tkclasswiz/object_frame/tooltip.py", "snippet": "class ComboboxTooltip(BaseToolTip):\r\n def __init__(self, widget: tk.Widget, timeout_ms: int = 500):\r\n super().__init__(widget, timeout_ms)\r\n self._widget.bind(\"<Enter>\", self._schedule)\r\n self._widget.bind(\"<Leave>\", self._cancel_schedule)\r\n\r\n def _get_value(self):\r\n value = self._widget.get()\r\n return str(value)\r" } ]
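The `extendable` decorator shown in this record's context wraps a class so that registered extensions run immediately before and after the original `__init__`. A stripped-down sketch of that hook pattern (the function branch is omitted; names are illustrative):

from functools import wraps

def extendable(cls):
    @wraps(cls, updated=[])
    class Extendable(cls):
        _pre, _post = [], []

        def __init__(self, *args, **kwargs):
            for ext in Extendable._pre:    # pre-extensions see the raw args
                ext(self, *args, **kwargs)
            super().__init__(*args, **kwargs)
            for ext in Extendable._post:   # post-extensions run on the built object
                ext(self, *args, **kwargs)

        @classmethod
        def register_pre_extension(cls, ext):
            cls._pre.append(ext)

        @classmethod
        def register_post_extension(cls, ext):
            cls._post.append(ext)

    return Extendable

@extendable
class Widget:
    def __init__(self, name):
        self.name = name

Widget.register_post_extension(lambda self, *a, **kw: print("built", self.name))
Widget("demo")  # prints: built demo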
from typing import get_args, get_origin, Iterable, Union, Literal, Dict, Tuple from collections.abc import Iterable as ABCIterable from functools import partial from enum import Enum from ..convert import * from ..dpi import * from ..utilities import * from ..storage import * from ..messagebox import Messagebox from ..extensions import extendable from ..annotations import get_annotations from ..doc import doc_category from .frame_base import * from .tooltip import ComboboxTooltip import tkinter as tk import tkinter.ttk as ttk import tkinter.filedialog as tkfile import inspect import copy import json
3,656
def __init__( self, class_, return_widget: Union[ComboBoxObjects, ListBoxScrolled], parent = None, old_data: ObjectInfo = None, check_parameters: bool = True, allow_save = True, additional_values: dict = {}, _annotations_override: dict = None, ): super().__init__(class_, return_widget, parent, old_data, check_parameters,allow_save) self._map: Dict[str, Tuple[ComboBoxObjects, Iterable[type]]] = {} dpi_5 = dpi_scaled(5) dpi_5_h = dpi_5 // 2 if not (annotations := _annotations_override or get_annotations(class_)): raise TypeError("This object cannot be edited.") # Template @gui_except(window=self) def save_template(): filename = tkfile.asksaveasfilename(filetypes=[("JSON", "*.json")], parent=self) if filename == "": return json_data = convert_to_dict(self.to_object(ignore_checks=True)) if not filename.endswith(".json"): filename += ".json" with open(filename, "w", encoding="utf-8") as file: json.dump(json_data, file, indent=2) Messagebox.show_info("Finished", f"Saved to {filename}", parent=self) @gui_except(window=self) def load_template(): filename = tkfile.askopenfilename(filetypes=[("JSON", "*.json")], parent=self) if filename == "": return with open(filename, "r", encoding="utf-8") as file: json_data: dict = json.loads(file.read()) object_info = convert_from_dict(json_data) # Get class_ attribute if we have the ObjectInfo type, if not just compare the actual type if object_info.class_ is not self.class_: raise TypeError( f"The selected template is not a {self.class_.__name__} template.\n" f"The requested template is for type: {object_info.class_.__name__}!" ) self.load(object_info) bnt_menu_template = ttk.Menubutton(self.frame_toolbar, text="Template") menu = tk.Menu(bnt_menu_template) menu.add_command(label="Load template", command=load_template) menu.add_command(label="Save template", command=save_template) bnt_menu_template.configure(menu=menu) bnt_menu_template.pack(side="left") # Nickname entry self.entry_nick = HintedEntry( "Object nickname", self.frame_main, state="normal" if self.allow_save else "disabled" ) self.entry_nick.pack(anchor=tk.W, padx=dpi_5_h, pady=dpi_5) def fill_values(k: str, entry_types: list, menu: tk.Menu, combo: ComboBoxObjects): "Fill ComboBox values based on types in ``entry_types`` and create New <object_type> buttons" any_filled = False for entry_type in entry_types: if get_origin(entry_type) is Literal: values = get_args(entry_type) combo["values"] = values # tkvalid.add_option_validation(combo, values) elif entry_type is bool: combo.insert(tk.END, True) combo.insert(tk.END, False) # tkvalid.add_option_validation(combo, ["True", "False", '']) elif issubclass_noexcept(entry_type, Enum): combo["values"] = values = [en for en in entry_type] # tkvalid.add_option_validation(combo, list(map(str, values)) + ['']) elif entry_type is type(None): if bool not in entry_types: combo.insert(tk.END, None) else: # Type not supported, try other types any_filled = True if self.allow_save: menu.add_command( label=f"New {self.get_cls_name(entry_type)}", command=partial(self.new_object_frame, entry_type, combo) ) # Additional values to be inserted into ComboBox for value in additional_values.get(k, []): combo.insert(tk.END, value) # The class of last list like type. 
Needed when "Edit selected" is used # since we don't know what type it was return any_filled max_attr_name_len = max(*map(len, annotations), 15) - 2 for (k, v) in annotations.items(): # Init widgets entry_types = self.convert_types(v) frame_annotated = ttk.Frame(self.frame_main) frame_annotated.pack(fill=tk.BOTH, expand=True, pady=dpi_5) ttk.Label(frame_annotated, text=k, width=max_attr_name_len).pack(side="left") bnt_new_menu = ttk.Menubutton(frame_annotated, text="New") menu_new = tk.Menu(bnt_new_menu) bnt_new_menu.configure(menu=menu_new) # Storage widget with the tooltip for displaying # nicknames on ObjectInfo instances w = combo = ComboBoxObjects(frame_annotated)
__all__ = ( "NewObjectFrameStruct", "NewObjectFrameStructView", ) @extendable @doc_category("Object frames") class NewObjectFrameStruct(NewObjectFrameBase): """ Frame for inside the :class:`ObjectEditWindow` that allows object definition. Parameters ------------- class_: Any The class we are defining for. return_widget: ComboBoxObjects | ListBoxScrolled The widget to insert the ObjectInfo into after saving. parent: TopLevel The parent window. old_data: ObjectInfo The old_data ObjectInfo object to edit. check_parameters: bool Check parameters (by creating the real object) upon saving. This is ignored if editing a function instead of a class. allow_save: bool If False, will open in read-only mode. additional_values: Dict[str, Any] A mapping of additional values to be inserted into corresponding field. """ def __new__(cls, *args, **kwargs): if kwargs.get("allow_save", True): obj = super().__new__(NewObjectFrameStruct) else: obj = super().__new__(NewObjectFrameStructView) return obj def __init__( self, class_, return_widget: Union[ComboBoxObjects, ListBoxScrolled], parent = None, old_data: ObjectInfo = None, check_parameters: bool = True, allow_save = True, additional_values: dict = {}, _annotations_override: dict = None, ): super().__init__(class_, return_widget, parent, old_data, check_parameters,allow_save) self._map: Dict[str, Tuple[ComboBoxObjects, Iterable[type]]] = {} dpi_5 = dpi_scaled(5) dpi_5_h = dpi_5 // 2 if not (annotations := _annotations_override or get_annotations(class_)): raise TypeError("This object cannot be edited.") # Template @gui_except(window=self) def save_template(): filename = tkfile.asksaveasfilename(filetypes=[("JSON", "*.json")], parent=self) if filename == "": return json_data = convert_to_dict(self.to_object(ignore_checks=True)) if not filename.endswith(".json"): filename += ".json" with open(filename, "w", encoding="utf-8") as file: json.dump(json_data, file, indent=2) Messagebox.show_info("Finished", f"Saved to {filename}", parent=self) @gui_except(window=self) def load_template(): filename = tkfile.askopenfilename(filetypes=[("JSON", "*.json")], parent=self) if filename == "": return with open(filename, "r", encoding="utf-8") as file: json_data: dict = json.loads(file.read()) object_info = convert_from_dict(json_data) # Get class_ attribute if we have the ObjectInfo type, if not just compare the actual type if object_info.class_ is not self.class_: raise TypeError( f"The selected template is not a {self.class_.__name__} template.\n" f"The requested template is for type: {object_info.class_.__name__}!" 
) self.load(object_info) bnt_menu_template = ttk.Menubutton(self.frame_toolbar, text="Template") menu = tk.Menu(bnt_menu_template) menu.add_command(label="Load template", command=load_template) menu.add_command(label="Save template", command=save_template) bnt_menu_template.configure(menu=menu) bnt_menu_template.pack(side="left") # Nickname entry self.entry_nick = HintedEntry( "Object nickname", self.frame_main, state="normal" if self.allow_save else "disabled" ) self.entry_nick.pack(anchor=tk.W, padx=dpi_5_h, pady=dpi_5) def fill_values(k: str, entry_types: list, menu: tk.Menu, combo: ComboBoxObjects): "Fill ComboBox values based on types in ``entry_types`` and create New <object_type> buttons" any_filled = False for entry_type in entry_types: if get_origin(entry_type) is Literal: values = get_args(entry_type) combo["values"] = values # tkvalid.add_option_validation(combo, values) elif entry_type is bool: combo.insert(tk.END, True) combo.insert(tk.END, False) # tkvalid.add_option_validation(combo, ["True", "False", '']) elif issubclass_noexcept(entry_type, Enum): combo["values"] = values = [en for en in entry_type] # tkvalid.add_option_validation(combo, list(map(str, values)) + ['']) elif entry_type is type(None): if bool not in entry_types: combo.insert(tk.END, None) else: # Type not supported, try other types any_filled = True if self.allow_save: menu.add_command( label=f"New {self.get_cls_name(entry_type)}", command=partial(self.new_object_frame, entry_type, combo) ) # Additional values to be inserted into ComboBox for value in additional_values.get(k, []): combo.insert(tk.END, value) # The class of last list like type. Needed when "Edit selected" is used # since we don't know what type it was return any_filled max_attr_name_len = max(*map(len, annotations), 15) - 2 for (k, v) in annotations.items(): # Init widgets entry_types = self.convert_types(v) frame_annotated = ttk.Frame(self.frame_main) frame_annotated.pack(fill=tk.BOTH, expand=True, pady=dpi_5) ttk.Label(frame_annotated, text=k, width=max_attr_name_len).pack(side="left") bnt_new_menu = ttk.Menubutton(frame_annotated, text="New") menu_new = tk.Menu(bnt_new_menu) bnt_new_menu.configure(menu=menu_new) # Storage widget with the tooltip for displaying # nicknames on ObjectInfo instances w = combo = ComboBoxObjects(frame_annotated)
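Distilled from the save_template/load_template closures above: a template is the edited object converted to a dict, dumped as JSON, and type-checked against the target class on the way back in. A minimal sketch, with `to_dict`/`from_dict` standing in for the module's convert_to_dict/convert_from_dict:

import json

def save_template(obj_info, filename: str, to_dict) -> None:
    # Serialize the edited object to a JSON template; `to_dict` stands in for
    # the module's convert_to_dict.
    if not filename.endswith(".json"):
        filename += ".json"
    with open(filename, "w", encoding="utf-8") as file:
        json.dump(to_dict(obj_info), file, indent=2)

def load_template(filename: str, expected_class, from_dict):
    # Deserialize and refuse templates saved for a different class, mirroring
    # the `object_info.class_ is not self.class_` check above.
    with open(filename, "r", encoding="utf-8") as file:
        obj_info = from_dict(json.load(file))
    if obj_info.class_ is not expected_class:
        raise TypeError(f"The selected template is not a {expected_class.__name__} template.")
    return obj_info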
ComboboxTooltip(w)
4
2023-11-14 09:26:01+00:00
8k
raphaelreme/koft
src/simulator/simulator.py
[ { "identifier": "GlobalDriftAndRotation", "path": "src/simulator/motion.py", "snippet": "class GlobalDriftAndRotation:\n \"\"\"Global drift and rotation for all particles (Rigid motion)\n\n Note: With the current bad implementation, i'm not able to make this fit with other motions.\n Lets not make it inherit Motion and be given independently to the simulator\n\n Drift follow a spring (prevent all the particles to go out of focus) with a large period (slow vs the local motion)\n\n Rotation is also a spring (more for continuous reason) with the same period\n \"\"\"\n\n def __init__(self, mass_center: torch.Tensor, period=1000.0, noise_position=0.0, noise_theta=0.0) -> None:\n super().__init__()\n self.spring = springs.RandomAcceleratedSpring.build(\n torch.cat((mass_center, torch.ones(1))),\n torch.tensor([0.5]),\n torch.tensor([noise_position, noise_position, noise_theta]),\n torch.tensor(2 * torch.pi / period),\n )\n self.global_theta = torch.tensor(0.0)\n self.global_translation = torch.tensor([0.0, 0.0])\n\n @property\n def transformation(self) -> torch.Tensor:\n return torch.tensor(\n [\n [torch.cos(self.global_theta), -torch.sin(self.global_theta), self.global_translation[0]],\n [torch.sin(self.global_theta), torch.cos(self.global_theta), self.global_translation[1]],\n [0.0, 0.0, 1.0],\n ]\n )\n\n def update(self) -> None:\n old_state = self.spring.value.clone()\n self.spring.update()\n # Add to translation the new motion of the center of mass\n self.global_translation += self.spring.value[:2] - old_state[:2]\n\n # New rotation=\n theta = self.spring.value[2] - old_state[2]\n rotation = torch.tensor(\n [\n [torch.cos(theta), -torch.sin(theta)],\n [torch.sin(theta), torch.cos(theta)],\n ]\n )\n\n self.global_translation -= self.spring.value[:2]\n self.global_translation = rotation @ self.global_translation\n self.global_translation += self.spring.value[:2]\n self.global_theta += theta\n\n def apply(self, particles: GaussianParticles) -> None:\n particles.mu = self.apply_tensor(particles.mu)\n\n def apply_tensor(self, points: torch.Tensor) -> torch.Tensor:\n homogenous = torch.cat((points, torch.ones(points.shape[0])[:, None]), dim=-1) # N, 3\n homogenous = homogenous @ self.transformation.T\n\n return homogenous[:, :2]\n\n def revert(self, particles: GaussianParticles) -> None:\n particles.mu = self.revert_tensor(particles.mu)\n\n def revert_tensor(self, points: torch.Tensor) -> torch.Tensor:\n transformation_inv = self.transformation\n transformation_inv[:2, :2] = transformation_inv[:2, :2].clone().T\n transformation_inv[:2, 2] = -transformation_inv[:2, :2] @ transformation_inv[:2, 2]\n\n homogenous = torch.cat((points, torch.ones(points.shape[0])[:, None]), dim=-1) # N, 3\n homogenous = homogenous @ transformation_inv.T\n\n return homogenous[:, :2]" }, { "identifier": "GlobalMotionConfig", "path": "src/simulator/motion.py", "snippet": "class GlobalMotionConfig:\n period: float = 1000.0\n noise_position: float = 0.0\n noise_theta: float = 0.0" }, { "identifier": "MotionConfig", "path": "src/simulator/motion.py", "snippet": "class MotionConfig:\n motions: List[str]\n ShapeVariation: ShapeVariationConfig = ShapeVariationConfig()\n LocalRotation: LocalRotationConfig = LocalRotationConfig()\n BrownianRotation: BrownianRotationConfig = BrownianRotationConfig()\n FlowMotion: FlowMotionConfig = FlowMotionConfig()\n ElasticMotion: ElasticMotionConfig = ElasticMotionConfig()\n\n def build(self, **kwargs) -> \"MultipleMotion\":\n motions = []\n for motion_name in self.motions:\n sub_cfg = 
getattr(self, motion_name)\n if motion_name in (\"ShapeVariation\", \"LocalRotation\"):\n motions.append(sub_cfg.build(particles=kwargs[\"particles\"]))\n if kwargs.get(\"background\"):\n motions.append(sub_cfg.build(particles=kwargs[\"background\"]))\n\n motions.append(sub_cfg.build(**kwargs))\n\n return MultipleMotion(motions)" }, { "identifier": "MultipleMotion", "path": "src/simulator/motion.py", "snippet": "class MultipleMotion(BaseMotion):\n \"\"\"Handle multiple motions to apply to particles/backgrounds\n\n The current implementation of motions is not very robust. To prevent bugs, you should avoid\n having multiple motions handling the same parameter (mu, theta, std).\n \"\"\"\n\n def __init__(self, motions: Iterable[BaseMotion]) -> None:\n super().__init__()\n self.motions = motions\n\n def update(self) -> None:\n for motion in self.motions:\n motion.update()\n\n def apply(self, particles: GaussianParticles) -> None:\n for motion in self.motions:\n motion.apply(particles)\n\n def warm_up(self, warm_up: int, particles: GaussianParticles, background: Optional[GaussianParticles]) -> None:\n for motion in self.motions:\n motion.warm_up(warm_up, particles, background)" }, { "identifier": "GaussianParticles", "path": "src/simulator/particle.py", "snippet": "class GaussianParticles:\n \"\"\"Handle multiple gaussian particles\n\n Each particle is defined by a position, deviation and angle (mu, std, theta).\n\n mu is the 2d center of the particle.\n std is standart deviation along the axis of the ellipse.\n theta is the rotation angle from the horizontal axis.\n\n Attributes:\n size (Tuple[int, int]): Size of the image generated\n mu (torch.Tensor): Positions of the spots\n Shape: (n, 2), dtype: float32\n std (torch.Tensor): Uncorrelated stds\n Shape: (n, 2), dtype: float32\n theta (torch.Tensor): Rotation of each spot\n Shape: (n,), dtype: float32\n weight (torch.Tensor): Weight of each spot (proportional to intensity)\n Shape: (n,), dtype: float32\n \"\"\"\n\n def __init__(self, n: int, mask: torch.Tensor, min_std: float, max_std: float):\n \"\"\"Constructor\n\n Args:\n n (int): Number of particles to generate. Note that due to random masking, the true number\n of particles generated is not exactly n (and is stored in self._n)\n mask (torch.Tensor): Boolean mask where to generate particles in the image\n self.size is extracted from it.\n Shape: (H, W), dtype: bool\n min_std, max_std (float): Minimum/Maximum std. 
Stds are generated uniformly between these values\n\n \"\"\"\n self.size = (mask.shape[0], mask.shape[1])\n\n mask_proportion = mask.sum() / mask.numel()\n self.mu = torch.rand(int(n / mask_proportion.item()), 2) * torch.tensor(self.size)\n self.mu = self.mu[mask[self.mu[:, 0].long(), self.mu[:, 1].long()]]\n\n self._n = self.mu.shape[0]\n\n self.weight = torch.ones(self._n)\n self.std = min_std + torch.rand(self._n, 2) * (max_std - min_std)\n self.theta = torch.rand(self._n) * torch.pi\n\n self.build_distribution()\n\n def filter_close_particles(self, min_dist: float) -> None:\n \"\"\"Drop too close particles based on mahalanobis distance\n\n Args:\n min_dist (float): Minimum mahalanobis distance between two particles\n \"\"\"\n dist = _fast_mahalanobis_pdist(\n self.mu.numpy(),\n self._distribution.precision_matrix.contiguous().numpy(),\n min_dist * self.std.max().item() * 2,\n )\n valid = _fast_valid(dist, min_dist)\n\n self.mu = self.mu[valid]\n\n self._n = self.mu.shape[0]\n self.weight = self.weight[valid]\n self.std = self.std[valid]\n self.theta = self.theta[valid]\n\n self.build_distribution()\n print(f\"Filtered particles from {valid.shape[0]} to {self._n}\")\n\n def build_distribution(self):\n \"\"\"Rebuild the distributions\n\n To be called each time a modification is made to mu, std or theta\n \"\"\"\n rot = torch.empty((self._n, 2, 2), dtype=torch.float32)\n rot[:, 0, 0] = torch.cos(self.theta)\n rot[:, 0, 1] = torch.sin(self.theta)\n rot[:, 1, 0] = -rot[:, 0, 1]\n rot[:, 1, 1] = rot[:, 0, 0]\n\n sigma = torch.zeros((self._n, 2, 2), dtype=torch.float32)\n sigma[:, 0, 0] = self.std[:, 0].pow(2)\n sigma[:, 1, 1] = self.std[:, 1].pow(2)\n sigma = rot @ sigma @ rot.permute(0, 2, 1)\n sigma = (sigma + sigma.permute(0, 2, 1)) / 2 # prevent some floating error leading to non inversible matrix\n\n self._distribution = torch.distributions.MultivariateNormal(self.mu, sigma)\n # NOTE: A gmm could be created with torch.distributions.MixtureSameFamily(\n # torch.distributions.Categorical(self.weight * self.std.prod(dim=-1)),\n # self._distribution\n # )\n # This is not faster nor easier to use\n # torch.distributions.MixtureSameFamily(\n # torch.distributions.Categorical(self.weight * self.std.prod(dim=-1)),\n # self._distribution\n # )\n\n def draw_sample(self, n=20000, blur=0.0) -> torch.Tensor:\n \"\"\"Draw an image of the particles\n\n The generation starts from a black image where we add at each sample location the weights of its particle.\n A blurring process can be added to smooth the results (With smaller n).\n\n Args:\n n (int): Number of samples by particles\n blur (float): std of the blurring process\n Default: 0.0 (No blurring)\n\n Returns:\n torch.Tensor: Image of the particles\n Shape: (H, W), dtype: float32\n \"\"\"\n samples = self._distribution.sample((n,)) # type: ignore\n samples = samples.round().long().permute(1, 0, 2) # Shape: self._n, n, 2\n weight = self.weight * self.std.prod(dim=-1) # By default, smaller gaussian spots have higher intensities.\n image = _fast_draw(samples.numpy(), self.size, weight.numpy())\n if blur > 0:\n image = cv2.GaussianBlur(image, (55, 55), blur, blur)\n return torch.tensor(image) / n\n\n def draw_poisson(self, dt=100.0, scale=1) -> torch.Tensor:\n \"\"\"Draw from ground truth with Poisson Shot noise\n\n Args:\n dt (float): Integration interval\n Default: 100.0\n scale (int): Down scaling to compute true pdf\n Default: 1\n\n Returns:\n torch.Tensor: Image of the particles\n Shape: (H, W), dtype: float32\n \"\"\"\n return 
torch.distributions.Poisson(dt * self.draw_truth(scale)).sample((1,))[0] / dt\n\n def draw_truth(self, scale=1) -> torch.Tensor:\n \"\"\"Draw the ground truth image, where the intensities are the pdf of the mixture of gaussians\n\n I(x) = \\\\sum_i w_i N(x; \\\\mu_i, \\\\Sigma_i)\n\n Args:\n scale (int): Downscale the image to make the computation faster\n Default: 1\n\n Returns:\n torch.Tensor: Pdf of the particles\n Shape: (H // scale, W // scale), dtype: float32\n \"\"\"\n indices = torch.tensor(np.indices((self.size[0] // scale, self.size[1] // scale)))\n indices = indices.permute(1, 2, 0).to(torch.float32) * scale\n\n truth = gmm_pdf(\n indices.reshape(-1, 2), self.mu, self._distribution.precision_matrix, self.weight, self.std.max().item() * 5\n ).cpu() # Limit to 5 times the std\n\n return torch.nn.functional.interpolate(\n truth.reshape((1, 1, self.size[0] // scale, self.size[1] // scale)), size=self.size, mode=\"bilinear\"\n )[0, 0]" }, { "identifier": "GaussianParticlesConfig", "path": "src/simulator/particle.py", "snippet": "class GaussianParticlesConfig:\n n: int\n min_std: float\n max_std: float\n min_dist: float = 0" }, { "identifier": "NeuronalActivityModel", "path": "src/simulator/nam.py", "snippet": "class NeuronalActivityModel:\n \"\"\"Simple Neuronal Activity Model (nam)\n\n The neurons are supposed independent (untrue) following a simple generation process:\n i(t+1) = decay * i(t) + gain * firing\n\n We add some complexity to prevent some behaviors:\n - The gain of each firing depends on the actual value of the neurons\n (Large value, small gain to prevent reaching MAX_WEIGHT)\n - The added value is then sampled from N(gain, (gain / 5)**2)\n - The real weights retained for neurons intensity is an EMA of the computed weights\n This is done to mimic natural firings where the intensity does not jump in a single frame\n\n Attributes:\n particles (GaussianParticles): The particles to handle\n firing_rate (float): Firing rates of particles\n decay (float): Exponential decay of the weights\n immediate_weights (torch.Tensor): Weights without the EMA (Smoothing)\n\n \"\"\"\n\n MAX_WEIGHT = 2\n MIN_WEIGHT = 0.1 # Minimal baseline for particles\n FIRING_GAIN = 1.0\n SMOOTHING_DECAY = 0.75 # EMA factor to prevent hard firings\n\n def __init__(self, particles: particle.GaussianParticles, firing_rate=0.01, decay=0.95):\n self.firing_rate = firing_rate\n self.decay = decay\n self.particles = particles\n self.immediate_weight = particles.weight.clone()\n\n def update(self):\n self.immediate_weight *= self.decay\n firing = torch.rand(self.immediate_weight.shape) < self.firing_rate\n gain = self.FIRING_GAIN - self.immediate_weight[firing] * (self.FIRING_GAIN / self.MAX_WEIGHT)\n self.immediate_weight[firing] += gain + torch.randn(firing.sum()) * gain * 0.2\n\n # Update the particles weights as an EMA with a gamma of SMOOTHING_DELAY\n self.particles.weight.sub_(\n (1.0 - self.SMOOTHING_DECAY) * (self.particles.weight - self.immediate_weight - self.MIN_WEIGHT)\n )\n\n # NOTE: We could add some gaussian noise to weights" }, { "identifier": "NamConfig", "path": "src/simulator/nam.py", "snippet": "class NamConfig:\n firing_rate: float = 0.01\n decay: float = 0.95" } ]
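GaussianParticles.build_distribution in the context above turns per-particle (std, theta) pairs into full covariances via Sigma = R diag(std^2) R^T. A short, runnable sketch of that construction (the standalone function name is illustrative):

import torch

def covariance_from_std_theta(std: torch.Tensor, theta: torch.Tensor) -> torch.Tensor:
    # Per-particle 2x2 covariance: rotate a diagonal covariance by theta.
    n = theta.shape[0]
    rot = torch.empty((n, 2, 2))
    rot[:, 0, 0] = torch.cos(theta)
    rot[:, 0, 1] = torch.sin(theta)
    rot[:, 1, 0] = -rot[:, 0, 1]
    rot[:, 1, 1] = rot[:, 0, 0]
    sigma = torch.diag_embed(std.pow(2))          # (n, 2, 2) diagonal
    sigma = rot @ sigma @ rot.permute(0, 2, 1)
    return (sigma + sigma.permute(0, 2, 1)) / 2   # re-symmetrize against float error

std = torch.rand(5, 2) + 1.0
theta = torch.rand(5) * torch.pi
dist = torch.distributions.MultivariateNormal(
    torch.zeros(5, 2), covariance_matrix=covariance_from_std_theta(std, theta)
)
print(dist.sample((3,)).shape)  # torch.Size([3, 5, 2])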
import dataclasses import cv2 import numpy as np import torch import byotrack from typing import Optional from .motion import GlobalDriftAndRotation, GlobalMotionConfig, MotionConfig, MultipleMotion from .particle import GaussianParticles, GaussianParticlesConfig from .nam import NeuronalActivityModel, NamConfig
4,598
@dataclasses.dataclass class ImagingConfig: dt: float = 100.0 snr: float = 1.5 noise: float = 0.1 @dataclasses.dataclass class VideoConfig: path: str = "" start: int = 0 stop: int = -1 step: int = 1 transform: byotrack.VideoTransformConfig = byotrack.VideoTransformConfig() def open(self) -> Optional[byotrack.Video]: if self.path == "": return None video = byotrack.Video(self.path)[slice(self.start, self.stop, self.step)] video.set_transform(self.transform) return video @dataclasses.dataclass class SimulatorConfig: base_video: VideoConfig imaging_config: ImagingConfig particle: GaussianParticlesConfig background: GaussianParticlesConfig nam: NamConfig motion: MotionConfig global_motion: GlobalMotionConfig warm_up: int = 500 def random_mask(size=1024, verbose=False) -> torch.Tensor: """Generate a random ellipse mask roughly centered in the middle""" thresh = 1 / np.sqrt(2 * torch.pi) ** 2 * np.exp(-1 / 2) # Thresh at 1 sigma (1 mahalanobis) mask_area = torch.rand(1).item() * 0.15 + 0.2 # [0.2, 0.35] ratio = 0.5 + 1.5 * torch.rand(1).item() # [0.5, 2] mean = size / 2 + torch.randn(2) * size / 60 std = torch.tensor([mask_area * ratio / torch.pi, mask_area / ratio / torch.pi]).sqrt() * size distribution = torch.distributions.Normal(mean, std) indices = torch.tensor(np.indices((size, size)), dtype=torch.float32).permute(1, 2, 0) prob = distribution.log_prob(indices).sum(dim=2).exp() * std.prod() mask = prob > thresh if verbose: print(mask.sum() / mask.numel(), mask_area) return mask def mask_from_frame(frame: np.ndarray) -> torch.Tensor: """Find the animal mask using a simple thresholding""" # First blur the image frame = cv2.GaussianBlur(frame, (35, 35), 10, 10) # Set the threshold using the inflexion point of the hist # threshold = np.quantile(frame, 0.8) # Limit the search to threshold resulting in 10 to 40% of the image mini = int((np.quantile(frame, 0.6) * 100).round()) maxi = int((np.quantile(frame, 0.9) * 100).round()) bins = np.array([k / 100 for k in range(101)]) hist, _ = np.histogram(frame.ravel(), bins=bins) # Smoothing of the histogram before inflexion extraction cumsum = np.cumsum(hist) cumsum_pad = np.pad(cumsum, 10 // 2, mode="edge") cumsum_smooth = np.convolve(cumsum_pad, np.ones(10) / 10, mode="valid") argmax = np.gradient(np.gradient(np.gradient(cumsum_smooth)))[mini : maxi + 1].argmax() + mini threshold = bins[argmax + 1] return torch.tensor(frame > threshold) class Simulator: """Simulator object Handle the image generation and temporal evolution. """ def __init__( self,
@dataclasses.dataclass class ImagingConfig: dt: float = 100.0 snr: float = 1.5 noise: float = 0.1 @dataclasses.dataclass class VideoConfig: path: str = "" start: int = 0 stop: int = -1 step: int = 1 transform: byotrack.VideoTransformConfig = byotrack.VideoTransformConfig() def open(self) -> Optional[byotrack.Video]: if self.path == "": return None video = byotrack.Video(self.path)[slice(self.start, self.stop, self.step)] video.set_transform(self.transform) return video @dataclasses.dataclass class SimulatorConfig: base_video: VideoConfig imaging_config: ImagingConfig particle: GaussianParticlesConfig background: GaussianParticlesConfig nam: NamConfig motion: MotionConfig global_motion: GlobalMotionConfig warm_up: int = 500 def random_mask(size=1024, verbose=False) -> torch.Tensor: """Generate a random ellipse mask roughly centered in the middle""" thresh = 1 / np.sqrt(2 * torch.pi) ** 2 * np.exp(-1 / 2) # Thresh at 1 sigma (1 mahalanobis) mask_area = torch.rand(1).item() * 0.15 + 0.2 # [0.2, 0.35] ratio = 0.5 + 1.5 * torch.rand(1).item() # [0.5, 2] mean = size / 2 + torch.randn(2) * size / 60 std = torch.tensor([mask_area * ratio / torch.pi, mask_area / ratio / torch.pi]).sqrt() * size distribution = torch.distributions.Normal(mean, std) indices = torch.tensor(np.indices((size, size)), dtype=torch.float32).permute(1, 2, 0) prob = distribution.log_prob(indices).sum(dim=2).exp() * std.prod() mask = prob > thresh if verbose: print(mask.sum() / mask.numel(), mask_area) return mask def mask_from_frame(frame: np.ndarray) -> torch.Tensor: """Find the animal mask using a simple thresholding""" # First blur the image frame = cv2.GaussianBlur(frame, (35, 35), 10, 10) # Set the threshold using the inflexion point of the hist # threshold = np.quantile(frame, 0.8) # Limit the search to threshold resulting in 10 to 40% of the image mini = int((np.quantile(frame, 0.6) * 100).round()) maxi = int((np.quantile(frame, 0.9) * 100).round()) bins = np.array([k / 100 for k in range(101)]) hist, _ = np.histogram(frame.ravel(), bins=bins) # Smoothing of the histogram before inflexion extraction cumsum = np.cumsum(hist) cumsum_pad = np.pad(cumsum, 10 // 2, mode="edge") cumsum_smooth = np.convolve(cumsum_pad, np.ones(10) / 10, mode="valid") argmax = np.gradient(np.gradient(np.gradient(cumsum_smooth)))[mini : maxi + 1].argmax() + mini threshold = bins[argmax + 1] return torch.tensor(frame > threshold) class Simulator: """Simulator object Handle the image generation and temporal evolution. """ def __init__( self,
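The simulator's draw_poisson (see the GaussianParticles snippet in this record's context) models camera shot noise by sampling photon counts from a Poisson with rate dt * pdf and renormalizing by the exposure. A minimal sketch of that imaging step (the standalone helper name is illustrative):

import torch

def shot_noise(truth: torch.Tensor, dt: float = 100.0) -> torch.Tensor:
    # Photon counts ~ Poisson(dt * intensity), renormalized by the exposure dt,
    # as in GaussianParticles.draw_poisson.
    return torch.distributions.Poisson(dt * truth).sample() / dt

truth = torch.rand(64, 64) * 0.5 + 0.01  # stand-in for the ground-truth pdf image
noisy = shot_noise(truth)
print(noisy.shape, (noisy >= 0).all().item())  # torch.Size([64, 64]) True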
particles: GaussianParticles,
4
2023-11-10 10:18:39+00:00
8k
quantuminterface/qiclib
src/qiclib/code/qi_prog_builder.py
[ { "identifier": "QiCommandVisitor", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiCommandVisitor(abc.ABC):\n def visit_cell_command(self, cell_cmd):\n pass\n\n def visit_context_manager(self, context_manager):\n pass\n\n def visit_if(self, if_cm):\n pass\n\n def visit_parallel(self, parallel_cm):\n self.visit_context_manager(parallel_cm)\n\n def visit_for_range(self, for_range_cm):\n self.visit_context_manager(for_range_cm)\n\n def visit_variable_command(self, variable_cmd):\n pass\n\n def visit_assign_command(self, assign_cmd):\n self.visit_variable_command(assign_cmd)\n\n def visit_declare_command(self, declare_cmd):\n self.visit_variable_command(declare_cmd)\n\n def visit_sync_command(self, sync_cmd):\n pass\n\n def visit_asm_command(self, asm_cmd):\n pass\n\n def visit_mem_store_command(self, store_cmd):\n pass" }, { "identifier": "QiFindVarCmds", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiFindVarCmds(QiCommandVisitor):\n \"\"\"Used to find Pulse and Wait commands containing given QiTimeVariable\"\"\"\n\n def __init__(self, var) -> None:\n self.requested_var = var\n self.found_cmds: List[QiCommand] = []\n self.calc_in_wait = False\n\n def visit_cell_command(self, cell_cmd):\n \"\"\"Add commands if the use QiTimeVariable. If variable is used in calculation in wait, it is registered in calc_in_wait\"\"\"\n from .qi_jobs import _cQiPlay_base, cQiWait\n from .qi_var_definitions import _QiVariableBase\n\n if (\n isinstance(cell_cmd, _cQiPlay_base)\n and self.requested_var in cell_cmd._associated_variable_set\n ) or (\n isinstance(cell_cmd, cQiWait)\n and isinstance(cell_cmd.length, _QiVariableBase)\n and cell_cmd.length.id == self.requested_var.id\n ):\n self.found_cmds.append(cell_cmd)\n elif (\n isinstance(cell_cmd, cQiWait)\n and self.requested_var in cell_cmd._associated_variable_set\n ):\n self.found_cmds.append(cell_cmd)\n self.calc_in_wait = True\n\n def visit_context_manager(self, context_manager):\n \"\"\"Search for variable commands in context manager's bodies\"\"\"\n for command in context_manager.body:\n command.accept(self)\n\n def visit_if(self, if_cm):\n \"\"\"Search for variable commands in context manager's bodies\"\"\"\n for command in if_cm.body:\n command.accept(self)\n\n for command in if_cm._else_body:\n command.accept(self)\n\n def visit_parallel(self, parallel_cm):\n \"\"\"Avoid multiple additions to list, if multiple commands use self.requested_var\"\"\"\n for command in parallel_cm.body:\n if self.requested_var in command._associated_variable_set:\n self.found_cmds.append(command)\n return\n\n def visit_variable_command(self, variable_cmd):\n pass\n\n def visit_sync_command(self, sync_cmd):\n pass" }, { "identifier": "QiCMContainedCellVisitor", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiCMContainedCellVisitor(QiCommandVisitor):\n \"\"\"Visitor to check which cells are used inside context managers.\"\"\"\n\n def __init__(self) -> None:\n self.contained_cells: Set[QiCell] = set()\n\n def visit_cell_command(self, cell_cmd):\n self.contained_cells.update(cell_cmd._relevant_cells)\n\n def visit_context_manager(self, context_manager):\n visitor = QiCMContainedCellVisitor()\n for item in context_manager.body:\n item.accept(visitor)\n\n context_manager._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_if(self, if_cm):\n visitor = QiCMContainedCellVisitor()\n for command in if_cm.body:\n command.accept(visitor)\n\n for command in if_cm._else_body:\n 
command.accept(visitor)\n\n if_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_parallel(self, parallel_cm):\n visitor = QiCMContainedCellVisitor()\n for cmd_list in parallel_cm.entries:\n for cmd in cmd_list:\n cmd.accept(visitor)\n\n parallel_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_variable_command(self, variable_cmd):\n self.contained_cells.update(variable_cmd._relevant_cells)\n\n def visit_sync_command(self, sync_cmd):\n self.contained_cells.update(sync_cmd._relevant_cells)\n\n def visit_asm_command(self, asm_cmd):\n self.contained_cells.update(asm_cmd._relevant_cells)\n\n def visit_mem_store_command(self, store_cmd):\n self.contained_cells.update(store_cmd._relevant_cells)" }, { "identifier": "QiCmdVariableInspection", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiCmdVariableInspection(QiCommandVisitor):\n \"\"\"Visits QiCommands and assigns cell to a QiVariable, if the variable is used for the cells program execution\n\n QiCMContainedCellVisitor needs to be run beforehand\"\"\"\n\n def visit_cell_command(self, cell_cmd):\n for variable in cell_cmd._associated_variable_set:\n for cell in cell_cmd._relevant_cells:\n self._add_cell_to_var(cell, variable)\n\n def visit_context_manager(self, context_manager):\n # TODO does Else need to have the same relevant cells as If?\n for command in reversed(context_manager.body):\n command.accept(self)\n\n for variable in context_manager._associated_variable_set:\n for cell in context_manager._relevant_cells:\n self._add_cell_to_var(cell, variable)\n\n def visit_if(self, if_cm):\n # TODO does Else need to have the same relevant cells as If?\n for command in reversed(if_cm.body):\n command.accept(self)\n\n for command in reversed(if_cm._else_body):\n command.accept(self)\n\n for variable in if_cm._associated_variable_set:\n for cell in if_cm._relevant_cells:\n self._add_cell_to_var(cell, variable)\n\n def visit_parallel(self, parallel_cm):\n for cmd_list in parallel_cm.entries:\n for cmd in reversed(cmd_list):\n cmd.accept(self)\n\n def visit_variable_command(self, variable_cmd):\n self.visit_cell_command(variable_cmd)\n\n def visit_assign_command(self, assign_cmd):\n \"\"\"assign_cmd.var is the destination variable. For every relevant cell of the variable, which were defined\n beforehand, the assign command is also relevant for the same cell.\n\n Variables that are needed to calculate assign_cmd.var are contained in assign_cmd._associated_variable_set. 
For every\n one of these associated variables they must at least have the same relevant cells as assign_cmd.var, therefore\n they are added here\"\"\"\n\n for cell in assign_cmd.var._relevant_cells:\n assign_cmd._relevant_cells.add(cell)\n\n for variable in assign_cmd._associated_variable_set:\n self._add_cell_to_var(cell, variable)\n\n def visit_declare_command(self, declare_cmd):\n for cell in declare_cmd.var._relevant_cells:\n declare_cmd._relevant_cells.add(cell)\n\n def visit_sync_command(self, sync_cmd):\n pass\n\n def _add_cell_to_var(self, cell, var):\n var._relevant_cells.add(cell)\n cell.add_variable(var)" }, { "identifier": "_get_for_range_iterations", "path": "src/qiclib/code/qi_util.py", "snippet": "def _get_for_range_iterations(start, end, step):\n \"\"\"Returns number of iterations of ForRange or None if start or end are QiVariables.\n Stupid but no need to check validity of input, in case of unrolled loop\"\"\"\n from .qi_var_definitions import _QiVariableBase, _QiConstValue, QiCellProperty\n\n if (\n isinstance(start, _QiVariableBase)\n or start is None\n or isinstance(end, _QiVariableBase)\n or end is None\n ):\n return None\n\n if isinstance(start, (_QiConstValue, QiCellProperty)):\n start = start.value\n if isinstance(end, (_QiConstValue, QiCellProperty)):\n end = end.value\n if isinstance(step, (_QiConstValue, QiCellProperty)):\n step = step.value\n\n iterations = 0\n for _ in range(start, end, step):\n iterations += 1\n return iterations" }, { "identifier": "_get_for_range_end_value", "path": "src/qiclib/code/qi_util.py", "snippet": "def _get_for_range_end_value(start, end, step):\n \"\"\"Returns end value of ForRange or None if start or end are QiVariables.\n Stupid but no need to check validity of input, in case of unrolled loop\"\"\"\n from .qi_var_definitions import _QiVariableBase, _QiConstValue, QiCellProperty\n\n if (\n isinstance(start, _QiVariableBase)\n or start is None\n or isinstance(end, _QiVariableBase)\n or end is None\n ):\n return None\n\n if isinstance(start, (_QiConstValue, QiCellProperty)):\n start = start.value\n if isinstance(end, (_QiConstValue, QiCellProperty)):\n end = end.value\n if isinstance(step, (_QiConstValue, QiCellProperty)):\n step = step.value\n\n end_val = start\n for _ in range(start, end, step):\n end_val += step\n return end_val" } ]
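All of the Qi* visitors in this record share one double-dispatch shape: each command's accept() calls back into the matching visit_* method, and container commands recurse over their bodies. A minimal sketch of the pattern with illustrative command classes (accept() is implied by the `command.accept(self)` calls above, not shown verbatim in the source):

class CellCommand:
    def accept(self, visitor):
        visitor.visit_cell_command(self)

class ForRange:
    def __init__(self, body):
        self.body = body
    def accept(self, visitor):
        visitor.visit_for_range(self)

class CommandCounter:  # a tiny QiCommandVisitor-style visitor
    def __init__(self):
        self.count = 0
    def visit_cell_command(self, cmd):
        self.count += 1
    def visit_for_range(self, cmd):
        for child in cmd.body:  # recurse into the loop body, as the visitors above do
            child.accept(self)

counter = CommandCounter()
ForRange([CellCommand(), CellCommand()]).accept(counter)
print(counter.count)  # 2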
import copy import qiclib.packages.utility as util from typing import List, Any, Dict, Union, Tuple, TYPE_CHECKING from qiclib.code.qi_seq_instructions import SeqCellSync from qiclib.code.qi_var_definitions import ( _QiConstValue, QiCellProperty, QiExpression, QiType, _QiVariableBase, ) from .qi_visitor import ( QiCommandVisitor, QiFindVarCmds, QiCMContainedCellVisitor, QiCmdVariableInspection, ) from .qi_util import _get_for_range_iterations, _get_for_range_end_value from .qi_jobs import QiCommand from .qi_jobs import _cQiPlay_base, cQiWait, cQiPlayReadout from .qi_var_definitions import _QiVariableBase from .qi_jobs import _cQiPlay_base, cQiPlayReadout, cQiRecording from .qi_sequencer import Sequencer from .qi_jobs import QiCell from .qi_jobs import QiCell from .qi_jobs import ( cQiWait, cQiPlay, cQiPlayReadout, cQiRotateFrame, cQiRecording, ) from .qi_sequencer import _ProgramCycles from .qi_jobs import ( cQiPlay, cQiPlayReadout, cQiRotateFrame, cQiRecording, cQiWait, ) from .qi_sequencer import Sequencer, _ProgramCycles from .qi_var_definitions import _QiVariableBase from .qi_jobs import cQiWait from .qi_sequencer import _ProgramCycles from .qi_var_definitions import _QiVariableBase from .qi_jobs import cQiWait, _cQiPlay_base from .qi_sequencer import _ProgramCycles from .qi_var_definitions import _QiVariableBase from .qi_var_definitions import _QiVariableBase from .qi_var_definitions import _QiVariableBase from .qi_var_definitions import QiOp, _QiVariableBase from .qi_sequencer import _ProgramCycles from .qi_var_definitions import _QiVariableBase from .qi_sequencer import _ProgramCycles from .qi_var_definitions import QiOp from .qi_sequencer import _ProgramCycles from .qi_sequencer import _ProgramCycles from .qi_var_definitions import _QiVariableBase from .qi_jobs import _QiVariableBase, _QiCalcBase from .qi_sequencer import _ProgramCycles from .qi_sequencer import Sequencer
5,130
relevant_cells, _ProgramCycles.SyncPoint( for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION ), ) return if find_var_visitor.calc_in_wait: self.force_sync( relevant_cells, _ProgramCycles.SyncPoint( for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION ), ) return if isinstance(start_val, _QiVariableBase) or start_val is None: self.force_sync( relevant_cells, _ProgramCycles.SyncPoint( for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION ), ) return if isinstance(start_val, (_QiConstValue, QiCellProperty)): start_val = start_val.value prog_lengths: List[int] = [] wait_cmds = {} for cell in relevant_cells: wait_cmds[cell] = [ cmd for cmd in find_var_visitor.found_cmds if cell in cmd._relevant_cells ] prog_lengths.append( self.cell_seq[cell].prog_cycles - (start_val * len(wait_cmds[cell])) ) # subtract already added variable waits # negative prog_lengths imply that a self.cell_seq[cell].prog_cycles were invalid. if any(x < 0 for x in prog_lengths): self.force_sync( relevant_cells, _ProgramCycles.SyncPoint( for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION ), ) return longest = max(prog_lengths) cycles_without_waits = self.cell_seq[cell].prog_cycles - ( start_val * len(wait_cmds[cell]) ) for cell in relevant_cells: if cycles_without_waits < longest: # sync non variable cycles self.cell_seq[cell]._wait_cycles(longest - cycles_without_waits) most_waits = 0 for cell in relevant_cells: most_waits = max(len(wait_cmds[cell]), most_waits) waits = len(wait_cmds[cell]) for _ in range(waits, most_waits): self.cell_seq[cell].add_wait_cmd( cQiWait(None, for_range.var) ) # add missing waits, no multiplication to avoid overflows def update_cycles_after_for_range(self, for_range, start_val, program_cycles_start): """First iteration of loop was already added to sequencer; so every variable wait already used start_val cycles. If variable start/end are used, sets _prog_cycles to False.""" relevant_cells = self.get_relevant_cells(for_range) end_val = self.get_for_range_val(for_range.end, relevant_cells) if ( isinstance(for_range.start, _QiVariableBase) or isinstance(for_range.end, _QiVariableBase) or start_val is None or end_val is None ): for cell in relevant_cells: self.cell_seq[cell]._prog_cycles.valid = False return if isinstance(start_val, (_QiConstValue, QiCellProperty)): start_val = start_val.value assert isinstance(start_val, (int, _QiVariableBase)) find_var_visitor = QiFindVarCmds(for_range.var) for cmd in for_range.body: cmd.accept(find_var_visitor) wait_cmds = {} play_cmds = {} for cell in relevant_cells: wait_cmds[cell] = [ cmd for cmd in find_var_visitor.found_cmds if cell in cmd._relevant_cells and isinstance(cmd, cQiWait) ] play_cmds[cell] = [ cmd for cmd in find_var_visitor.found_cmds if cell in cmd._relevant_cells and isinstance(cmd, _cQiPlay_base) ] for cell in relevant_cells: if self.cell_seq[cell].prog_cycles is _ProgramCycles.INVALID: continue if len(find_var_visitor.found_cmds) == 0: self.cell_seq[cell].prog_cycles += ( self.cell_seq[cell].prog_cycles - program_cycles_start[cell] ) * (
# Copyright © 2017-2023 Quantum Interface ([email protected])
# Richard Gebauer, IPE, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""
This module contains the higher level parts of the code generation logic.

The entry point is in `QiProgramBuilder.build_program` which uses the
`ProgramBuilderVisitor`.

`ProgramBuilderVisitor` recursively visits every `QiJob` command and generates
its corresponding RISC-V assembly sequentially.
"""

if TYPE_CHECKING:


class QiCmdExcludeVar(QiCommandVisitor):
    """Generates command list excluding cell- and variable-commands which
    implement variables from ignore_list.
    Generates new context managers with updated bodies"""

    def __init__(self, ignore_list: List[Any]) -> None:
        self.ignore_list = ignore_list
        self.commands: List[QiCommand] = []

    def visit_cell_command(self, cell_cmd):
        for variable in self.ignore_list:
            if (
                isinstance(cell_cmd, _cQiPlay_base)
                and variable in cell_cmd._associated_variable_set
            ):
                if (
                    isinstance(cell_cmd, cQiPlayReadout)
                    and cell_cmd.recording is not None
                ):
                    self.commands.append(cell_cmd.recording)
                return
            elif (
                isinstance(cell_cmd, cQiWait)
                and isinstance(cell_cmd.length, _QiVariableBase)
                and cell_cmd.length.id == variable.id
            ):
                # If cQiWait.length is QiCalc, append command
                return

        self.commands.append(cell_cmd)

    def visit_context_manager(self, context_manager):
        exclude_var = QiCmdExcludeVar(self.ignore_list)

        for command in context_manager.body:
            command.accept(exclude_var)

        if len(exclude_var.commands) == 0:
            return

        new_cm = copy.copy(context_manager)
        new_cm._relevant_cells.update(context_manager._relevant_cells)
        new_cm.body = exclude_var.commands

        self.commands.append(new_cm)

    def visit_if(self, if_cm):
        """Searches If/Else for commands containing variables defined in self.ignore_list.
        Creates new bodies and checks their size, so no empty ifs are returned."""
        exclude_if_body = QiCmdExcludeVar(self.ignore_list)

        for command in if_cm.body:
            command.accept(exclude_if_body)

        new_if = copy.copy(if_cm)
        new_if.body = exclude_if_body.commands

        if if_cm.is_followed_by_else():
            exclude_else_body = QiCmdExcludeVar(self.ignore_list)

            for command in if_cm._else_body:
                command.accept(exclude_else_body)

            if len(exclude_else_body.commands) > 0:
                new_if._else_body = exclude_else_body.commands

        if len(new_if.body) != 0 or len(new_if._else_body) != 0:
            self.commands.append(new_if)

    def visit_parallel(self, parallel_cm):
        new_parallel = copy.copy(parallel_cm)
        new_parallel.parallel = []

        for cmd_list in parallel_cm.entries:
            exclude_var = QiCmdExcludeVar(self.ignore_list)

            for cmd in cmd_list:
                cmd.accept(exclude_var)

            if len(exclude_var.commands) > 0:
                new_parallel.parallel.append(exclude_var.commands)

        if len(new_parallel.parallel) > 0:
            self.commands.append(new_parallel)

    def visit_variable_command(self, variable_cmd):
        for variable in self.ignore_list:
            if variable.id == variable_cmd.var.id:
                return

        self.commands.append(variable_cmd)

    def visit_sync_command(self, sync_cmd):
        self.commands.append(sync_cmd)

    def visit_mem_store_command(self, store_cmd):
        self.commands.append(store_cmd)


class QiCmdReplaceTriggerVar(QiCommandVisitor):
    """Generates command list, replacing trigger commands utilizing var with a
    trigger command of length 1.
    Generates new context managers with updated bodies."""

    def __init__(self, replace_var) -> None:
        self.replace_var = replace_var
        self.commands: List[QiCommand] = []

    def visit_cell_command(self, cell_cmd):
        if (
            isinstance(cell_cmd, _cQiPlay_base)
            and self.replace_var in cell_cmd._associated_variable_set
        ):
            new_cmd = copy.copy(cell_cmd)
            new_cmd._relevant_cells.update(cell_cmd._relevant_cells)

            if isinstance(cell_cmd, cQiPlayReadout) and isinstance(
                cell_cmd.recording, cQiRecording
            ):
                new_cmd.recording = copy.copy(cell_cmd.recording)

            new_cmd.length = util.conv_cycles_to_time(
                1
            )  # set length to 1 for possible Parallel sequence generation
            new_cmd._var_single_cycle_trigger = True

            self.commands.append(new_cmd)
            return

        self.commands.append(cell_cmd)

    def visit_context_manager(self, context_manager):
        replace_var = QiCmdReplaceTriggerVar(self.replace_var)

        for command in context_manager.body:
            command.accept(replace_var)

        new_cm = copy.copy(context_manager)
        new_cm._relevant_cells.update(context_manager._relevant_cells)
        new_cm.body = replace_var.commands

        self.commands.append(new_cm)

    def visit_if(self, if_cm):
        """Searches If/Else for commands containing variable defined in self.replace_var."""
        replace_var_if = QiCmdReplaceTriggerVar(self.replace_var)

        for command in if_cm.body:
            command.accept(replace_var_if)

        new_if = copy.copy(if_cm)
        new_if.body = replace_var_if.commands

        if if_cm.is_followed_by_else():
            replace_var_else = QiCmdReplaceTriggerVar(self.replace_var)

            for command in if_cm._else_body:
                command.accept(replace_var_else)

            new_if._else_body = replace_var_else.commands

        self.commands.append(new_if)

    def visit_parallel(self, parallel_cm):
        new_parallel = copy.copy(parallel_cm)
        new_parallel.parallel = []

        for cmd_list in parallel_cm.parallel:
            replace_var = QiCmdReplaceTriggerVar(self.replace_var)

            for cmd in cmd_list:
                cmd.accept(replace_var)

            new_parallel.parallel.append(replace_var.commands)

        if len(new_parallel.parallel) > 0:
            self.commands.append(new_parallel)

    def visit_variable_command(self, variable_cmd):
        self.commands.append(variable_cmd)

    def visit_sync_command(self, sync_cmd):
        self.commands.append(sync_cmd)
    def visit_mem_store_command(self, store_cmd):
        self.commands.append(store_cmd)


class ProgramBuilderVisitor(QiCommandVisitor):
    def __init__(self, cell_seq, job_cell_to_digital_unit_cell_map) -> None:
        self.cell_seq: Dict[QiCell, Sequencer] = cell_seq
        self.if_depth: int = 0  # Used to check if currently processing commands inside If-Context-Manager
        self.for_range_end_val_list: List[
            Tuple[_QiVariableBase, Union[QiExpression, int]]
        ] = []
        self.job_cell_to_digital_unit_cell_map = job_cell_to_digital_unit_cell_map

    def get_relevant_cells(self, cmd):
        """Generates a list of relevant cells from the cells registered to the
        builder and the command cmd"""
        return [cell for cell in self.cell_seq.keys() if cell in cmd._relevant_cells]

    def end_of_body(self, relevant_cells):
        """End of body --> stop potentially running pulses"""
        for cell in self.cell_seq.keys():
            if cell in relevant_cells:
                self.cell_seq[cell].end_of_command_body()

    def build_element_body(self, body, relevant_cells):
        """Function used to build commands from body. end_of_body() is called
        afterwards to end possibly ongoing pulses"""
        for cmd in body:
            cmd.accept(self)

        self.end_of_body(relevant_cells)

    def force_sync(self, relevant_cells, sync_point):
        """Forces the given cells to synchronize by inserting a SeqCellSync instruction."""
        digital_unit_cell_indices = list(
            map(
                lambda cell: self.job_cell_to_digital_unit_cell_map[cell.cellID],
                relevant_cells,
            )
        )
        for cell in relevant_cells:
            cell_sequencer = self.cell_seq[cell]
            cell_sequencer.add_instruction_to_list(
                SeqCellSync(digital_unit_cell_indices)
            )
            cell_sequencer._prog_cycles.set_synchronized(sync_point)

    def cells_implicitly_synchronizable(self, cells):
        all_valid = all(self.cell_seq[cell].prog_cycles >= 0 for cell in cells)

        def all_share_syncpoint():
            return (
                len(
                    set(
                        map(
                            lambda cell: self.cell_seq[
                                cell
                            ]._prog_cycles.last_sync_point,
                            cells,
                        )
                    )
                )
                == 1
            )

        return all_valid and all_share_syncpoint()

    def sync_cells(self, relevant_cells, sync_point):
        """
        Synchronizes given cells, implicitly if possible, explicitly otherwise.

        If implicit sync is possible, it evaluates the current program lengths
        of the sequencers of relevant_cells. If valid prog_lengths are found,
        it adds Wait commands to the sequencers with shorter programs.
        """
        relevant_cells: List[QiCell] = relevant_cells

        if len(relevant_cells) <= 1:
            return

        if not self.cells_implicitly_synchronizable(relevant_cells):
            self.force_sync(relevant_cells, sync_point)
        else:
            prog_lengths: List[int] = list(
                map(lambda cell: self.cell_seq[cell].prog_cycles, relevant_cells)
            )
            longest = max(prog_lengths)

            for cell in relevant_cells:
                if self.cell_seq[cell].prog_cycles < longest:
                    self.cell_seq[cell]._wait_cycles(
                        longest - self.cell_seq[cell].prog_cycles
                    )

    def _unroll_loop_0(self, for_range, static_unroll=False):
        """Function used for unrolling ForRange with variable value 0.
        A new program body is built from ForRange.body excluding wait and pulse
        commands using solely ForRange.var.
        The new program body is then added to the sequencer."""
        exclude_var = QiCmdExcludeVar([for_range.var])

        for cmd in for_range.body:
            cmd.accept(exclude_var)

        if len(exclude_var.commands) == 0:
            return

        relevant_cells = self.get_relevant_cells(for_range)

        for cell in relevant_cells:
            if static_unroll is True:
                # set register value to 0 in case it had different values before --> important for possible use in conditions
                self.cell_seq[cell].set_variable_value(for_range.var, 0)

            # register one cycle of ForRange, actual start/end/step values not relevant
            self.cell_seq[cell].register_for_range(
                for_range.var, 0, for_range.step.value, for_range.step.value
            )

        self.build_element_body(exclude_var.commands, for_range._relevant_cells)

        for cell in relevant_cells:
            self.cell_seq[cell].exit_for_range()

    def _unroll_loop_1(self, for_range):
        """Function used for unrolling ForRange with variable value 1.
        A new program body is built from ForRange.body, replacing pulse commands
        using ForRange.var with trigger commands with length 1.
        The new program body is then added to the sequencer."""
        replace_var = QiCmdReplaceTriggerVar(for_range.var)

        for cmd in for_range.body:
            cmd.accept(replace_var)

        relevant_cells = self.get_relevant_cells(for_range)

        for cell in relevant_cells:
            self.cell_seq[cell].register_for_range(
                for_range.var, 1, 1 + for_range.step.value, for_range.step.value
            )  # register one cycle of ForRange, actual start/end/step values not relevant

        self.build_element_body(replace_var.commands, for_range._relevant_cells)

        for cell in relevant_cells:
            self.cell_seq[cell].exit_for_range()

    def visit_cell_command(self, cell_cmd):
        relevant_cells = self.get_relevant_cells(cell_cmd)

        for cell in relevant_cells:
            if isinstance(cell_cmd, cQiWait):
                # Ignore Wait command if it is of length less than a cycle.
                length = cell_cmd.length
                if (
                    isinstance(length, (int, float))
                    and util.conv_time_to_cycles(length) == 0
                ):
                    return

                self.cell_seq[cell].add_wait_cmd(cell_cmd)
            elif isinstance(cell_cmd, (cQiPlay, cQiRotateFrame)):
                self.cell_seq[cell].add_trigger_cmd(
                    manipulation=cell_cmd,
                    var_single_cycle=cell_cmd._var_single_cycle_trigger,
                )
            elif isinstance(cell_cmd, cQiPlayReadout):
                # cell_cmd.recording is either None or cQiRecording
                self.cell_seq[cell].add_trigger_cmd(
                    readout=cell_cmd,
                    recording=cell_cmd.recording,
                    var_single_cycle=cell_cmd._var_single_cycle_trigger,
                )
            elif isinstance(cell_cmd, cQiRecording):
                self.cell_seq[cell].add_trigger_cmd(recording=cell_cmd)

    def visit_context_manager(self, context_manager):
        """Context managers are evaluated in respective visit"""

    def visit_if(self, if_cm):
        """Visits If command and builds sequencer instructions.
        Tries synchronizing if multiple cells are used."""
        jump_over_if = {}
        program_counters = {}
        relevant_cells = self.get_relevant_cells(if_cm)

        self.sync_cells(relevant_cells, _ProgramCycles.SyncPoint(if_cm))

        self.if_depth += 1

        for cell in relevant_cells:
            jump_over_if[cell] = self.cell_seq[cell].add_if_condition(if_cm.condition)
            # conditional branching makes implicit sync by wait impossible
            self.cell_seq[cell]._prog_cycles.valid = False
            program_counters[cell] = self.cell_seq[cell].get_prog_size() - 1

        self.build_element_body(if_cm.body, if_cm._relevant_cells)

        if if_cm.is_followed_by_else():
            jump_over_else = {}

            for cell in relevant_cells:
                # add jump after if-body to jump over else-body
                jump_over_else[cell] = self.cell_seq[cell].add_jump()

                jump_over_if[cell].set_jump_value(
                    self.cell_seq[cell].get_prog_size() - program_counters[cell]
                )
                program_counters[cell] = self.cell_seq[cell].get_prog_size() - 1

            self.build_element_body(if_cm._else_body, if_cm._relevant_cells)

            for cell in relevant_cells:
                jump_over_else[cell].jump_val = (
                    self.cell_seq[cell].get_prog_size() - program_counters[cell]
                )
        else:
            for cell in relevant_cells:
                jump_over_if[cell].set_jump_value(
                    self.cell_seq[cell].get_prog_size() - program_counters[cell]
                )

        self.if_depth -= 1

    def visit_parallel(self, parallel_cm):
        """Visits Parallel command and builds sequencer command.
        Searches for manipulation, readout and recording pulses inside body and
        summarizes them in one trigger command.
        """
        relevant_cells = self.get_relevant_cells(parallel_cm)

        self.sync_cells(relevant_cells, _ProgramCycles.SyncPoint(parallel_cm))

        for cell in relevant_cells:
            time_slots = parallel_cm._generate_command_body(cell, self.cell_seq[cell])

            for time_slot in time_slots:
                if isinstance(time_slot.cmd_tuples[0].cmd, cQiWait):
                    self.cell_seq[cell].add_wait_cmd(cQiWait(cell, time_slot.duration))
                else:
                    manipulation = None
                    readout = None
                    recording = None

                    for cmd_tuple in time_slot.cmd_tuples:
                        trigger_cmd = copy.copy(cmd_tuple.cmd)
                        trigger_cmd.length = time_slot.duration

                        if isinstance(cmd_tuple.cmd, (cQiPlay, cQiRotateFrame)):
                            if cmd_tuple.choke_cmd is True:
                                trigger_cmd.trigger_index = Sequencer.CHOKE_PULSE_INDEX

                            manipulation = trigger_cmd
                        elif isinstance(cmd_tuple.cmd, cQiPlayReadout):
                            if cmd_tuple.choke_cmd is True:
                                trigger_cmd.trigger_index = Sequencer.CHOKE_PULSE_INDEX
                                trigger_cmd.recording = None

                            readout = trigger_cmd
                            recording = trigger_cmd.recording
                        elif isinstance(cmd_tuple.cmd, cQiRecording):
                            recording = trigger_cmd

                    self.cell_seq[cell].add_trigger_cmd(
                        manipulation=manipulation,
                        readout=readout,
                        recording=recording,
                        recording_delay=False,
                    )

    def try_sync_for_range(self, for_range, start_val: "QiExpression"):
        """If multiple cells are used inside a ForRange context manager, the
        program tries to sync the cells before restarting the loop.
        If the ForRange does not use its variable for pulses or waits, a normal
        sync is used.
        """
        relevant_cells = self.get_relevant_cells(for_range)

        if len(relevant_cells) == 1:
            return

        find_var_visitor = QiFindVarCmds(for_range.var)

        for cmd in for_range.body:
            cmd.accept(find_var_visitor)

        if len(find_var_visitor.found_cmds) == 0:
            self.sync_cells(
                relevant_cells,
                _ProgramCycles.SyncPoint(
                    for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION
                ),
            )
            return

        if find_var_visitor.calc_in_wait:
            self.force_sync(
                relevant_cells,
                _ProgramCycles.SyncPoint(
                    for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION
                ),
            )
            return

        if isinstance(start_val, _QiVariableBase) or start_val is None:
            self.force_sync(
                relevant_cells,
                _ProgramCycles.SyncPoint(
                    for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION
                ),
            )
            return

        if isinstance(start_val, (_QiConstValue, QiCellProperty)):
            start_val = start_val.value

        prog_lengths: List[int] = []
        wait_cmds = {}
        for cell in relevant_cells:
            wait_cmds[cell] = [
                cmd
                for cmd in find_var_visitor.found_cmds
                if cell in cmd._relevant_cells
            ]
            prog_lengths.append(
                self.cell_seq[cell].prog_cycles - (start_val * len(wait_cmds[cell]))
            )  # subtract already added variable waits

        # negative prog_lengths imply that self.cell_seq[cell].prog_cycles was invalid
        if any(x < 0 for x in prog_lengths):
            self.force_sync(
                relevant_cells,
                _ProgramCycles.SyncPoint(
                    for_range, _ProgramCycles.SyncPointType.AFTER_FOR_RANGE_ITERATION
                ),
            )
            return

        longest = max(prog_lengths)
        for cell in relevant_cells:
            cycles_without_waits = self.cell_seq[cell].prog_cycles - (
                start_val * len(wait_cmds[cell])
            )
            if cycles_without_waits < longest:
                # sync non variable cycles
                self.cell_seq[cell]._wait_cycles(longest - cycles_without_waits)

        most_waits = 0
        for cell in relevant_cells:
            most_waits = max(len(wait_cmds[cell]), most_waits)

        for cell in relevant_cells:
            waits = len(wait_cmds[cell])
            for _ in range(waits, most_waits):
                self.cell_seq[cell].add_wait_cmd(
                    cQiWait(None, for_range.var)
                )  # add missing waits, no multiplication to avoid overflows

    def update_cycles_after_for_range(self, for_range, start_val, program_cycles_start):
        """The first iteration of the loop was already added to the sequencer,
        so every variable wait has already used start_val cycles.
        If a variable start/end is used, sets _prog_cycles.valid to False."""
        relevant_cells = self.get_relevant_cells(for_range)
        end_val = self.get_for_range_val(for_range.end, relevant_cells)

        if (
            isinstance(for_range.start, _QiVariableBase)
            or isinstance(for_range.end, _QiVariableBase)
            or start_val is None
            or end_val is None
        ):
            for cell in relevant_cells:
                self.cell_seq[cell]._prog_cycles.valid = False
            return

        if isinstance(start_val, (_QiConstValue, QiCellProperty)):
            start_val = start_val.value
        assert isinstance(start_val, (int, _QiVariableBase))

        find_var_visitor = QiFindVarCmds(for_range.var)
        for cmd in for_range.body:
            cmd.accept(find_var_visitor)

        wait_cmds = {}
        play_cmds = {}
        for cell in relevant_cells:
            wait_cmds[cell] = [
                cmd
                for cmd in find_var_visitor.found_cmds
                if cell in cmd._relevant_cells and isinstance(cmd, cQiWait)
            ]
            play_cmds[cell] = [
                cmd
                for cmd in find_var_visitor.found_cmds
                if cell in cmd._relevant_cells and isinstance(cmd, _cQiPlay_base)
            ]

        for cell in relevant_cells:
            if self.cell_seq[cell].prog_cycles is _ProgramCycles.INVALID:
                continue

            if len(find_var_visitor.found_cmds) == 0:
                self.cell_seq[cell].prog_cycles += (
                    self.cell_seq[cell].prog_cycles - program_cycles_start[cell]
                ) * (
_get_for_range_iterations(start_val, end_val, for_range.step.value)
4
2023-11-10 10:26:10+00:00
8k
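The heart of the synchronization logic in the record above is sync_cells: when every relevant cell still has a valid program length and all cells share the same last sync point, the builder synchronizes implicitly by padding the shorter programs with waits instead of emitting a SeqCellSync instruction. A minimal, self-contained sketch of that padding strategy follows; ToySequencer, its _wait_cycles method, and the instruction strings are simplified stand-ins for illustration, not qiclib's real API.

from dataclasses import dataclass, field
from typing import List


@dataclass
class ToySequencer:
    prog_cycles: int = 0
    instructions: List[str] = field(default_factory=list)

    def _wait_cycles(self, cycles: int) -> None:
        # Pad this cell's program with an explicit wait instruction.
        self.instructions.append(f"WAIT {cycles}")
        self.prog_cycles += cycles


def sync_by_padding(sequencers: List[ToySequencer]) -> None:
    """Pad every shorter program up to the longest one (implicit sync)."""
    longest = max(seq.prog_cycles for seq in sequencers)
    for seq in sequencers:
        if seq.prog_cycles < longest:
            seq._wait_cycles(longest - seq.prog_cycles)


a, b = ToySequencer(prog_cycles=120), ToySequencer(prog_cycles=90)
sync_by_padding([a, b])
assert a.prog_cycles == b.prog_cycles == 120
print(b.instructions)  # ['WAIT 30']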
jpcadena/fastapi-boilerplate
app/services/infrastructure/auth.py
[ { "identifier": "get_auth_settings", "path": "app/config/config.py", "snippet": "@lru_cache()\ndef get_auth_settings() -> AuthSettings:\n \"\"\"\n Get auth settings cached\n :return: Auth settings instance\n :rtype: AuthSettings\n \"\"\"\n return AuthSettings()" }, { "identifier": "AuthSettings", "path": "app/config/db/auth_settings.py", "snippet": "class AuthSettings(BaseSettings):\n \"\"\"\n Settings class for authentication using JWT and Redis\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n MAX_REQUESTS: PositiveInt = 30\n RATE_LIMIT_DURATION: PositiveInt = 60\n BLACKLIST_EXPIRATION_SECONDS: PositiveInt = 3600\n API_V1_STR: str = \"/api/v1\"\n ALGORITHM: str = \"HS256\"\n AUTH_URL: str = \"api/v1/auth/\"\n TOKEN_URL: str = \"api/v1/auth/login\"\n OAUTH2_SCHEME: str = \"JWT\"\n OAUTH2_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate most of\" \" the API endpoints.\"\n )\n OAUTH2_REFRESH_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate\" \" most ofhe API endpoints.\"\n )\n TOKEN_USER_INFO_REGEX: str = (\n r\"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-\"\n r\"[0-9a-f]{4}-[0-9a-f]{12}:\\d{1,3}\\.\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n )\n SUB_REGEX: str = (\n r\"^username:[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-\"\n r\"[89ab][0-9a-f]{3}-[0-9a-f]{12}$\"\n )\n HEADERS: dict[str, str] = {\"WWW-Authenticate\": \"Bearer\"}\n DETAIL: str = \"Could not validate credentials\"\n NO_CLIENT_FOUND: str = \"No client found on the request\"\n SECRET_KEY: str\n SERVER_URL: AnyHttpUrl\n SERVER_DESCRIPTION: str\n CACHE_SECONDS: PositiveInt = 3600\n ACCESS_TOKEN_EXPIRE_MINUTES: float\n REFRESH_TOKEN_EXPIRE_MINUTES: PositiveInt\n EMAIL_RESET_TOKEN_EXPIRE_HOURS: PositiveInt\n AUDIENCE: Optional[AnyHttpUrl] = None\n STRICT_TRANSPORT_SECURITY_MAX_AGE: PositiveInt\n\n @field_validator(\"AUDIENCE\", mode=\"before\")\n def assemble_audience(\n cls, v: Optional[str], info: ValidationInfo\n ) -> AnyHttpUrl:\n \"\"\"\n Combine server host and API_V1_STR to create the audience\n string.\n :param v: The value of audience attribute\n :type v: Optional[str]\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The AUDIENCE attribute\n :rtype: AnyHttpUrl\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return AnyHttpUrl(\n f'{str(info.data.get(\"SERVER_URL\"))[:-1]}:8000/'\n f'{info.data.get(\"TOKEN_URL\")}'\n )\n\n REDIS_SCHEME: str\n REDIS_HOST: str\n REDIS_USERNAME: str\n REDIS_PASSWORD: str\n REDIS_PORT: PositiveInt\n REDIS_DATABASE_URI: Optional[RedisDsn] = None\n\n @field_validator(\"REDIS_DATABASE_URI\", mode=\"before\")\n def assemble_redis_connection(\n cls, v: Optional[str], info: ValidationInfo\n ) -> RedisDsn:\n \"\"\"\n Assemble the cache database connection as URI string\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: Redis URI\n :rtype: RedisDsn\n \"\"\"\n # pylint: disable=no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return RedisDsn(\n str(\n Url.build(\n scheme=info.data.get(\"REDIS_SCHEME\", \"\"),\n username=info.data.get(\"REDIS_USERNAME\"),\n password=info.data.get(\"REDIS_PASSWORD\"),\n host=info.data.get(\"REDIS_HOST\", \"\"),\n port=info.data.get(\"REDIS_PORT\"),\n )\n )\n )" }, { "identifier": 
"create_access_token", "path": "app/core/security/jwt.py", "snippet": "def create_access_token(\n payload: TokenPayload,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n scope: Scope = Scope.ACCESS_TOKEN,\n expires_delta: Optional[timedelta] = None,\n) -> str:\n \"\"\"\n Create a new JWT access token\n :param payload: The payload or claims for the token\n :type payload: TokenPayload\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param scope: The token's scope.\n :type scope: Scope\n :param expires_delta: The timedelta specifying when the token should expire\n :type expires_delta: timedelta\n :return: The encoded JWT\n :rtype: str\n \"\"\"\n claims: dict[str, Any]\n if expires_delta:\n expire_time: datetime = _generate_expiration_time(\n expires_delta, auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES\n )\n updated_payload: TokenPayload = payload.model_copy(\n update={\"exp\": int(expire_time.timestamp()), \"scope\": scope}\n )\n claims = jsonable_encoder(updated_payload)\n else:\n claims = jsonable_encoder(payload)\n encoded_jwt: str = jwt.encode(\n claims=claims,\n key=auth_settings.SECRET_KEY,\n algorithm=auth_settings.ALGORITHM,\n )\n logger.info(\"JWT created with JTI: %s\", payload.jti)\n return encoded_jwt" }, { "identifier": "create_refresh_token", "path": "app/core/security/jwt.py", "snippet": "def create_refresh_token(\n payload: TokenPayload,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> str:\n \"\"\"\n Create a refresh token for authentication\n :param payload: The data to be used as payload in the token\n :type payload: TokenPayload\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The access token with refresh expiration time\n :rtype: str\n \"\"\"\n expires: timedelta = timedelta(\n minutes=auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES\n )\n token: str = create_access_token(\n payload=payload,\n auth_settings=auth_settings,\n scope=Scope.REFRESH_TOKEN,\n expires_delta=expires,\n )\n return token" }, { "identifier": "User", "path": "app/models/sql/user.py", "snippet": "class User(Base): # type: ignore\n \"\"\"\n User model class representing the \"users\" table\n \"\"\"\n\n __tablename__ = \"users\"\n\n id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n index=True,\n nullable=False,\n primary_key=True,\n unique=True,\n server_default=text(\"(gen_random_uuid())\"),\n comment=\"ID of the User\",\n )\n username: Mapped[str] = mapped_column(\n String(15),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Username to identify the user\",\n )\n email: Mapped[EmailStr] = mapped_column(\n String(320),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Preferred e-mail address of the User\",\n )\n first_name: Mapped[str] = mapped_column(\n String(50), nullable=False, comment=\"First name(s) of the User\"\n )\n middle_name: Mapped[str] = mapped_column(\n String(50), nullable=True, comment=\"Middle name(s) of the User\"\n )\n last_name: Mapped[str] = mapped_column(\n String(100), nullable=False, comment=\"Last name(s) of the User\"\n )\n password: Mapped[str] = mapped_column(\n String(60), nullable=False, comment=\"Hashed password of the User\"\n )\n gender: Mapped[Gender] = mapped_column(\n Enum(Gender), nullable=True, comment=\"Gender of the User\"\n )\n birthdate: Mapped[PastDate] = mapped_column(\n Date, nullable=True, comment=\"Birthday of the User\"\n )\n phone_number: Mapped[PhoneNumber] = 
mapped_column(\n String(20),\n nullable=True,\n comment=\"Preferred telephone number of the User\",\n )\n is_active: Mapped[bool] = mapped_column(\n Boolean(),\n default=True,\n nullable=False,\n server_default=text(\"true\"),\n comment=\"True if the user is active; otherwise false\",\n )\n is_superuser: Mapped[bool] = mapped_column(\n Boolean(),\n default=False,\n nullable=False,\n server_default=text(\"false\"),\n comment=\"True if the user is super user; otherwise false\",\n )\n created_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n default=datetime.now(),\n nullable=False,\n server_default=text(\"now()\"),\n comment=\"Time the User was created\",\n )\n updated_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n nullable=True,\n onupdate=text(\"now()\"),\n comment=\"Time the User was updated\",\n )\n address_id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n ForeignKey(\n \"users_address.id\",\n name=\"users_address_id_fkey\",\n ),\n nullable=False,\n comment=\"ID of the User's address\",\n )\n address: Mapped[\"Address\"] = relationship( # type: ignore\n \"Address\", back_populates=\"users\", lazy=\"joined\"\n )\n\n __table_args__ = (\n CheckConstraint(\n \"char_length(username) >= 4\", name=\"users_username_length\"\n ),\n CheckConstraint(\"char_length(email) >= 3\", name=\"users_email_length\"),\n CheckConstraint(\n sql_database_setting.DB_EMAIL_CONSTRAINT, name=\"users_email_format\"\n ),\n CheckConstraint(\n \"char_length(first_name) >= 1\", name=\"users_first_name_length\"\n ),\n CheckConstraint(\n \"char_length(last_name) >= 1\", name=\"users_last_name_length\"\n ),\n CheckConstraint(\"LENGTH(password) = 60\", name=\"users_password_length\"),\n CheckConstraint(\n sql_database_setting.DB_PHONE_NUMBER_CONSTRAINT,\n name=\"users_phone_number_format\",\n ),\n )" }, { "identifier": "Token", "path": "app/models/unstructured/token.py", "snippet": "class Token(BaseModel):\n \"\"\"\n Token class based on Pydantic Base Model.\n \"\"\"\n\n key: str = Field(\n ..., title=\"Token\", description=\"Refresh token retrieved from login\"\n )\n user_info: str = Field(\n ...,\n title=\"User Info\",\n description=\"User ID and IP address of user\",\n pattern=auth_setting.TOKEN_USER_INFO_REGEX,\n )\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"key\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"user_info\": \"c3ee0ef6-3a18-4251-af6d-138a8c8fec25:127.0.0.1\",\n }\n }\n )" }, { "identifier": "Token", "path": "app/schemas/external/token.py", "snippet": "class Token(BaseModel):\n \"\"\"\n Token that inherits from Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra=token_example,\n )\n\n access_token: str = Field(\n ..., title=\"Token\", description=\"Access token\", min_length=30\n )\n refresh_token: str = Field(\n ..., title=\"Refresh Token\", description=\"Refresh token\", min_length=30\n )" }, { "identifier": "TokenPayload", "path": "app/schemas/external/token.py", "snippet": "class TokenPayload(PublicClaimsToken, RegisteredClaimsToken):\n \"\"\"\n Token Payload class based on RegisteredClaimsToken and PublicClaimsToken.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra=token_payload_example,\n )" }, { "identifier": "TokenResponse", "path": "app/schemas/external/token.py", "snippet": "class TokenResponse(Token):\n \"\"\"\n Token for Response based on Pydantic Base Model.\n 
\"\"\"\n\n model_config = ConfigDict(\n json_schema_extra=token_response_example,\n )\n\n token_type: str = Field(\n default=\"bearer\", title=\"Token type\", description=\"Type of the token\"\n )" }, { "identifier": "Scope", "path": "app/schemas/infrastructure/scope.py", "snippet": "class Scope(StrEnum):\n \"\"\"\n Enum representing different scopes\n \"\"\"\n\n ACCESS_TOKEN = auto()\n REFRESH_TOKEN = auto()" }, { "identifier": "TokenService", "path": "app/services/infrastructure/token.py", "snippet": "class TokenService:\n \"\"\"\n Service class for token operations in the authentication database\n \"\"\"\n\n def __init__(\n self,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n ):\n self._redis: Redis = redis # type: ignore\n self._refresh_token_expire_minutes: (\n PositiveInt\n ) = auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES\n self._blacklist_expiration_seconds: PositiveInt = (\n PositiveInt(\n PositiveInt(auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES) + 1\n )\n * 60\n ) # converting minutes to seconds\n\n @handle_redis_exceptions\n @benchmark\n async def create_token(self, token: Token) -> bool:\n \"\"\"\n Create a token in authentication database\n :param token: Token object with key and value\n :type token: Token\n :return: True if the token was inserted; otherwise false\n :rtype: bool\n \"\"\"\n try:\n inserted: bool = await self._redis.setex(\n token.key,\n self._refresh_token_expire_minutes,\n token.user_info,\n )\n except RedisError as r_exc:\n logger.error(\"Error at creating token. %s\", r_exc)\n raise r_exc\n return inserted\n\n @handle_redis_exceptions\n @benchmark\n async def get_token(self, key: str) -> Optional[str]:\n \"\"\"\n Read token from the authentication database\n :param key: The key to search for\n :type key: str\n :return: The refresh token\n :rtype: str\n \"\"\"\n try:\n value: str = str(await self._redis.get(key))\n except RedisError as r_exc:\n logger.error(\"Error at getting token. %s\", r_exc)\n raise r_exc\n return value\n\n @handle_redis_exceptions\n @benchmark\n async def blacklist_token(self, token_key: str) -> bool:\n \"\"\"\n Blacklist a given token.\n :param token_key: The token key to blacklist.\n :type token_key: str\n :return: True if the token was successfully blacklisted,\n otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: bool = await self._redis.setex(\n f\"blacklist:{token_key}\",\n self._blacklist_expiration_seconds,\n \"true\",\n )\n except RedisError as r_exc:\n logger.error(\"Error at blacklisting token. %s\", r_exc)\n raise r_exc\n return blacklisted\n\n @handle_redis_exceptions\n @benchmark\n async def is_token_blacklisted(self, token_key: str) -> bool:\n \"\"\"\n Check if a given token is blacklisted.\n :param token_key: The token key to verify.\n :type token_key: str\n :return: True if the token is blacklisted, otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: Optional[str] = await self._redis.get(\n f\"blacklist\" f\":{token_key}\"\n )\n except RedisError as r_exc:\n logger.error(\"Error at checking if token is blacklisted. 
%s\", r_exc)\n raise r_exc\n return bool(blacklisted)" }, { "identifier": "get_nationality_code", "path": "app/utils/utils.py", "snippet": "def get_nationality_code(country_name: str) -> str:\n \"\"\"\n Get the nationality code given a country name\n :param country_name: The name of the country\n :type country_name: str\n :return: The nationality in ICAO 3-letter code [ICAO-Doc9303]\n :rtype: str\n \"\"\"\n with contextlib.suppress(LookupError):\n if country := pycountry.countries.get(name=country_name):\n return str(country.alpha_3)\n return \"\"" } ]
import logging
import time
from typing import Annotated, Any, Optional

from fastapi import Depends, HTTPException, status
from redis.asyncio import Redis

from app.config.config import get_auth_settings
from app.config.db.auth_settings import AuthSettings
from app.core.security.jwt import create_access_token, create_refresh_token
from app.models.sql.user import User
from app.models.unstructured.token import Token as TokenDB
from app.schemas.external.token import Token, TokenPayload, TokenResponse
from app.schemas.infrastructure.scope import Scope
from app.services.infrastructure.token import TokenService
from app.utils.utils import get_nationality_code
4,332
"""
A module for auth in the app.services package.
"""
logger: logging.Logger = logging.getLogger(__name__)


class AuthService:
    """
    Service class for user authentication.
    """

    @staticmethod
    def _build_payload(
        user: User,
"""
A module for auth in the app.services package.
"""
logger: logging.Logger = logging.getLogger(__name__)


class AuthService:
    """
    Service class for user authentication.
    """

    @staticmethod
    def _build_payload(
        user: User,
auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],
0
2023-11-17 00:32:32+00:00
8k
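The context snippets of this record show the token flow: create_access_token encodes a TokenPayload with python-jose's jwt.encode, and create_refresh_token reuses it with a longer expiry and the refresh_token scope. A minimal sketch of that pattern, with placeholder values; SECRET_KEY, the expiry minutes, the claims, and the make_token helper are illustrative assumptions, not the boilerplate's actual code.

from datetime import datetime, timedelta, timezone

from jose import jwt

SECRET_KEY = "change-me"  # placeholder; the real app reads this from AuthSettings
ALGORITHM = "HS256"


def make_token(claims: dict, minutes: float, scope: str) -> str:
    # Stamp an expiry and a scope claim, then sign the payload.
    expire = datetime.now(timezone.utc) + timedelta(minutes=minutes)
    payload = {**claims, "exp": int(expire.timestamp()), "scope": scope}
    return jwt.encode(payload, SECRET_KEY, algorithm=ALGORITHM)


claims = {"sub": "username:c3ee0ef6-3a18-4251-af6d-138a8c8fec25"}
access_token = make_token(claims, minutes=30, scope="access_token")
refresh_token = make_token(claims, minutes=60 * 24, scope="refresh_token")
print(jwt.decode(access_token, SECRET_KEY, algorithms=[ALGORITHM])["scope"])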
vitant-lang/CBAM-ASPP
get_miou.py
[ { "identifier": "DeeplabV3", "path": "deeplab.py", "snippet": "class DeeplabV3(object):\n _defaults = {\n #-------------------------------------------------------------------#\n # model_path指向logs文件夹下的权值文件\n # 训练好后logs文件夹下存在多个权值文件,选择验证集损失较低的即可。\n # 验证集损失较低不代表miou较高,仅代表该权值在验证集上泛化性能较好。\n #-------------------------------------------------------------------#\n \"model_path\" : 'model_data/cbam.pth',\n #----------------------------------------#\n # 所需要区分的类的个数+1\n #----------------------------------------#\n \"num_classes\" : 3,\n #----------------------------------------#\n # 所使用的的主干网络:\n # mobilenet\n # xception\n #----------------------------------------#\n \"backbone\" : \"mobilenet\",\n #----------------------------------------#\n # 输入图片的大小\n #----------------------------------------#\n \"input_shape\" : [512, 512],\n #----------------------------------------#\n # 下采样的倍数,一般可选的为8和16\n # 与训练时设置的一样即可\n #----------------------------------------#\n \"downsample_factor\" : 16,\n #-------------------------------------------------#\n # mix_type参数用于控制检测结果的可视化方式\n #\n # mix_type = 0的时候代表原图与生成的图进行混合\n # mix_type = 1的时候代表仅保留生成的图\n # mix_type = 2的时候代表仅扣去背景,仅保留原图中的目标\n #-------------------------------------------------#\n \"mix_type\" : 0,\n #-------------------------------#\n # 是否使用Cuda\n # 没有GPU可以设置成False\n #-------------------------------#\n \"cuda\" : True,\n }\n\n #---------------------------------------------------#\n # 初始化Deeplab\n #---------------------------------------------------#\n def __init__(self, **kwargs):\n self.__dict__.update(self._defaults)\n for name, value in kwargs.items():\n setattr(self, name, value)\n #---------------------------------------------------#\n # 画框设置不同的颜色\n #---------------------------------------------------#\n if self.num_classes <= 21:\n self.colors = [ (0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128), (0, 128, 128), \n (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0), (192, 128, 0), (64, 0, 128), (192, 0, 128), \n (64, 128, 128), (192, 128, 128), (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128), \n (128, 64, 12)]\n else:\n hsv_tuples = [(x / self.num_classes, 1., 1.) 
for x in range(self.num_classes)]\n self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))\n self.colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), self.colors))\n #---------------------------------------------------#\n # 获得模型\n #---------------------------------------------------#\n self.generate()\n \n show_config(**self._defaults)\n \n #---------------------------------------------------#\n # 获得所有的分类\n #---------------------------------------------------#\n def generate(self, onnx=False):\n #-------------------------------#\n # 载入模型与权值\n #-------------------------------#\n self.net = DeepLab(num_classes=self.num_classes, backbone=self.backbone, downsample_factor=self.downsample_factor, pretrained=False)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n self.net.load_state_dict(torch.load(self.model_path, map_location=device))\n self.net = self.net.eval()\n print('{} model, and classes loaded.'.format(self.model_path))\n if not onnx:\n if self.cuda:\n self.net = nn.DataParallel(self.net)\n self.net = self.net.cuda()\n\n #---------------------------------------------------#\n # 检测图片\n #---------------------------------------------------#\n def detect_image(self, image, count=False, name_classes=None):\n #---------------------------------------------------------#\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\n #---------------------------------------------------------#\n image = cvtColor(image)\n #---------------------------------------------------#\n # 对输入图像进行一个备份,后面用于绘图\n #---------------------------------------------------#\n old_img = copy.deepcopy(image)\n orininal_h = np.array(image).shape[0]\n orininal_w = np.array(image).shape[1]\n #---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n # 也可以直接resize进行识别\n #---------------------------------------------------------#\n image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))\n #---------------------------------------------------------#\n # 添加上batch_size维度\n #---------------------------------------------------------#\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)\n\n with torch.no_grad():\n images = torch.from_numpy(image_data)\n if self.cuda:\n images = images.cuda()\n \n #---------------------------------------------------#\n # 图片传入网络进行预测\n #---------------------------------------------------#\n pr = self.net(images)[0]\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()\n #--------------------------------------#\n # 将灰条部分截取掉\n #--------------------------------------#\n pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \\\n int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]\n #---------------------------------------------------#\n # 进行图片的resize\n #---------------------------------------------------#\n pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = pr.argmax(axis=-1)\n \n #---------------------------------------------------------#\n # 计数\n #---------------------------------------------------------#\n if count:\n classes_nums = 
np.zeros([self.num_classes])\n total_points_num = orininal_h * orininal_w\n print('-' * 63)\n print(\"|%25s | %15s | %15s|\"%(\"Key\", \"Value\", \"Ratio\"))\n print('-' * 63)\n for i in range(self.num_classes):\n num = np.sum(pr == i)\n ratio = num / total_points_num * 100\n if num > 0:\n print(\"|%25s | %15s | %14.2f%%|\"%(str(name_classes[i]), str(num), ratio))\n print('-' * 63)\n classes_nums[i] = num\n print(\"classes_nums:\", classes_nums)\n \n if self.mix_type == 0:\n # seg_img = np.zeros((np.shape(pr)[0], np.shape(pr)[1], 3))\n # for c in range(self.num_classes):\n # seg_img[:, :, 0] += ((pr[:, :] == c ) * self.colors[c][0]).astype('uint8')\n # seg_img[:, :, 1] += ((pr[:, :] == c ) * self.colors[c][1]).astype('uint8')\n # seg_img[:, :, 2] += ((pr[:, :] == c ) * self.colors[c][2]).astype('uint8')\n seg_img = np.reshape(np.array(self.colors, np.uint8)[np.reshape(pr, [-1])], [orininal_h, orininal_w, -1])\n #------------------------------------------------#\n\n # 将新图片转换成Image的形式\n #------------------------------------------------#\n image = Image.fromarray(np.uint8(seg_img))\n #------------------------------------------------#\n heatmap = ImageOps.colorize(image.convert('L'), black='blue', white='red')\n heatmap = heatmap.resize(old_img.size)\n\n # 将热力图与原图进行混合\n image = Image.blend(old_img, heatmap, 0.9)\n # 将热力图的透明度加大,以增强颜色鲜明度\n image.putalpha(128)\n # 将新图与原图及进行混合\n #------------------------------------------------#\n\n # image = Image.blend(old_img, image, 0.3)\n\n elif self.mix_type == 1:\n # seg_img = np.zeros((np.shape(pr)[0], np.shape(pr)[1], 3))\n # for c in range(self.num_classes):\n # seg_img[:, :, 0] += ((pr[:, :] == c ) * self.colors[c][0]).astype('uint8')\n # seg_img[:, :, 1] += ((pr[:, :] == c ) * self.colors[c][1]).astype('uint8')\n # seg_img[:, :, 2] += ((pr[:, :] == c ) * self.colors[c][2]).astype('uint8')\n seg_img = np.reshape(np.array(self.colors, np.uint8)[np.reshape(pr, [-1])], [orininal_h, orininal_w, -1])\n #------------------------------------------------#\n # 将新图片转换成Image的形式\n #------------------------------------------------#\n image = Image.fromarray(np.uint8(seg_img))\n\n elif self.mix_type == 2:\n seg_img = (np.expand_dims(pr != 0, -1) * np.array(old_img, np.float32)).astype('uint8')\n #------------------------------------------------#\n # 将新图片转换成Image的形式\n #------------------------------------------------#\n image = Image.fromarray(np.uint8(seg_img))\n \n return image\n\n def get_FPS(self, image, test_interval):\n #---------------------------------------------------------#\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\n #---------------------------------------------------------#\n image = cvtColor(image)\n #---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n # 也可以直接resize进行识别\n #---------------------------------------------------------#\n image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))\n #---------------------------------------------------------#\n # 添加上batch_size维度\n #---------------------------------------------------------#\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)\n\n with torch.no_grad():\n images = torch.from_numpy(image_data)\n if self.cuda:\n images = images.cuda()\n \n #---------------------------------------------------#\n # 图片传入网络进行预测\n #---------------------------------------------------#\n pr = self.net(images)[0]\n #---------------------------------------------------#\n # 
取出每一个像素点的种类\n #---------------------------------------------------#\n pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy().argmax(axis=-1)\n #--------------------------------------#\n # 将灰条部分截取掉\n #--------------------------------------#\n pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \\\n int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]\n\n t1 = time.time()\n for _ in range(test_interval):\n with torch.no_grad():\n #---------------------------------------------------#\n # 图片传入网络进行预测\n #---------------------------------------------------#\n pr = self.net(images)[0]\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy().argmax(axis=-1)\n #--------------------------------------#\n # 将灰条部分截取掉\n #--------------------------------------#\n pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \\\n int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]\n t2 = time.time()\n tact_time = (t2 - t1) / test_interval\n return tact_time\n\n def convert_to_onnx(self, simplify, model_path):\n import onnx\n self.generate(onnx=True)\n\n im = torch.zeros(1, 3, *self.input_shape).to('cpu') # image size(1, 3, 512, 512) BCHW\n input_layer_names = [\"images\"]\n output_layer_names = [\"output\"]\n \n # Export the model\n print(f'Starting export with onnx {onnx.__version__}.')\n torch.onnx.export(self.net,\n im,\n f = model_path,\n verbose = False,\n opset_version = 12,\n training = torch.onnx.TrainingMode.EVAL,\n do_constant_folding = True,\n input_names = input_layer_names,\n output_names = output_layer_names,\n dynamic_axes = None)\n\n # Checks\n model_onnx = onnx.load(model_path) # load onnx model\n onnx.checker.check_model(model_onnx) # check onnx model\n\n # Simplify onnx\n if simplify:\n import onnxsim\n print(f'Simplifying with onnx-simplifier {onnxsim.__version__}.')\n model_onnx, check = onnxsim.simplify(\n model_onnx,\n dynamic_input_shape=False,\n input_shapes=None)\n assert check, 'assert check failed'\n onnx.save(model_onnx, model_path)\n\n print('Onnx model save as {}'.format(model_path))\n \n def get_miou_png(self, image):\n #---------------------------------------------------------#\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\n #---------------------------------------------------------#\n image = cvtColor(image)\n orininal_h = np.array(image).shape[0]\n orininal_w = np.array(image).shape[1]\n #---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n # 也可以直接resize进行识别\n #---------------------------------------------------------#\n image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))\n #---------------------------------------------------------#\n # 添加上batch_size维度\n #---------------------------------------------------------#\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)\n\n with torch.no_grad():\n images = torch.from_numpy(image_data)\n if self.cuda:\n images = images.cuda()\n \n #---------------------------------------------------#\n # 图片传入网络进行预测\n #---------------------------------------------------#\n pr = self.net(images)[0]\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = 
F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()\n #--------------------------------------#\n # 将灰条部分截取掉\n #--------------------------------------#\n pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \\\n int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]\n #---------------------------------------------------#\n # 进行图片的resize\n #---------------------------------------------------#\n pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = pr.argmax(axis=-1)\n \n image = Image.fromarray(np.uint8(pr))\n return image" }, { "identifier": "compute_mIoU", "path": "utils/utils_metrics.py", "snippet": "def compute_mIoU(gt_dir, pred_dir, png_name_list, num_classes, name_classes=None): \n print('Num classes', num_classes) \n #-----------------------------------------#\n # 创建一个全是0的矩阵,是一个混淆矩阵\n #-----------------------------------------#\n hist = np.zeros((num_classes, num_classes))\n \n #------------------------------------------------#\n # 获得验证集标签路径列表,方便直接读取\n # 获得验证集图像分割结果路径列表,方便直接读取\n #------------------------------------------------#\n gt_imgs = [join(gt_dir, x + \".png\") for x in png_name_list] \n pred_imgs = [join(pred_dir, x + \".png\") for x in png_name_list] \n\n #------------------------------------------------#\n # 读取每一个(图片-标签)对\n #------------------------------------------------#\n for ind in range(len(gt_imgs)): \n #------------------------------------------------#\n # 读取一张图像分割结果,转化成numpy数组\n #------------------------------------------------#\n pred = np.array(Image.open(pred_imgs[ind])) \n #------------------------------------------------#\n # 读取一张对应的标签,转化成numpy数组\n #------------------------------------------------#\n label = np.array(Image.open(gt_imgs[ind])) \n\n # 如果图像分割结果与标签的大小不一样,这张图片就不计算\n if len(label.flatten()) != len(pred.flatten()): \n print(\n 'Skipping: len(gt) = {:d}, len(pred) = {:d}, {:s}, {:s}'.format(\n len(label.flatten()), len(pred.flatten()), gt_imgs[ind],\n pred_imgs[ind]))\n continue\n\n #------------------------------------------------#\n # 对一张图片计算21×21的hist矩阵,并累加\n #------------------------------------------------#\n hist += fast_hist(label.flatten(), pred.flatten(), num_classes) \n # 每计算10张就输出一下目前已计算的图片中所有类别平均的mIoU值\n if name_classes is not None and ind > 0 and ind % 10 == 0: \n print('{:d} / {:d}: mIou-{:0.2f}%; mPA-{:0.2f}%; Accuracy-{:0.2f}%'.format(\n ind, \n len(gt_imgs),\n 100 * np.nanmean(per_class_iu(hist)),\n 100 * np.nanmean(per_class_PA_Recall(hist)),\n 100 * per_Accuracy(hist)\n )\n )\n #------------------------------------------------#\n # 计算所有验证集图片的逐类别mIoU值\n #------------------------------------------------#\n IoUs = per_class_iu(hist)\n PA_Recall = per_class_PA_Recall(hist)\n Precision = per_class_Precision(hist)\n #------------------------------------------------#\n # 逐类别输出一下mIoU值\n #------------------------------------------------#\n if name_classes is not None:\n for ind_class in range(num_classes):\n print('===>' + name_classes[ind_class] + ':\\tIou-' + str(round(IoUs[ind_class] * 100, 2)) \\\n + '; Recall (equal to the PA)-' + str(round(PA_Recall[ind_class] * 100, 2))+ '; Precision-' + str(round(Precision[ind_class] * 100, 2)))\n\n #-----------------------------------------------------------------#\n # 在所有验证集图像上求所有类别平均的mIoU值,计算时忽略NaN值\n #-----------------------------------------------------------------#\n 
print('===> mIoU: ' + str(round(np.nanmean(IoUs) * 100, 2)) + '; mPA: ' + str(round(np.nanmean(PA_Recall) * 100, 2)) + '; Accuracy: ' + str(round(per_Accuracy(hist) * 100, 2))) \n return np.array(hist, np.int), IoUs, PA_Recall, Precision" }, { "identifier": "show_results", "path": "utils/utils_metrics.py", "snippet": "def show_results(miou_out_path, hist, IoUs, PA_Recall, Precision, name_classes, tick_font_size = 12):\n draw_plot_func(IoUs, name_classes, \"mIoU = {0:.2f}%\".format(np.nanmean(IoUs)*100), \"Intersection over Union\", \\\n os.path.join(miou_out_path, \"mIoU.png\"), tick_font_size = tick_font_size, plt_show = True)\n print(\"Save mIoU out to \" + os.path.join(miou_out_path, \"mIoU.png\"))\n\n draw_plot_func(PA_Recall, name_classes, \"mPA = {0:.2f}%\".format(np.nanmean(PA_Recall)*100), \"Pixel Accuracy\", \\\n os.path.join(miou_out_path, \"mPA.png\"), tick_font_size = tick_font_size, plt_show = False)\n print(\"Save mPA out to \" + os.path.join(miou_out_path, \"mPA.png\"))\n \n draw_plot_func(PA_Recall, name_classes, \"mRecall = {0:.2f}%\".format(np.nanmean(PA_Recall)*100), \"Recall\", \\\n os.path.join(miou_out_path, \"Recall.png\"), tick_font_size = tick_font_size, plt_show = False)\n print(\"Save Recall out to \" + os.path.join(miou_out_path, \"Recall.png\"))\n\n draw_plot_func(Precision, name_classes, \"mPrecision = {0:.2f}%\".format(np.nanmean(Precision)*100), \"Precision\", \\\n os.path.join(miou_out_path, \"Precision.png\"), tick_font_size = tick_font_size, plt_show = False)\n print(\"Save Precision out to \" + os.path.join(miou_out_path, \"Precision.png\"))\n\n with open(os.path.join(miou_out_path, \"confusion_matrix.csv\"), 'w', newline='') as f:\n writer = csv.writer(f)\n writer_list = []\n writer_list.append([' '] + [str(c) for c in name_classes])\n for i in range(len(hist)):\n writer_list.append([name_classes[i]] + [str(x) for x in hist[i]])\n writer.writerows(writer_list)\n print(\"Save confusion_matrix out to \" + os.path.join(miou_out_path, \"confusion_matrix.csv\"))" } ]
import os

from PIL import Image
from tqdm import tqdm

from deeplab import DeeplabV3
from utils.utils_metrics import compute_mIoU, show_results
6,713
'''
Note the following points when evaluating metrics:
1. The images generated by this file are grayscale images. Because their values are
   quite small, they show almost nothing when viewed as PNGs, so a nearly all-black
   image is normal.
2. This file computes the mIoU of the validation set. This repo currently uses the
   test set as the validation set and does not split off a separate test set.
'''
if __name__ == "__main__":
    #---------------------------------------------------------------------------#
    #   miou_mode specifies what this file computes when it is run.
    #   miou_mode = 0 runs the whole mIoU pipeline: generate predictions, then compute mIoU.
    #   miou_mode = 1 only generates the prediction results.
    #   miou_mode = 2 only computes the mIoU.
    #---------------------------------------------------------------------------#
    miou_mode = 0
    #------------------------------#
    #   number of classes + 1, e.g. 2 + 1
    #------------------------------#
    num_classes = 3
    #--------------------------------------------#
    #   classes to distinguish, same as in json_to_dataset
    #--------------------------------------------#
    #name_classes = ["background","aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    name_classes = ["_background_","cat","coal"]
    #-------------------------------------------------------#
    #   points to the folder containing the VOC dataset
    #   defaults to the VOC dataset in the root directory
    #-------------------------------------------------------#
    VOCdevkit_path = 'VOCdevkit'

    image_ids = open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),'r').read().splitlines()
    gt_dir = os.path.join(VOCdevkit_path, "VOC2007/SegmentationClass/")
    miou_out_path = "miou_out"
    pred_dir = os.path.join(miou_out_path, 'detection-results')

    if miou_mode == 0 or miou_mode == 1:
        if not os.path.exists(pred_dir):
            os.makedirs(pred_dir)

        print("Load model.")
        deeplab = DeeplabV3()
        print("Load model done.")

        print("Get predict result.")
        for image_id in tqdm(image_ids):
            image_path = os.path.join(VOCdevkit_path, "VOC2007/JPEGImages/"+image_id+".jpg")
            image = Image.open(image_path)
            image = deeplab.get_miou_png(image)
            image.save(os.path.join(pred_dir, image_id + ".png"))
        print("Get predict result done.")

    if miou_mode == 0 or miou_mode == 2:
        print("Get miou.")
        hist, IoUs, PA_Recall, Precision = compute_mIoU(gt_dir, pred_dir, image_ids, num_classes, name_classes)  # run the mIoU computation
        print("Get miou done.")
'''
Note the following points when evaluating metrics:
1. The images generated by this file are grayscale images. Because their values are
   quite small, they show almost nothing when viewed as PNGs, so a nearly all-black
   image is normal.
2. This file computes the mIoU of the validation set. This repo currently uses the
   test set as the validation set and does not split off a separate test set.
'''
if __name__ == "__main__":
    #---------------------------------------------------------------------------#
    #   miou_mode specifies what this file computes when it is run.
    #   miou_mode = 0 runs the whole mIoU pipeline: generate predictions, then compute mIoU.
    #   miou_mode = 1 only generates the prediction results.
    #   miou_mode = 2 only computes the mIoU.
    #---------------------------------------------------------------------------#
    miou_mode = 0
    #------------------------------#
    #   number of classes + 1, e.g. 2 + 1
    #------------------------------#
    num_classes = 3
    #--------------------------------------------#
    #   classes to distinguish, same as in json_to_dataset
    #--------------------------------------------#
    #name_classes = ["background","aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
    name_classes = ["_background_","cat","coal"]
    #-------------------------------------------------------#
    #   points to the folder containing the VOC dataset
    #   defaults to the VOC dataset in the root directory
    #-------------------------------------------------------#
    VOCdevkit_path = 'VOCdevkit'

    image_ids = open(os.path.join(VOCdevkit_path, "VOC2007/ImageSets/Segmentation/val.txt"),'r').read().splitlines()
    gt_dir = os.path.join(VOCdevkit_path, "VOC2007/SegmentationClass/")
    miou_out_path = "miou_out"
    pred_dir = os.path.join(miou_out_path, 'detection-results')

    if miou_mode == 0 or miou_mode == 1:
        if not os.path.exists(pred_dir):
            os.makedirs(pred_dir)

        print("Load model.")
        deeplab = DeeplabV3()
        print("Load model done.")

        print("Get predict result.")
        for image_id in tqdm(image_ids):
            image_path = os.path.join(VOCdevkit_path, "VOC2007/JPEGImages/"+image_id+".jpg")
            image = Image.open(image_path)
            image = deeplab.get_miou_png(image)
            image.save(os.path.join(pred_dir, image_id + ".png"))
        print("Get predict result done.")

    if miou_mode == 0 or miou_mode == 2:
        print("Get miou.")
        hist, IoUs, PA_Recall, Precision = compute_mIoU(gt_dir, pred_dir, image_ids, num_classes, name_classes)  # run the mIoU computation
        print("Get miou done.")
show_results(miou_out_path, hist, IoUs, PA_Recall, Precision, name_classes)
2
2023-11-17 13:25:28+00:00
8k
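The compute_mIoU snippet in this record relies on two helpers it does not include: fast_hist, which accumulates a num_classes x num_classes confusion matrix per image, and per_class_iu, which turns that matrix into per-class IoU (true positives on the diagonal, divided by the union of ground truth and prediction). A self-contained sketch of their conventional definitions follows; the repo's actual utils/utils_metrics.py may vary in detail.

import numpy as np


def fast_hist(label: np.ndarray, pred: np.ndarray, n: int) -> np.ndarray:
    # Confusion matrix: rows are ground-truth classes, columns are predictions.
    # Pixels whose label falls outside [0, n) (e.g. ignore/void) are skipped.
    mask = (label >= 0) & (label < n)
    return np.bincount(
        n * label[mask].astype(int) + pred[mask], minlength=n**2
    ).reshape(n, n)


def per_class_iu(hist: np.ndarray) -> np.ndarray:
    # IoU per class: TP / (TP + FP + FN); the max(., 1) avoids division by zero.
    return np.diag(hist) / np.maximum(
        hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist), 1
    )


label = np.array([0, 0, 1, 2, 2, 2])
pred = np.array([0, 1, 1, 2, 2, 0])
hist = fast_hist(label, pred, 3)
print(per_class_iu(hist))  # per-class IoU; mIoU is np.nanmean of this vector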
dataaug/open-interpreter-free
interpreter/core/core.py
[ { "identifier": "cli", "path": "interpreter/cli/cli.py", "snippet": "def cli(interpreter):\n parser = argparse.ArgumentParser(description=\"Open Interpreter\")\n\n # Add arguments\n for arg in arguments:\n if arg[\"type\"] == bool:\n parser.add_argument(\n f'-{arg[\"nickname\"]}',\n f'--{arg[\"name\"]}',\n dest=arg[\"name\"],\n help=arg[\"help_text\"],\n action=\"store_true\",\n default=None,\n )\n else:\n choices = arg[\"choices\"] if \"choices\" in arg else None\n default = arg[\"default\"] if \"default\" in arg else None\n\n parser.add_argument(\n f'-{arg[\"nickname\"]}',\n f'--{arg[\"name\"]}',\n dest=arg[\"name\"],\n help=arg[\"help_text\"],\n type=arg[\"type\"],\n choices=choices,\n default=default,\n )\n\n # Add special arguments\n parser.add_argument(\n \"--config\",\n dest=\"config\",\n action=\"store_true\",\n help=\"open config.yaml file in text editor\",\n )\n parser.add_argument(\n \"--conversations\",\n dest=\"conversations\",\n action=\"store_true\",\n help=\"list conversations to resume\",\n )\n parser.add_argument(\n \"-f\",\n \"--fast\",\n dest=\"fast\",\n action=\"store_true\",\n help=\"run `interpreter --model gpt-3.5-turbo`\",\n )\n parser.add_argument(\n \"--version\",\n dest=\"version\",\n action=\"store_true\",\n help=\"get Open Interpreter's version number\",\n )\n\n args = parser.parse_args()\n\n # This should be pushed into an open_config.py util\n # If --config is used, open the config.yaml file in the Open Interpreter folder of the user's config dir\n if args.config:\n if args.config_file:\n config_file = get_config_path(args.config_file)\n else:\n config_file = get_config_path()\n\n print(f\"Opening `{config_file}`...\")\n\n # Use the default system editor to open the file\n if platform.system() == \"Windows\":\n os.startfile(\n config_file\n ) # This will open the file with the default application, e.g., Notepad\n else:\n try:\n # Try using xdg-open on non-Windows platforms\n subprocess.call([\"xdg-open\", config_file])\n except FileNotFoundError:\n # Fallback to using 'open' on macOS if 'xdg-open' is not available\n subprocess.call([\"open\", config_file])\n return\n\n if args.local:\n # Default local (LM studio) attributes\n interpreter.system_message = \"You are an AI.\"\n interpreter.model = (\n \"openai/\" + interpreter.model\n ) # This tells LiteLLM it's an OpenAI compatible server\n interpreter.api_base = \"http://localhost:1234/v1\"\n interpreter.max_tokens = 1000\n interpreter.context_window = 3000\n interpreter.api_key = \"0\"\n\n display_markdown_message(\n \"\"\"\n> Open Interpreter's local mode is powered by **`LM Studio`**.\n\n\nYou will need to run **LM Studio** in the background.\n\n1. Download **LM Studio** from [https://lmstudio.ai/](https://lmstudio.ai/) then start it.\n2. Select a language model then click **Download**.\n3. Click the **<->** button on the left (below the chat button).\n4. 
Select your model at the top, then click **Start Server**.\n\n\nOnce the server is running, you can begin your conversation below.\n\n> **Warning:** This feature is highly experimental.\n> Don't expect `gpt-3.5` / `gpt-4` level quality, speed, or reliability yet!\n\n\"\"\"\n )\n\n # Set attributes on interpreter\n for attr_name, attr_value in vars(args).items():\n # Ignore things that aren't possible attributes on interpreter\n if attr_value is not None and hasattr(interpreter, attr_name):\n # If the user has provided a config file, load it and extend interpreter's configuration\n if attr_name == \"config_file\":\n user_config = get_config_path(attr_value)\n interpreter.config_file = user_config\n interpreter.extend_config(config_path=user_config)\n else:\n setattr(interpreter, attr_name, attr_value)\n\n # if safe_mode and auto_run are enabled, safe_mode disables auto_run\n if interpreter.auto_run and (\n interpreter.safe_mode == \"ask\" or interpreter.safe_mode == \"auto\"\n ):\n setattr(interpreter, \"auto_run\", False)\n\n # If --conversations is used, run conversation_navigator\n if args.conversations:\n conversation_navigator(interpreter)\n return\n\n if args.version:\n version = pkg_resources.get_distribution(\"open-interpreter\").version\n print(f\"Open Interpreter {version}\")\n return\n\n if args.fast:\n interpreter.model = \"gpt-3.5-turbo\"\n\n if args.vision:\n interpreter.vision = True\n interpreter.model = \"gpt-4-vision-preview\"\n interpreter.system_message += \"\\nThe user will show you an image of the code you write. You can view images directly. Be sure to actually write a markdown code block for almost every user request! Almost EVERY message should include a markdown code block. Do not end your message prematurely!\\n\\nFor HTML: This will be run STATELESSLY. You may NEVER write '<!-- previous code here... --!>' or `<!-- header will go here -->` or anything like that. It is CRITICAL TO NEVER WRITE PLACEHOLDERS. Placeholders will BREAK it. You must write the FULL HTML CODE EVERY TIME. Therefore you cannot write HTML piecemeal—write all the HTML, CSS, and possibly Javascript **in one step, in one code block**. The user will help you review it visually.\\nIf the user submits a filepath, you will also see the image. 
The filepath and user image will both be in the user's message.\"\n interpreter.function_calling_llm = False\n interpreter.context_window = 110000\n interpreter.max_tokens = 4096\n\n display_markdown_message(\"> `Vision` enabled **(experimental)**\\n\")\n\n interpreter.chat()" }, { "identifier": "setup_llm", "path": "interpreter/llm/setup_llm.py", "snippet": "def setup_llm(interpreter):\n \"\"\"\n Takes an Interpreter (which includes a ton of LLM settings),\n returns a Coding LLM (a generator that streams deltas with `message` and `code`).\n \"\"\"\n # gpt4fre\n gpt4free = True\n if gpt4free:\n text_llm = setup_gpt4free_llm(interpreter)\n coding_llm = convert_to_coding_gpt4free_llm(text_llm, debug_mode=interpreter.debug_mode)\n return coding_llm\n\n # Detect whether or not it's a function calling LLM\n if interpreter.function_calling_llm == None:\n if not interpreter.local and (\n interpreter.model in litellm.open_ai_chat_completion_models\n or interpreter.model.startswith(\"azure/\")\n ):\n interpreter.function_calling_llm = True\n else:\n interpreter.function_calling_llm = False\n\n if interpreter.function_calling_llm:\n # Function-calling LLM\n coding_llm = setup_openai_coding_llm(interpreter)\n else:\n # If disable_procedures has not been set manually:\n if interpreter.disable_procedures == None:\n # Disable procedures, which confuses most of these models (except GPT-4V)\n\n if interpreter.model != \"gpt-4-vision-preview\":\n interpreter.disable_procedures = True\n\n # Non-function-calling LLM\n text_llm = setup_text_llm(interpreter)\n coding_llm = convert_to_coding_llm(text_llm, debug_mode=interpreter.debug_mode)\n\n return coding_llm" }, { "identifier": "terminal_interface", "path": "interpreter/terminal_interface/terminal_interface.py", "snippet": "def terminal_interface(interpreter, message):\n # Auto run and local don't display messages.\n # Probably worth abstracting this to something like \"verbose_cli\" at some point.\n if not interpreter.auto_run and not interpreter.local:\n interpreter_intro_message = [\n \"**Open Interpreter** will require approval before running code.\"\n ]\n\n if interpreter.safe_mode == \"ask\" or interpreter.safe_mode == \"auto\":\n if not check_for_package(\"semgrep\"):\n interpreter_intro_message.append(\n f\"**Safe Mode**: {interpreter.safe_mode}\\n\\n>Note: **Safe Mode** requires `semgrep` (`pip install semgrep`)\"\n )\n else:\n interpreter_intro_message.append(\"Use `interpreter -y` to bypass this.\")\n\n interpreter_intro_message.append(\"Press `CTRL-C` to exit.\")\n\n display_markdown_message(\"\\n\\n\".join(interpreter_intro_message) + \"\\n\")\n\n active_block = None\n\n if message:\n interactive = False\n else:\n interactive = True\n\n while True:\n try:\n if interactive:\n message = input(\"> \").strip()\n\n try:\n # This lets users hit the up arrow key for past messages\n readline.add_history(message)\n except:\n # If the user doesn't have readline (may be the case on windows), that's fine\n pass\n\n except KeyboardInterrupt:\n # Exit gracefully\n break\n\n if message.startswith(\"%\") and interactive:\n handle_magic_command(interpreter, message)\n continue\n\n # Many users do this\n if message.strip() == \"interpreter --local\":\n print(\"Please press CTRL-C then run `interpreter --local`.\")\n continue\n\n if True: ################## interpreter.vision:\n # Is the input a path to an image? 
Like they just dragged it into the terminal?\n image_path = find_image_path(message)\n\n ## If we found an image, add it to the message\n if image_path:\n if interpreter.debug_mode:\n print(\"Found image:\", image_path)\n # Turn it into base64\n with open(image_path, \"rb\") as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode(\"utf-8\")\n file_extension = image_path.split(\".\")[-1]\n message = {\n \"role\": \"user\",\n \"message\": message,\n \"image\": f\"data:image/{file_extension};base64,{encoded_string}\",\n }\n\n # Track if we've ran a code block.\n # We'll use this to determine if we should render a new code block,\n # In the event we get code -> output -> code again\n ran_code_block = False\n render_cursor = True\n\n try:\n for chunk in interpreter.chat(message, display=False, stream=True):\n if interpreter.debug_mode:\n print(\"Chunk in `terminal_interface`:\", chunk)\n\n # Message\n if \"message\" in chunk:\n if active_block is None:\n active_block = MessageBlock()\n if active_block.type != \"message\":\n active_block.end()\n active_block = MessageBlock()\n active_block.message += chunk[\"message\"]\n render_cursor = True\n\n # Code\n if \"code\" in chunk or \"language\" in chunk:\n if active_block is None:\n active_block = CodeBlock()\n if active_block.type != \"code\" or ran_code_block:\n # If the last block wasn't a code block,\n # or it was, but we already ran it:\n active_block.end()\n active_block = CodeBlock()\n ran_code_block = False\n render_cursor = True\n\n if \"language\" in chunk:\n active_block.language = chunk[\"language\"]\n if \"code\" in chunk:\n active_block.code += chunk[\"code\"]\n if \"active_line\" in chunk:\n active_block.active_line = chunk[\"active_line\"]\n\n # Execution notice\n if \"executing\" in chunk:\n if not interpreter.auto_run:\n # OI is about to execute code. The user wants to approve this\n\n # End the active block so you can run input() below it\n active_block.end()\n\n should_scan_code = False\n\n if not interpreter.safe_mode == \"off\":\n if interpreter.safe_mode == \"auto\":\n should_scan_code = True\n elif interpreter.safe_mode == \"ask\":\n response = input(\n \" Would you like to scan this code? (y/n)\\n\\n \"\n )\n print(\"\") # <- Aesthetic choice\n\n if response.strip().lower() == \"y\":\n should_scan_code = True\n\n if should_scan_code:\n # Get code language and actual code from the chunk\n # We need to give these to semgrep when we start our scan\n language = chunk[\"executing\"][\"language\"]\n code = chunk[\"executing\"][\"code\"]\n\n scan_code(code, language, interpreter)\n\n response = input(\n \" Would you like to run this code? 
(y/n)\\n\\n \"\n )\n print(\"\") # <- Aesthetic choice\n\n if response.strip().lower() == \"y\":\n # Create a new, identical block where the code will actually be run\n # Conveniently, the chunk includes everything we need to do this:\n active_block = CodeBlock()\n active_block.margin_top = False # <- Aesthetic choice\n active_block.language = chunk[\"executing\"][\"language\"]\n active_block.code = chunk[\"executing\"][\"code\"]\n else:\n # User declined to run code.\n interpreter.messages.append(\n {\n \"role\": \"user\",\n \"message\": \"I have declined to run this code.\",\n }\n )\n break\n\n if \"image\" in chunk or \"html\" in chunk or \"javascript\" in chunk:\n # Good to keep the LLM informed <3\n message_for_llm = display_output(chunk)\n if message_for_llm:\n if \"output\" in interpreter.messages[-1]:\n interpreter.messages[-1][\"output\"] += \"\\n\" + message_for_llm\n else:\n interpreter.messages[-1][\"output\"] = message_for_llm\n\n # I know this is insane, but the easiest way to now display this\n # is to set the chunk to an output chunk, which will trigger the next conditional!\n\n chunk = {\"output\": message_for_llm}\n\n # Output\n if \"output\" in chunk:\n ran_code_block = True\n render_cursor = False\n active_block.output += \"\\n\" + chunk[\"output\"]\n active_block.output = (\n active_block.output.strip()\n ) # <- Aesthetic choice\n\n # Truncate output\n active_block.output = truncate_output(\n active_block.output, interpreter.max_output\n )\n\n if active_block:\n active_block.refresh(cursor=render_cursor)\n\n yield chunk\n\n # (Sometimes -- like if they CTRL-C quickly -- active_block is still None here)\n if active_block:\n active_block.end()\n active_block = None\n\n if not interactive:\n # Don't loop\n break\n\n except KeyboardInterrupt:\n # Exit gracefully\n if active_block:\n active_block.end()\n active_block = None\n\n if interactive:\n # (this cancels LLM, returns to the interactive \"> \" input)\n continue\n else:\n break\n except:\n system_info(interpreter)\n raise" }, { "identifier": "validate_llm_settings", "path": "interpreter/terminal_interface/validate_llm_settings.py", "snippet": "def validate_llm_settings(interpreter):\n \"\"\"\n Interactivley prompt the user for required LLM settings\n \"\"\"\n\n # This runs in a while loop so `continue` lets us start from the top\n # after changing settings (like switching to/from local)\n while True:\n if interpreter.local:\n # We have already displayed a message.\n # (This strange behavior makes me think validate_llm_settings needs to be rethought / refactored)\n break\n\n else:\n # Ensure API keys are set as environment variables\n\n # OpenAI\n if interpreter.model in litellm.open_ai_chat_completion_models:\n if not os.environ.get(\"OPENAI_API_KEY\") and not interpreter.api_key:\n display_welcome_message_once()\n\n display_markdown_message(\n \"\"\"---\n > OpenAI API key not found\n\n To use `GPT-4` (highly recommended) please provide an OpenAI API key.\n\n To use another language model, consult the documentation at [docs.openinterpreter.com](https://docs.openinterpreter.com/language-model-setup/).\n \n ---\n \"\"\"\n )\n\n response = getpass.getpass(\"OpenAI API key: \")\n print(f\"OpenAI API key: {response[:4]}...{response[-4:]}\")\n\n display_markdown_message(\n \"\"\"\n\n **Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows.\n \n ---\"\"\"\n )\n\n interpreter.api_key = response\n time.sleep(2)\n break\n\n # This is a model we 
don't have checks for yet.\n break\n\n # If we're here, we passed all the checks.\n\n # Auto-run is for fast, light useage -- no messages.\n # If local, we've already displayed a message.\n if not interpreter.auto_run and not interpreter.local:\n display_markdown_message(f\"> Model set to `{interpreter.model}`\")\n return" }, { "identifier": "check_for_update", "path": "interpreter/utils/check_for_update.py", "snippet": "def check_for_update():\n # Fetch the latest version from the PyPI API\n response = requests.get(f\"https://pypi.org/pypi/open-interpreter/json\")\n latest_version = response.json()[\"info\"][\"version\"]\n\n # Get the current version using pkg_resources\n current_version = pkg_resources.get_distribution(\"open-interpreter\").version\n\n return version.parse(latest_version) > version.parse(current_version)" }, { "identifier": "display_markdown_message", "path": "interpreter/utils/display_markdown_message.py", "snippet": "def display_markdown_message(message):\n \"\"\"\n Display markdown message. Works with multiline strings with lots of indentation.\n Will automatically make single line > tags beautiful.\n \"\"\"\n\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rich_print(Rule(style=\"white\"))\n else:\n rich_print(Markdown(line))\n\n if \"\\n\" not in message and message.startswith(\">\"):\n # Aesthetic choice. For these tags, they need a space below them\n print(\"\")" }, { "identifier": "get_config", "path": "interpreter/utils/get_config.py", "snippet": "def get_config_path(path=user_config_path):\ndef get_config(path=user_config_path):" }, { "identifier": "get_storage_path", "path": "interpreter/utils/local_storage_path.py", "snippet": "def get_storage_path(subdirectory=None):\n if subdirectory is None:\n return config_dir\n else:\n return os.path.join(config_dir, subdirectory)" }, { "identifier": "generate_system_message", "path": "interpreter/core/generate_system_message.py", "snippet": "def generate_system_message(interpreter):\n \"\"\"\n Dynamically generate a system message.\n\n Takes an interpreter instance,\n returns a string.\n\n This is easy to replace!\n Just swap out `interpreter.generate_system_message` with another function.\n \"\"\"\n\n #### Start with the static system message\n\n system_message = interpreter.system_message\n\n #### Add dynamic components, like the user's OS, username, relevant procedures, etc\n\n system_message += \"\\n\" + get_user_info_string()\n\n if not interpreter.local and not interpreter.disable_procedures:\n try:\n system_message += \"\\n\" + get_relevant_procedures_string(\n interpreter.messages\n )\n except:\n if interpreter.debug_mode:\n print(traceback.format_exc())\n # It's okay if they can't. This just fixes some common mistakes it makes.\n\n return system_message" }, { "identifier": "respond", "path": "interpreter/core/respond.py", "snippet": "def respond(interpreter):\n \"\"\"\n Yields tokens, but also adds them to interpreter.messages. 
TBH probably would be good to seperate those two responsibilities someday soon\n Responds until it decides not to run any more code or say anything else.\n \"\"\"\n\n last_unsupported_code = \"\"\n\n while True:\n system_message = interpreter.generate_system_message()\n\n # Create message object\n system_message = {\"role\": \"system\", \"message\": system_message}\n\n # Create the version of messages that we'll send to the LLM\n messages_for_llm = interpreter.messages.copy()\n messages_for_llm = [system_message] + messages_for_llm\n\n # It's best to explicitly tell these LLMs when they don't get an output\n for message in messages_for_llm:\n if \"output\" in message and message[\"output\"] == \"\":\n message[\"output\"] = \"No output\"\n\n ### RUN THE LLM ###\n\n # Add a new message from the assistant to interpreter's \"messages\" attribute\n # (This doesn't go to the LLM. We fill this up w/ the LLM's response)\n interpreter.messages.append({\"role\": \"assistant\"})\n\n # Start putting chunks into the new message\n # + yielding chunks to the user\n try:\n # Track the type of chunk that the coding LLM is emitting\n chunk_type = None\n\n for chunk in interpreter._llm(messages_for_llm):\n # Add chunk to the last message\n interpreter.messages[-1] = merge_deltas(interpreter.messages[-1], chunk)\n\n # This is a coding llm\n # It will yield dict with either a message, language, or code (or language AND code)\n\n # We also want to track which it's sending to we can send useful flags.\n # (otherwise pretty much everyone needs to implement this)\n for new_chunk_type in [\"message\", \"language\", \"code\"]:\n if new_chunk_type in chunk and chunk_type != new_chunk_type:\n if chunk_type:\n yield {f\"end_of_{chunk_type}\": True}\n # Language is actually from a code block\n if new_chunk_type == \"language\":\n new_chunk_type = \"code\"\n chunk_type = new_chunk_type\n yield {f\"start_of_{chunk_type}\": True}\n\n yield chunk\n\n # We don't trigger the end_of_message or end_of_code flag if we actually end on either (we just exit the loop above)\n yield {f\"end_of_{chunk_type}\": True}\n\n except litellm.exceptions.BudgetExceededError:\n display_markdown_message(\n f\"\"\"> Max budget exceeded\n\n **Session spend:** ${litellm._current_cost}\n **Max budget:** ${interpreter.max_budget}\n\n Press CTRL-C then run `interpreter --max_budget [higher USD amount]` to proceed.\n \"\"\"\n )\n break\n # Provide extra information on how to change API keys, if we encounter that error\n # (Many people writing GitHub issues were struggling with this)\n except Exception as e:\n if (\n interpreter.local == False\n and \"auth\" in str(e).lower()\n or \"api key\" in str(e).lower()\n ):\n output = traceback.format_exc()\n raise Exception(\n f\"{output}\\n\\nThere might be an issue with your API key(s).\\n\\nTo reset your API key (we'll use OPENAI_API_KEY for this example, but you may need to reset your ANTHROPIC_API_KEY, HUGGINGFACE_API_KEY, etc):\\n Mac/Linux: 'export OPENAI_API_KEY=your-key-here',\\n Windows: 'setx OPENAI_API_KEY your-key-here' then restart terminal.\\n\\n\"\n )\n elif interpreter.local:\n raise Exception(\n str(e)\n + \"\"\"\n\nPlease make sure LM Studio's local server is running by following the steps above.\n\nIf LM Studio's local server is running, please try a language model with a different architecture.\n\n \"\"\"\n )\n else:\n raise\n\n ### RUN CODE (if it's there) ###\n\n if \"code\" in interpreter.messages[-1]:\n if interpreter.debug_mode:\n print(\"Running code:\", interpreter.messages[-1])\n\n 
try:\n # What code do you want to run?\n code = interpreter.messages[-1][\"code\"]\n\n # Fix a common error where the LLM thinks it's in a Jupyter notebook\n if interpreter.messages[-1][\"language\"] == \"python\" and code.startswith(\n \"!\"\n ):\n code = code[1:]\n interpreter.messages[-1][\"code\"] = code\n interpreter.messages[-1][\"language\"] = \"shell\"\n\n # Get a code interpreter to run it\n language = interpreter.messages[-1][\"language\"].lower().strip()\n if language in language_map:\n if language not in interpreter._code_interpreters:\n # Create code interpreter\n config = {\"language\": language, \"vision\": interpreter.vision}\n interpreter._code_interpreters[\n language\n ] = create_code_interpreter(config)\n code_interpreter = interpreter._code_interpreters[language]\n else:\n # This still prints the code but don't allow code to run. Let's Open-Interpreter know through output message\n\n output = (\n f\"Open Interpreter does not currently support `{language}`.\"\n )\n\n yield {\"output\": output}\n interpreter.messages[-1][\"output\"] = output\n\n # Let the response continue so it can deal with the unsupported code in another way. Also prevent looping on the same piece of code.\n if code != last_unsupported_code:\n last_unsupported_code = code\n continue\n else:\n break\n\n # Yield a message, such that the user can stop code execution if they want to\n try:\n yield {\"executing\": {\"code\": code, \"language\": language}}\n except GeneratorExit:\n # The user might exit here.\n # We need to tell python what we (the generator) should do if they exit\n break\n\n # Yield each line, also append it to last messages' output\n interpreter.messages[-1][\"output\"] = \"\"\n for line in code_interpreter.run(code):\n yield line\n if \"output\" in line:\n output = interpreter.messages[-1][\"output\"]\n output += \"\\n\" + line[\"output\"]\n\n # Truncate output\n output = truncate_output(output, interpreter.max_output)\n\n interpreter.messages[-1][\"output\"] = output.strip()\n # Vision\n if interpreter.vision:\n base64_image = None\n if \"image\" in line:\n base64_image = line[\"image\"]\n if \"html\" in line:\n base64_image = html_to_base64(line[\"html\"])\n\n if base64_image:\n yield {\"output\": \"Sending image output to GPT-4V...\"}\n interpreter.messages[-1][\n \"image\"\n ] = f\"data:image/jpeg;base64,{base64_image}\"\n\n except:\n output = traceback.format_exc()\n yield {\"output\": output.strip()}\n interpreter.messages[-1][\"output\"] = output.strip()\n\n yield {\"active_line\": None}\n yield {\"end_of_execution\": True}\n\n else:\n # Doesn't want to run code. We're done\n break\n\n return" } ]
import json
import os
from datetime import datetime
from ..cli.cli import cli
from ..llm.setup_llm import setup_llm
from ..terminal_interface.terminal_interface import terminal_interface
from ..terminal_interface.validate_llm_settings import validate_llm_settings
from ..utils.check_for_update import check_for_update
from ..utils.display_markdown_message import display_markdown_message
from ..utils.get_config import get_config, user_config_path
from ..utils.local_storage_path import get_storage_path
from .generate_system_message import generate_system_message
from .respond import respond
6,987
""" This file defines the Interpreter class. It's the main file. `import interpreter` will import an instance of this class. """ class Interpreter: def cli(self): cli(self) def __init__(self): # State self.messages = [] self._code_interpreters = {} self.config_file = user_config_path # Settings self.local = False self.auto_run = False self.debug_mode = False self.max_output = 2000 self.safe_mode = "off" self.disable_procedures = False # Conversation history self.conversation_history = True self.conversation_filename = None self.conversation_history_path = get_storage_path("conversations") # LLM settings self.model = "" self.temperature = None self.system_message = "" self.context_window = None self.max_tokens = None self.api_base = None self.api_key = None self.max_budget = None self._llm = None self.function_calling_llm = None self.vision = False # LLM supports vision # Load config defaults self.extend_config(self.config_file) # Check for update try: if not self.local: # This should actually be pushed into the utility if check_for_update():
""" This file defines the Interpreter class. It's the main file. `import interpreter` will import an instance of this class. """ class Interpreter: def cli(self): cli(self) def __init__(self): # State self.messages = [] self._code_interpreters = {} self.config_file = user_config_path # Settings self.local = False self.auto_run = False self.debug_mode = False self.max_output = 2000 self.safe_mode = "off" self.disable_procedures = False # Conversation history self.conversation_history = True self.conversation_filename = None self.conversation_history_path = get_storage_path("conversations") # LLM settings self.model = "" self.temperature = None self.system_message = "" self.context_window = None self.max_tokens = None self.api_base = None self.api_key = None self.max_budget = None self._llm = None self.function_calling_llm = None self.vision = False # LLM supports vision # Load config defaults self.extend_config(self.config_file) # Check for update try: if not self.local: # This should actually be pushed into the utility if check_for_update():
display_markdown_message(
5
2023-11-16 03:10:42+00:00
8k
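Read as a whole, each record above is a next-line completion example: the context snippets, the import_statement, and the cropped_code form the model input, and next_line is the gold continuation, with gold_snippet_index pointing at the context entry the gold line depends on (index 5 in the record above is display_markdown_message, which is exactly what next_line calls). A minimal sketch of how one such record might be assembled into a prompt — the records.jsonl path and the build_prompt helper are illustrative assumptions, not part of the dataset:

import json

def build_prompt(record: dict) -> str:
    # Retrieved cross-file context first, one header comment per snippet ...
    context = "\n\n".join(
        f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}"
        for entry in record["context"]
    )
    # ... then the target file's imports and the cropped code to complete.
    return "\n\n".join([context, record["import_statement"], record["cropped_code"]])

with open("records.jsonl") as fh:  # hypothetical storage, one JSON object per line
    record = json.loads(fh.readline())
    prompt = build_prompt(record)
    gold = record["next_line"]  # e.g. "display_markdown_message(" in the record above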
3dp-accelerometer/octoprint-accelerometer
octoprint_accelerometer/plugin.py
[ { "identifier": "DataPostProcessRunner", "path": "octoprint_accelerometer/data_post_process.py", "snippet": "class DataPostProcessRunner:\n \"\"\"\n Runner for traversing stream files and post-processing (FFT) if necessary.\n \"\"\"\n def __init__(self,\n logger: Logger,\n on_event_callback: Optional[Callable[[DataProcessingEventType], None]],\n input_dir: str,\n input_file_prefix: str,\n algorithm_d1: str,\n output_dir: str,\n output_file_prefix: str,\n output_overwrite: bool,\n do_dry_run: bool,\n do_abort_flag: threading.Event = threading.Event()):\n self.logger: Logger = logger\n self.on_event_callback: Optional[Callable[[DataProcessingEventType], None]] = on_event_callback\n self._input_dir: str = input_dir\n self._input_file_prefix: str = input_file_prefix\n self._algorithm_d1: str = algorithm_d1\n self._output_dir: str = output_dir\n self._output_file_prefix: str = output_file_prefix\n self._output_overwrite: bool = output_overwrite\n self._do_dry_run: bool = do_dry_run\n self._do_abort_flag: threading.Event = do_abort_flag\n self._background_task: Optional[DataPostProcessBackgroundTask] = None\n self._background_task_start_timestamp: Optional[float] = None\n self._background_task_stop_timestamp: Optional[float] = None\n self._files_total: Optional[int] = None\n self._files_processed: Optional[int] = None\n self._files_skipped: Optional[int] = None\n\n @property\n def algorithm_d1(self) -> str:\n return self._algorithm_d1\n\n @algorithm_d1.setter\n def algorithm_d1(self, algorithm_d1: str):\n self._algorithm_d1 = algorithm_d1\n\n @property\n def input_dir(self) -> str:\n return self._input_dir\n\n @input_dir.setter\n def input_dir(self, input_dir: str):\n self._input_dir = input_dir\n\n @property\n def input_file_prefix(self) -> str:\n return self._input_file_prefix\n\n @input_file_prefix.setter\n def input_file_prefix(self, input_file_prefix: str):\n self._input_file_prefix = input_file_prefix\n\n @property\n def output_dir(self) -> str:\n return self._output_dir\n\n @output_dir.setter\n def output_dir(self, output_dir: str):\n self._output_dir = output_dir\n\n @property\n def output_file_prefix(self) -> str:\n return self._output_file_prefix\n\n @output_file_prefix.setter\n def output_file_prefix(self, output_file_prefix: str):\n self._output_file_prefix = output_file_prefix\n\n @property\n def output_overwrite(self) -> bool:\n return self._output_overwrite\n\n @output_overwrite.setter\n def output_overwrite(self, output_overwrite: bool):\n self._output_overwrite = output_overwrite\n\n @property\n def do_dry_run(self) -> bool:\n return self._do_dry_run\n\n @do_dry_run.setter\n def do_dry_run(self, do_dry_run: bool):\n self._do_dry_run = do_dry_run\n\n def is_running(self) -> bool:\n return True if self._background_task is not None and self._background_task.is_alive() else False\n\n def _send_on_event_callback(self, event: DataProcessingEventType):\n if self.on_event_callback:\n self.on_event_callback(event)\n\n def _send_on_thread_event_callback(self,\n event: DataProcessingEventType,\n total: Optional[int] = None,\n processed: Optional[int] = None,\n skipped: Optional[int] = None):\n\n self._files_total = total\n self._files_processed = processed\n self._files_skipped = skipped\n\n if event == DataProcessingEventType.PROCESSING_FINISHED:\n self._thread_stop_timestamp = time.time()\n\n if self.on_event_callback:\n self.on_event_callback(event)\n\n # TODO: force an early thread termination not by just terminating run().\n # Reason: Thread.is_alive() takes up to 30 seconds after 
run() terminated\n # to report not-alive. This works but sounds like a bug though.\n if event in [DataProcessingEventType.PROCESSING_FINISHED,\n DataProcessingEventType.UNHANDLED_EXCEPTION,\n DataProcessingEventType.ABORTED]:\n self.logger.info(\"data post processing thread terminated\")\n raise SystemExit()\n\n def stop(self) -> None:\n self._do_abort_flag.set()\n self._send_on_event_callback(DataProcessingEventType.ABORTING)\n if self._background_task:\n try:\n self._background_task.join()\n except RuntimeError as _e:\n self.logger.info(\"no running thread that can be stopped\")\n self._background_task = None\n self._background_task_stop_timestamp = time.time()\n self._send_on_event_callback(DataProcessingEventType.ABORTED)\n\n def get_last_run_duration_s(self) -> Optional[float]:\n \"\"\"\n Returns the last known duration.\n\n Note: Whenever this method is called, make sure to assert that the thread is not running.\n\n This is-running check is skipped here on purpose.\n Normally the child thread is the caller itself.\n The call propagated indirectly through the plugin's callback that most likely called this method again.\n In that case the thread is always running.\n\n :return: the last known duration; None if unknown or the thread is still running\n \"\"\"\n return None if not self._thread_stop_timestamp or not self._background_task_start_timestamp else self._thread_stop_timestamp - self._background_task_start_timestamp\n\n def get_last_processed_count(self) -> Tuple[Optional[int], Optional[int], Optional[int]]:\n return self._files_total, self._files_processed, self._files_skipped\n\n def run(self) -> None:\n self._do_abort_flag.clear()\n self._background_task_stop_timestamp = None\n self._files_total = None\n self._files_processed = None\n self._files_skipped = None\n\n try:\n self.logger.info(\"start data processing ...\")\n self._background_task = DataPostProcessBackgroundTask(\n logger=self.logger,\n task=DataPostProcessTask(\n logger=self.logger,\n runner=DataDecomposeRunner(\n command=\"algo\",\n input_dir=self.input_dir,\n input_file_prefix=self.input_file_prefix,\n algorithm_d1=self.algorithm_d1,\n output_dir=self.output_dir,\n output_file_prefix=self.output_file_prefix,\n output_overwrite=False),\n on_event_callback=self._send_on_thread_event_callback))\n\n self._send_on_event_callback(DataProcessingEventType.PROCESSING)\n self._background_task_start_timestamp = time.time()\n self._background_task.start()\n\n except Exception as e:\n self.logger.error(\"failed to start data processing thread\")\n self.logger.error(str(e))\n self._send_on_event_callback(DataProcessingEventType.UNHANDLED_EXCEPTION)" }, { "identifier": "DataProcessingEventType", "path": "octoprint_accelerometer/event_types.py", "snippet": "class DataProcessingEventType(IntEnum):\n \"\"\"\n Types that can be emitted by callback from the data processing task.\n \"\"\"\n\n STARTING = 1\n \"data processing: sane execution event\"\n PROCESSING = 2\n \"data processing: sane execution event\"\n PROCESSING_FINISHED = 3\n \"data processing: sane execution event\"\n\n UNHANDLED_EXCEPTION = 12\n \"data processing: exceptional event\"\n\n ABORTING = 21\n \"event upon user request\"\n ABORTED = 22\n \"event upon user request\"" }, { "identifier": "RecordingEventType", "path": "octoprint_accelerometer/event_types.py", "snippet": "class RecordingEventType(IntEnum):\n \"\"\"\n Types that can be emitted by callback from the recording task.\n \"\"\"\n\n STARTING = 1\n \"processing: sane execution event\"\n PROCESSING = 2\n
\"processing: sane execution event\"\n PROCESSING_FINISHED = 3\n \"processing: sane execution event\"\n\n FIFO_OVERRUN = 11\n \"processing: exceptional event\"\n UNHANDLED_EXCEPTION = 12\n \"processing: exceptional event\"\n\n ABORTING = 21\n \"event upon user request\"\n ABORTED = 22\n \"event upon user request\"" }, { "identifier": "RecordStepSeriesRunner", "path": "octoprint_accelerometer/record_step_series.py", "snippet": "class RecordStepSeriesRunner:\n \"\"\"\n Runner for moving printer, recording streams from accelerometer and saving to data to files.\n \"\"\"\n\n def __init__(self,\n logger: Logger,\n printer: PrinterInterface,\n controller_serial_device: str,\n on_event_callback: Optional[Callable[[RecordingEventType], None]],\n controller_record_timelapse_s: float,\n controller_decode_timeout_s: float,\n sensor_odr_hz: int,\n gcode_start_point_mm: Tuple[int, int, int],\n gcode_axis: List[Literal[\"x\", \"y\", \"z\"]],\n gcode_distance_mm: int,\n gcode_step_count: int,\n gcode_sequence_count: int,\n start_frequency_hz: int,\n stop_frequency_hz: int,\n step_frequency_hz: int,\n start_zeta_em2: int,\n stop_zeta_em2: int,\n step_zeta_em2: int,\n output_file_prefix: str,\n output_dir: str,\n do_dry_run: bool,\n do_abort_flag: threading.Event = threading.Event()):\n self.controller_response_error: bool = False\n self.controller_fifo_overrun_error: bool = False\n self.unhandled_exception: bool = False\n self.logger: Logger = logger\n self.printer: PrinterInterface = printer\n self._controller_serial_device: str = controller_serial_device\n self.on_event_callback: Optional[Callable[[RecordingEventType], None]] = on_event_callback\n self._controller_record_timelapse_s: float = controller_record_timelapse_s\n self._controller_decode_timeout_s: float = controller_decode_timeout_s\n self._sensor_odr_hz: int = sensor_odr_hz\n self._gcode_start_point_mm: Tuple[int, int, int] = gcode_start_point_mm\n self._gcode_axis: List[Literal[\"x\", \"y\", \"z\"]] = gcode_axis\n self._gcode_distance_mm: int = gcode_distance_mm\n self._gcode_step_count: int = gcode_step_count\n self._gcode_sequence_count: int = gcode_sequence_count\n self._start_frequency_hz: int = start_frequency_hz\n self._stop_frequency_hz: int = stop_frequency_hz\n self._step_frequency_hz: int = step_frequency_hz\n self._start_zeta_em2: int = start_zeta_em2\n self._stop_zeta_em2: int = stop_zeta_em2\n self._step_zeta_em2: int = step_zeta_em2\n self._output_file_prefix: str = output_file_prefix\n self._output_dir: str = output_dir\n self._do_dry_run: bool = do_dry_run\n self._do_abort_flag: threading.Event = do_abort_flag\n self._background_task: Optional[RecordStepSeriesBackgroundTask] = None\n self._background_task_start_timestamp: Optional[float] = None\n self._background_task_stop_timestamp: Optional[float] = None\n\n @property\n def controller_serial_device(self) -> str:\n return self._controller_serial_device\n\n @controller_serial_device.setter\n def controller_serial_device(self, controller_serial_device: str):\n self._controller_serial_device = controller_serial_device\n\n @property\n def controller_record_timelapse_s(self) -> float:\n return self._controller_record_timelapse_s\n\n @controller_record_timelapse_s.setter\n def controller_record_timelapse_s(self, controller_record_timelapse_s: float):\n self._controller_record_timelapse_s = controller_record_timelapse_s\n\n @property\n def controller_decode_timeout_s(self) -> float:\n return self._controller_decode_timeout_s\n\n @controller_decode_timeout_s.setter\n def 
controller_decode_timeout_s(self, controller_decode_timeout_s: float):\n self._controller_decode_timeout_s = controller_decode_timeout_s\n\n @property\n def sensor_odr_hz(self) -> int:\n return self._sensor_odr_hz\n\n @sensor_odr_hz.setter\n def sensor_odr_hz(self, sensor_odr_hz: int):\n self._sensor_odr_hz = sensor_odr_hz\n\n @property\n def gcode_start_point_mm(self) -> Tuple[int, int, int]:\n return self._gcode_start_point_mm\n\n @gcode_start_point_mm.setter\n def gcode_start_point_mm(self, gcode_start_point_mm: Tuple[int, int, int]):\n self._gcode_start_point_mm = gcode_start_point_mm\n\n @property\n def gcode_axis(self) -> List[Literal[\"x\", \"y\", \"z\"]]:\n return self._gcode_axis\n\n @gcode_axis.setter\n def gcode_axis(self, gcode_axis: List[Literal[\"x\", \"y\", \"z\"]]):\n self._gcode_axis = gcode_axis\n\n @property\n def gcode_distance_mm(self) -> int:\n return self._gcode_distance_mm\n\n @gcode_distance_mm.setter\n def gcode_distance_mm(self, gcode_distance_mm: int):\n self._gcode_distance_mm = gcode_distance_mm\n\n @property\n def gcode_step_count(self) -> int:\n return self._gcode_step_count\n\n @gcode_step_count.setter\n def gcode_step_count(self, gcode_step_count: int):\n self._gcode_step_count = gcode_step_count\n\n @property\n def gcode_sequence_count(self) -> int:\n return self._gcode_sequence_count\n\n @gcode_sequence_count.setter\n def gcode_sequence_count(self, gcode_sequence_count: int):\n self._gcode_sequence_count = gcode_sequence_count\n\n @property\n def start_frequency_hz(self) -> int:\n return self._start_frequency_hz\n\n @start_frequency_hz.setter\n def start_frequency_hz(self, start_frequency_hz: int):\n self._start_frequency_hz = start_frequency_hz\n\n @property\n def stop_frequency_hz(self) -> int:\n return self._stop_frequency_hz\n\n @stop_frequency_hz.setter\n def stop_frequency_hz(self, stop_frequency_hz: int):\n self._stop_frequency_hz = stop_frequency_hz\n\n @property\n def step_frequency_hz(self) -> int:\n return self._step_frequency_hz\n\n @step_frequency_hz.setter\n def step_frequency_hz(self, step_frequency_hz: int):\n self._step_frequency_hz = step_frequency_hz\n\n @property\n def start_zeta_em2(self) -> int:\n return self._start_zeta_em2\n\n @start_zeta_em2.setter\n def start_zeta_em2(self, start_zeta_em2: int):\n self._start_zeta_em2 = start_zeta_em2\n\n @property\n def stop_zeta_em2(self) -> int:\n return self._stop_zeta_em2\n\n @stop_zeta_em2.setter\n def stop_zeta_em2(self, stop_zeta_em2: int):\n self._stop_zeta_em2 = stop_zeta_em2\n\n @property\n def step_zeta_em2(self) -> int:\n return self._step_zeta_em2\n\n @step_zeta_em2.setter\n def step_zeta_em2(self, step_zeta_em2: int):\n self._step_zeta_em2 = step_zeta_em2\n\n @property\n def output_file_prefix(self) -> str:\n return self._output_file_prefix\n\n @output_file_prefix.setter\n def output_file_prefix(self, output_file_prefix: str):\n self._output_file_prefix = output_file_prefix\n\n @property\n def output_dir(self) -> str:\n return self._output_dir\n\n @output_dir.setter\n def output_dir(self, output_dir: str):\n self._output_dir = output_dir\n\n @property\n def do_dry_run(self) -> bool:\n return self._do_dry_run\n\n @do_dry_run.setter\n def do_dry_run(self, do_dry_run: bool):\n self._do_dry_run = do_dry_run\n\n def is_running(self) -> bool:\n return True if self._background_task is not None and self._background_task.is_alive() else False\n\n def task_execution_had_errors(self) -> bool:\n return self.controller_response_error or self.controller_response_error or 
self.unhandled_exception\n\n def _send_on_event_callback(self, event: RecordingEventType):\n if self.on_event_callback:\n self.on_event_callback(event)\n\n def _send_on_thread_event_callback(self, event: RecordingEventType):\n if event == RecordingEventType.PROCESSING_FINISHED:\n self._thread_stop_timestamp = time.time()\n\n if self.on_event_callback:\n self.on_event_callback(event)\n\n # TODO: force an early thread termination not by just terminating run().\n # Reason: Thread.is_alive() takes up to 30 seconds after run() terminated\n # to report not-alive. This works but sounds like a bug though.\n if event in [RecordingEventType.PROCESSING_FINISHED,\n RecordingEventType.FIFO_OVERRUN,\n RecordingEventType.UNHANDLED_EXCEPTION,\n RecordingEventType.ABORTED]:\n self.logger.info(\"recording thread terminated\")\n raise SystemExit()\n\n def stop(self) -> None:\n self._do_abort_flag.set()\n self._send_on_event_callback(RecordingEventType.ABORTING)\n if self._background_task:\n try:\n self._background_task.join()\n except RuntimeError as _e:\n self.logger.info(\"no running thread that can be stopped\")\n self._background_task = None\n self._background_task_stop_timestamp = time.time()\n self._send_on_event_callback(RecordingEventType.ABORTED)\n\n def get_last_run_duration_s(self) -> Optional[float]:\n \"\"\"\n Returns the last known duration.\n\n Note: Whenever this method is called, make sure to assert that the thread is not running.\n\n This is-running check is skipped here on purpose.\n Normally the child thread is the caller itself.\n The call propagated indirectly through the plugin's callback that most likely called this method again.\n In that case the thread is always running.\n\n :return: the last known duration; None if unknown or the thread is still running\n \"\"\"\n return None if not self._thread_stop_timestamp or not self._background_task_start_timestamp else self._thread_stop_timestamp - self._background_task_start_timestamp\n\n def run(self) -> None:\n py3dpaxxel_octo = Py3dpAxxelOcto(self.printer, self.logger)\n self.controller_fifo_overrun_error = False\n self.controller_response_error = False\n self.unhandled_exception = False\n self._do_abort_flag.clear()\n self._background_task_stop_timestamp = None\n\n if not self.printer.is_operational():\n self.logger.warning(\"received request to start recording but printer is not operational\")\n return\n\n try:\n self.logger.info(\"start recording ...\")\n self._background_task = RecordStepSeriesBackgroundTask(\n logger=self.logger,\n task=RecordStepSeriesTask(\n logger=self.logger,\n runner=SamplingStepsSeriesRunner(\n octoprint_api=py3dpaxxel_octo,\n controller_serial_device=self.controller_serial_device,\n controller_record_timelapse_s=self.controller_record_timelapse_s,\n controller_decode_timeout_s=self.controller_decode_timeout_s,\n sensor_odr=OutputDataRateFromHz[self.sensor_odr_hz],\n gcode_start_point_mm=self.gcode_start_point_mm,\n gcode_axis=self.gcode_axis,\n gcode_distance_mm=self.gcode_distance_mm,\n gcode_step_repeat_count=self.gcode_step_count,\n gcode_sequence_repeat_count=self.gcode_sequence_count,\n fx_start_hz=self.start_frequency_hz,\n fx_stop_hz=self.stop_frequency_hz,\n fx_step_hz=self.step_frequency_hz,\n zeta_start_em2=self.start_zeta_em2,\n zeta_stop_em2=self.stop_zeta_em2,\n zeta_step_em2=self.step_zeta_em2,\n output_file_prefix=self.output_file_prefix,\n output_dir=self.output_dir,\n do_dry_run=self.do_dry_run,\n do_abort_flag=self._do_abort_flag),\n on_event_callback=self._send_on_thread_event_callback))\n
self._send_on_event_callback(RecordingEventType.PROCESSING)\n self._background_task_start_timestamp = time.time()\n self._background_task.start()\n\n except Exception as e:\n self.unhandled_exception = True\n self.logger.error(\"failed to start recording thread\")\n self.logger.error(str(e))\n self._send_on_event_callback(RecordingEventType.UNHANDLED_EXCEPTION)" }, { "identifier": "RunMeta", "path": "octoprint_accelerometer/transfer_types.py", "snippet": "class RunMeta:\n started: Optional[Timestamp] = None # Timestamp()\n stopped: Optional[Timestamp] = None # Timestamp()\n sequences: Dict[int, SequenceMeta] = field(default_factory=lambda: ({}))" }, { "identifier": "SequenceMeta", "path": "octoprint_accelerometer/transfer_types.py", "snippet": "class SequenceMeta:\n streams: Dict[str, StreamMeta] = field(default_factory=lambda: ({}))" }, { "identifier": "StreamMeta", "path": "octoprint_accelerometer/transfer_types.py", "snippet": "class StreamMeta:\n file: Optional[File] = None # = File()\n meta: Optional[FilenameMetaStream] = None # = FilenameMetaStream()\n ffts: Dict[str, FftMeta] = field(default_factory=lambda: ({}))" }, { "identifier": "DataSets", "path": "octoprint_accelerometer/transfer_types.py", "snippet": "class DataSets:\n runs: Dict[str, RunMeta] = field(default_factory=lambda: ({}))" }, { "identifier": "FftMeta", "path": "octoprint_accelerometer/transfer_types.py", "snippet": "class FftMeta:\n file: Optional[File] = None # = File()\n meta: Optional[FilenameMetaFft] = None # = FilenameMetaFft()" }, { "identifier": "Timestamp", "path": "octoprint_accelerometer/transfer_types.py", "snippet": "class Timestamp:\n year: int = 0\n month: int = 0\n day: int = 0\n hour: int = 0\n minute: int = 0\n second: int = 0\n milli_second: int = 0" } ]
import os
import flask
import octoprint.plugin
from typing import Any, Dict, List, Literal, Optional, Tuple
from octoprint.server.util.tornado import LargeResponseHandler, path_validation_factory
from octoprint.util import is_hidden_path
from py3dpaxxel.cli.args import convert_axis_from_str
from py3dpaxxel.controller.api import Py3dpAxxel
from py3dpaxxel.sampling_tasks.series_argument_generator import RunArgsGenerator
from py3dpaxxel.storage.file_filter import FileSelector, File
from py3dpaxxel.storage.filename import timestamp_from_args
from py3dpaxxel.storage.filename_meta import FilenameMetaStream, FilenameMetaFft
from octoprint_accelerometer.data_post_process import DataPostProcessRunner
from octoprint_accelerometer.event_types import DataProcessingEventType, RecordingEventType
from octoprint_accelerometer.record_step_series import RecordStepSeriesRunner
from octoprint_accelerometer.transfer_types import RunMeta, SequenceMeta, StreamMeta, DataSets, FftMeta, Timestamp
7,023
octoprint.plugin.TemplatePlugin, octoprint.plugin.BlueprintPlugin): OUTPUT_STREAM_FILE_NAME_PREFIX: str = "axxel" OUTPUT_FFT_FILE_NAME_PREFIX: str = "fft" # noinspection PyMissingConstructor def __init__(self): # following parameters are shared among settings and UI self.distance_x_mm: int = 0 self.distance_y_mm: int = 0 self.distance_z_mm: int = 0 self.step_count: int = 0 self.speed_x_mm_s: int = 0 self.speed_y_mm_s: int = 0 self.speed_z_mm_s: int = 0 self.acceleration_x_mm_ss: int = 0 self.acceleration_y_mm_ss: int = 0 self.acceleration_z_mm_ss: int = 0 self.anchor_point_coord_x_mm: int = 0 self.anchor_point_coord_y_mm: int = 0 self.anchor_point_coord_z_mm: int = 0 self.sequence_count: int = 0 self.go_start: bool = False self.return_start: bool = False self.auto_home: bool = False self.start_frequency_hz: int = 0 self.stop_frequency_hz: int = 0 self.step_frequency_hz: int = 0 self.start_zeta_em2: int = 0 self.stop_zeta_em2: int = 0 self.step_zeta_em2: int = 0 self.sensor_output_data_rate_hz: int = 0 self.data_remove_before_run: bool = False self.do_sample_x: bool = False self.do_sample_y: bool = False self.do_sample_z: bool = False self.recording_timespan_s: float = 0 self.sequence_separation_s: float = 0 self.step_separation_s: float = 0 self.do_dry_run: bool = False # other parameters shared with UI self.devices_seen: List[str] = [] self.device: str = "" self.controller_fifo_overrun_error: bool = False self.controller_response_error: bool = False # following parameters are computed from above parameters self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0) self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0) self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0) # recording runner: once constructed before invocation all properties shall be updated self.data_recording_runner: Optional[RecordStepSeriesRunner] = None self.data_processing_runner: Optional[DataPostProcessRunner] = None @staticmethod def _get_devices() -> Tuple[str, List[str]]: """ :return: tuple of primary device (if any) and list of all devices """ seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()] primary: str = seen_devices[0] if len(seen_devices) > 0 else None return primary, seen_devices def _update_seen_devices(self): primary, seen_devices = self._get_devices() self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}") self.devices_seen = seen_devices self.device = primary if primary is not None else "" @octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"]) def on_api_set_values(self): data = flask.request.json self._update_members_from_api(data) response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"]) def on_api_start_recording(self): self._start_recording() response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"]) def on_api_abort_recording(self): self._abort_recording() response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"]) def on_api_start_data_processing(self): self._start_data_processing() response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"]) def on_api_get_estimate(self): return flask.jsonify({f"estimate": self._estimate_duration()}) 
@octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"]) def on_api_get_parameters(self): return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)}) @octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"]) def on_api_get_files_listing(self): fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*")) files_details = fs.filter() return flask.jsonify({f"files": files_details}) @octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"]) def on_api_get_stream_files_listing(self): fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$")) files = fs.filter()
class Point3D: def __init__(self, x: int, y: int, z: int): self.x: int = x self.y: int = y self.z: int = z def __str__(self): return f"x={self.x} y={self.y} z={self.z}" class OctoprintAccelerometerPlugin(octoprint.plugin.StartupPlugin, octoprint.plugin.SettingsPlugin, octoprint.plugin.AssetPlugin, octoprint.plugin.TemplatePlugin, octoprint.plugin.BlueprintPlugin): OUTPUT_STREAM_FILE_NAME_PREFIX: str = "axxel" OUTPUT_FFT_FILE_NAME_PREFIX: str = "fft" # noinspection PyMissingConstructor def __init__(self): # following parameters are shared among settings and UI self.distance_x_mm: int = 0 self.distance_y_mm: int = 0 self.distance_z_mm: int = 0 self.step_count: int = 0 self.speed_x_mm_s: int = 0 self.speed_y_mm_s: int = 0 self.speed_z_mm_s: int = 0 self.acceleration_x_mm_ss: int = 0 self.acceleration_y_mm_ss: int = 0 self.acceleration_z_mm_ss: int = 0 self.anchor_point_coord_x_mm: int = 0 self.anchor_point_coord_y_mm: int = 0 self.anchor_point_coord_z_mm: int = 0 self.sequence_count: int = 0 self.go_start: bool = False self.return_start: bool = False self.auto_home: bool = False self.start_frequency_hz: int = 0 self.stop_frequency_hz: int = 0 self.step_frequency_hz: int = 0 self.start_zeta_em2: int = 0 self.stop_zeta_em2: int = 0 self.step_zeta_em2: int = 0 self.sensor_output_data_rate_hz: int = 0 self.data_remove_before_run: bool = False self.do_sample_x: bool = False self.do_sample_y: bool = False self.do_sample_z: bool = False self.recording_timespan_s: float = 0 self.sequence_separation_s: float = 0 self.step_separation_s: float = 0 self.do_dry_run: bool = False # other parameters shared with UI self.devices_seen: List[str] = [] self.device: str = "" self.controller_fifo_overrun_error: bool = False self.controller_response_error: bool = False # following parameters are computed from above parameters self.axis_x_sampling_start: Point3D = Point3D(0, 0, 0) self.axis_y_sampling_start: Point3D = Point3D(0, 0, 0) self.axis_z_sampling_start: Point3D = Point3D(0, 0, 0) # recording runner: once constructed before invocation all properties shall be updated self.data_recording_runner: Optional[RecordStepSeriesRunner] = None self.data_processing_runner: Optional[DataPostProcessRunner] = None @staticmethod def _get_devices() -> Tuple[str, List[str]]: """ :return: tuple of primary device (if any) and list of all devices """ seen_devices: List[str] = [k for k in Py3dpAxxel.get_devices_dict().keys()] primary: str = seen_devices[0] if len(seen_devices) > 0 else None return primary, seen_devices def _update_seen_devices(self): primary, seen_devices = self._get_devices() self._logger.debug(f"seen devices: primary={primary}, seen={seen_devices}") self.devices_seen = seen_devices self.device = primary if primary is not None else "" @octoprint.plugin.BlueprintPlugin.route("/set_values", methods=["POST"]) def on_api_set_values(self): data = flask.request.json self._update_members_from_api(data) response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/start_recording", methods=["POST"]) def on_api_start_recording(self): self._start_recording() response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/abort_recording", methods=["POST"]) def on_api_abort_recording(self): self._abort_recording() response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/start_data_processing", methods=["POST"]) def 
on_api_start_data_processing(self): self._start_data_processing() response = flask.jsonify(message="OK") response.status_code = 202 return response @octoprint.plugin.BlueprintPlugin.route("/get_estimate", methods=["GET"]) def on_api_get_estimate(self): return flask.jsonify({f"estimate": self._estimate_duration()}) @octoprint.plugin.BlueprintPlugin.route("/get_parameters", methods=["GET"]) def on_api_get_parameters(self): return flask.jsonify({f"parameters": self._get_parameter_dict(flask.request.args)}) @octoprint.plugin.BlueprintPlugin.route("/get_files_listing", methods=["GET"]) def on_api_get_files_listing(self): fs = FileSelector(os.path.join(self.get_plugin_data_folder(), ".*")) files_details = fs.filter() return flask.jsonify({f"files": files_details}) @octoprint.plugin.BlueprintPlugin.route("/get_stream_files_listing", methods=["GET"]) def on_api_get_stream_files_listing(self): fs = FileSelector(os.path.join(self.get_plugin_data_folder(), f"{self.OUTPUT_STREAM_FILE_NAME_PREFIX}-.*\\.tsv$")) files = fs.filter()
files_details = [StreamMeta(f, FilenameMetaStream().from_filename(f.filename_ext)) for f in files]
6
2023-11-14 17:15:15+00:00
8k
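As in the first record, gold_snippet_index here (6) selects the context entry whose identifier the gold line actually uses: context[6] is StreamMeta, and next_line constructs a StreamMeta. A small, purely illustrative sanity check of that relationship, assuming the record dict layout shown in this listing:

def gold_snippet_is_used(record: dict) -> bool:
    # The identifier of the indexed context snippet should occur
    # verbatim in the record's gold next line.
    gold_entry = record["context"][record["gold_snippet_index"]]
    return gold_entry["identifier"] in record["next_line"]

# For the octoprint-accelerometer record above:
#   record["context"][6]["identifier"] == "StreamMeta"
#   record["next_line"].startswith("files_details = [StreamMeta(")
# so the check holds.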
hmmbug/pythaidate
tests/test_csdate.py
[ { "identifier": "julianday", "path": "pythaidate/julianday.py", "snippet": "def to_julianday(year, month, day):\ndef from_julianday(jd):\ndef today(): # pragma: no cover\ndef date_to_julianday(d):\ndef julianday_to_date(obj):\n B = 0\n A = math.trunc(yearp / 100.)\n B = 2 - A + math.trunc(A / 4.)\n C = math.trunc((365.25 * yearp) - 0.75) if yearp < 0 else math.trunc(365.25 * yearp)\n D = math.trunc(30.6001 * (monthp + 1))\n F, I = math.modf(jd)\n I = int(I)\n A = math.trunc((I - 1867216.25)/36524.25)\n B = (I + 1 + A - math.trunc(A / 4.)) if I > 2299160 else I\n C = B + 1524\n D = math.trunc((C - 122.1) / 365.25)\n E = math.trunc(365.25 * D)\n G = math.trunc((C - E) / 30.6001)" }, { "identifier": "CsDate", "path": "pythaidate/csdate.py", "snippet": "class CsDate:\n\n def __init__(self, year: int, month: int=None, day: int=None,\n month_style: int = MONTH_SUK):\n logging.debug(\"args year:%s month:%s day:%s, month_style:%s\",\n year, month, day, month_style)\n self.__year = year\n self.__month = month\n self.__day = day # day of month\n self.__days = None # days elapsed in year\n self.__month_style = month_style # Sukothai, Chiang Mai, Keng Tung\n self.__init_ymd()\n self.__calculate()\n logging.debug(\"final y:%s m:%s d:%s days:%s\",\n self.__year, self.__month, self.__day, self.__days)\n\n def __init_ymd(self):\n \"\"\"\n Initialise from year, month and day args.\n \"\"\"\n self.__year0 = self.calculate_year0(self.__year)\n # logging.debug(\"offset_days:%d\", self.__year0.offset_days)\n\n date_offset = None\n if self.__month == 5:\n date_offset = self.__day\n elif self.__month == 6:\n date_offset = 29 + self.__day\n\n MP = MONTH_POSITION_C if self.__year0.cal_type == \"C\" else MONTH_POSITION_AB\n tmonth = MP.index(self.__month)\n if date_offset and date_offset < self.__year0.offset_days:\n # this is a month 5 or 6 date at end of the year\n tmonth += 13 if self.__year0.cal_type == \"C\" else 12\n # shift month number to end of the index in LUNAR_MONTHS[]\n self.__month += 10\n self.__days = MONTH_CUMULATIVE_DAYS[self.__year0.cal_type][tmonth-1] + self.__day - self.__year0.offset_days\n logging.debug(\"ymd: y:%s m:%s d:%s days:%s cal_type:%s tmonth:%s\",\n self.__year, self.__month, self.__day,\n self.__days, self.__year0.cal_type, tmonth)\n\n def __calculate(self):\n # horakhun: The number of elapsed days since epoch plus days since New Year's Day (Thai: หรคุฌ)\n self.__horakhun = (self.__year * DAYS_IN_800_YEARS + EPOCH_OFFSET) // TIME_UNITS_IN_1_DAY + 1 + self.__days\n assert self.julianday > CS_JULIAN_DAY_OFFSET # check for pre-epoch dates\n\n # kammacapon: A quantity that gives the excess of solar days over whole solar days (Thai: กัมมัขผล)\n self.__kammacapon = TIME_UNITS_IN_1_DAY - (self.__year * DAYS_IN_800_YEARS + EPOCH_OFFSET) % TIME_UNITS_IN_1_DAY\n\n # uccapon: The measure of the position of the Moon's apogee. It increases by one unit a day to\n # a maximum of 3232 (Thai: อุจจพล)\n self.__uccapon = (self.__horakhun + UCCAPON_CONSTANT) % APOGEE_ROTATION_DAYS\n\n # avoman: The excess of lunar days over solar days in units of 1/692 of a lunar day modulus 692.\n # It increases by 11 units each solar day. 
It is used to determine when to add intercalary days\n # in the calendar (Thai: อวมาน)\n self.__avoman = (self.__horakhun * 11 + 650) % 692\n if self.__avoman == 0:\n self.__avoman = 692\n\n # masaken: Number of lunar months since the epoch (Thai: มาสเกฌฑ์)\n avoman_div = ((self.__horakhun + self.days) * 11 + 650) // 692\n self.__masaken = (avoman_div + self.__horakhun) // 30\n\n # tithi: a lunar day, equal to 1/30th of a synodic month (Thai: ดิถี)\n quot = (self.__horakhun * 11 + 650) // 692\n self.__tithi = (quot + self.__horakhun) % 30\n\n # self.avomanExtra = (self.horakhun * 11 + 650) % 692\n logging.debug(\"horakhun:%s kamma:%s quot:%s tt:%s\", self.__horakhun, self.__kammacapon, quot, self.__tithi)\n\n @staticmethod\n def calculate_year0(year: int):\n y = [\n LSYear(year - 2),\n LSYear(year - 1),\n LSYear(year),\n LSYear(year + 1),\n LSYear(year + 2),\n ]\n # logging.debug(\"[0] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n for i in (0, 1, 2, 3, 4):\n if y[2].tithi == 24 and y[3].tithi == 6:\n # where tithi of this year is 24 and next year is 6, set all years to C-type\n # adjust next_nyd weekday\n y[i].cal_type = \"C\"\n y[i].next_nyd = (y[i].next_nyd + 2) % 7\n # logging.debug(\"[1] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n # Adjust c-type years where a intercalary day and month coincide. This can't happen\n # in the Thai calendar (unlike the Burmese) so we decide if the intercalary day is moved\n # to the previous or next year. This is done by ensuring a correct sequence of weekdays\n # from one year to the next.\n for i in (1, 2, 3):\n if y[i].cal_type == \"c\":\n j = 1 if y[i].nyd == y[i-1].next_nyd else -1\n y[i+j].cal_type = \"B\"\n y[i+j].next_nyd = (y[i+j].next_nyd + 1) % 7\n # logging.debug(\"[2] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n for i in (1, 2, 3):\n if y[i-1].next_nyd != y[i].nyd and y[i].next_nyd != y[i+1].nyd:\n y[i].offset = True\n y[i].langsak += 1\n y[i].nyd = (y[i].nyd + 6) % 7\n y[i].next_nyd = (y[i].next_nyd + 6) % 7\n\n # housekeeping - elabal any remaining c-type years as C-type; add day count too\n for i in (0, 1, 2, 3, 4):\n if y[i].cal_type == \"c\":\n y[i].cal_type = \"C\"\n y[i].caldays = CAL_TYPE_DAY_COUNTS[y[i].cal_type]\n # logging.debug(\"[F] year0[].caltype:%s\", \"\".join([i.cal_type for i in y]))\n\n # Determine month/day of new year\n y[2].first_month = \"C\" # as per Eade, C=>Caitra, V=>Vaisakha\n y[2].first_day = y[2].langsak\n y[2].offset_days = y[2].langsak # no.days offset from Caitra 1st\n if y[2].offset_days < (6 + int(y[2].offset)):\n y[2].first_month = \"V\"\n y[2].first_day = y[2].offset_days\n y[2].offset_days += 29\n return y[2]\n\n @staticmethod\n def find_date(cal: str, days: int):\n \"\"\"\n Given a calendar type (A, B, C) and number of days since new years day,\n return the month and day component of a date, derived from lookup tables.\n \"\"\"\n logging.debug(\"cal:%s days:%s\", cal, days)\n vals = {\n \"A\": (\n (383, 16), (354, 15), (324, 12), (295, 11), (265, 10), (236, 9),\n (206, 8), (177, 7), (147, 6), (118, 5), (88, 4), (59, 3), (29, 2),\n ),\n \"B\": (\n (384, 16), (355, 15), (325, 12), (296, 11), (266, 10), (237, 9),\n (207, 8), (178, 7), (148, 6), (119, 5), (89, 4), (59, 3), (29, 2),\n ),\n \"C\": (\n (384, 15), (354, 12), (325, 11), (295, 10), (266, 9), (236, 8),\n (207, 7), (177, 6), (148, 5), (118, 14), (88, 13), (59, 3), (29, 2),\n ),\n }\n assert cal in vals.keys(), ValueError(\"Cal {} not found\".format(cal))\n\n for a, b in vals[cal]:\n if days > a:\n 
days -= a\n logging.debug(\"solution: (a:%s b:%s) month:%s day:%s\",\n a, b, LUNAR_MONTHS[b], days)\n month = LUNAR_MONTHS[b]\n break\n month = LUNAR_MONTHS[1]\n else:\n logging.debug(\"default: month:%s (%s) day:%s\", 1, LUNAR_MONTHS[1], days)\n return month, days\n\n @classmethod\n def today(cls):\n \"\"\"\n Return today as a CS date.\n \"\"\"\n jd = julianday.today()\n logging.debug(\"jd:%s\", jd)\n return cls.fromjulianday(jd)\n\n @classmethod\n def fromyd(cls, year: int, days: int):\n \"\"\"\n Return a Chulasakarat object from a year and days since new year's day.\n \"\"\"\n logging.debug(\"start: year:%s days:%s\", year, days)\n year0 = cls.calculate_year0(year)\n days_in_year = 365 + int(year0.leapday)\n while days > days_in_year: # zero-indexed\n year += 1\n days -= days_in_year\n year0 = cls.calculate_year0(year)\n days_in_year = 365 + int(year0.leapday)\n logging.debug(\"days >= %s: year:%s days:%s\", 364 + int(year0.leapday), year, days)\n\n # logging.debug(\"year0 langsak:%s offset_days:%s\", year0.langsak, year0.offset_days)\n month, day = cls.find_date(year0.cal_type, year0.offset_days + days)\n logging.debug(\"year:%s month:%s day:%s\", year, month, day)\n return cls(year, month, day)\n\n @classmethod\n def fromjulianday(cls, jd: int):\n \"\"\"\n Return a Chulasakarat object from a Julian Day Number.\n \"\"\"\n hk = jd - CS_JULIAN_DAY_OFFSET\n year = (hk * 800 - 373) // 292207\n if hk % 292207 == 95333:\n # Every 800 years (292207 days), on the last day of the solar leap\n # year coinciding with an adhikamas lunar year, this jd->year\n # formula will be off by one day, pushing the year forward by one\n # and the days count to -1.\n year -= 1\n days = 365\n logging.debug(\"800 year kamma adjustment\")\n else:\n year0 = cls.calculate_year0(year)\n days = hk - year0.horakhun\n # logging.debug(\"kamma:%s\", year0.kammacapon)\n # logging.debug(\"jd:%s year:%s days:%s cal_type:%s hk0:%s\", jd, year, days, year0.cal_type, year0.horakhun)\n logging.debug(\"jd:%s year:%s days:%s\", jd, year, days)\n return cls.fromyd(year=year, days=days)\n\n from_julianday = fromjulianday\n\n @classmethod\n def fromtimestamp(cls, ts):\n \"\"\"\n Return a Chulasakarat object from a UNIX timestamp.\n \"\"\"\n jd = ts // (24 * 60 * 60) + CS_UNIX_EPOCH_OFFSET\n return cls.fromjulianday(jd)\n\n @property\n def julianday(self):\n \"\"\"\n Return the Julian Day Number of this CS date.\n \"\"\"\n return self.__horakhun + CS_JULIAN_DAY_OFFSET\n\n @property\n def horakhun(self):\n return self.__horakhun\n\n @property\n def kammacapon(self):\n return self.__kammacapon\n\n @property\n def masaken(self):\n return self.__masaken\n\n @property\n def uccapon(self):\n return self.__uccapon\n\n @property\n def avoman(self):\n return self.__avoman\n\n @property\n def tithi(self):\n return self.__tithi\n\n @property\n def year(self):\n return self.__year\n\n @property\n def month(self):\n if self.__month == 15 or self.__month == 16:\n return self.__month - 10\n return self.__month\n\n @property\n def month_raw(self):\n return self.__month\n\n @property\n def day(self):\n return self.__day\n\n @property\n def days(self):\n return self.__days\n\n @property\n def solar_leap_year(self):\n return self.__year0.leapday\n\n @property\n def leap_day(self):\n return self.__year0.cal_type == 'B'\n\n @property\n def leap_month(self):\n return self.__year0.cal_type == 'C'\n\n @property\n def days_in_year(self):\n if self.__year0.cal_type == \"A\":\n return 354\n elif self.__year0.cal_type == \"B\":\n return 355\n elif 
self.__year0.cal_type == \"C\":\n return 384\n\n def replace(self, year=None, month=None, day=None):\n logging.debug(\"year:%s month:%s day:%s\", year, month, day)\n y = year if year else self.year\n m = month if month else self.month\n d = day if day else self.day\n logging.debug(\"year:%s month:%s day:%s\", y, m, d)\n return CsDate(y, m, d)\n\n def csweekday(self):\n return self.__horakhun % 7\n\n def weekday(self):\n return self.csweekday() - 2\n\n def isoweekday(self):\n return self.csweekday() - 1\n\n @property\n def yearnaksatr(self):\n idx = (self.year + 11) % 12\n if idx == 0:\n idx = 12\n return \"ปี\" + YEAR_NAKSATR[idx]\n\n def csformat(self):\n phase = \"ขึ้น\" if self.day <= 15 else \"แรม\"\n day = self.day if self.day <= 15 else self.day - 15\n s = \"{:s} เดือน {:s} {:s} {:s} ค่ำ {:s} จ.ศ.{:s}\".format(\n WEEKDAYS[self.csweekday()],\n digit_arabic_to_thai(self.month),\n phase,\n digit_arabic_to_thai(day),\n self.yearnaksatr,\n digit_arabic_to_thai(self.year)\n )\n s = digit_arabic_to_thai(s)\n return s\n\n def csformatymd(self):\n \"\"\"\n Return string in YYYY-MM-DD format.\n \"\"\"\n return \"{:4d}-{:02d}-{:02d}\".format(self.year, self.month, self.day)\n\n @classmethod\n def fromcsformat(cls, s):\n s = digit_thai_to_arabic(s)\n weekday, _, month, phase, day, _, _, year = s.split()\n year = int(year.replace(\"จ.ศ.\", \"\"))\n month = int(month)\n day = int(day)\n if phase == \"แรม\":\n day += 15\n return CsDate(year, month, day)\n\n def cscalendar(self):\n return CsCalendarDate(self.year, self.month, self.day)\n\n def __str__(self):\n return self.csformat()\n\n def __int__(self):\n \"\"\"Convert to int by returning the Julian Day Number.\"\"\"\n return self.julianday\n\n def _hashable(self):\n return (\n self.__year,\n self.__month,\n self.__day,\n self.__days,\n self.__horakhun,\n self.__kammacapon,\n self.__tithi,\n self.__year0.cal_type,\n )\n\n def __hash__(self): # pragma: no cover\n return hash(self._hashable())\n\n def __lt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday < other.julianday\n elif isinstance(other, date):\n return self.julianday < julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __le__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday <= other.julianday\n elif isinstance(other, date):\n return self.julianday <= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __eq__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday == other.julianday\n elif isinstance(other, date):\n return self.julianday == julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __ge__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday >= other.julianday\n elif isinstance(other, date):\n return self.julianday >= julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __gt__(self, other):\n if hasattr(other, \"julianday\"):\n return self.julianday > other.julianday\n elif isinstance(other, date):\n return self.julianday > julianday.to_julianday(other.year, other.month, other.day)\n return NotImplemented\n\n def __add__(self, other):\n if isinstance(other, timedelta):\n return CsDate.fromjulianday(self.julianday + other.days)\n return NotImplemented\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, timedelta):\n return self + timedelta(-other.days)\n elif hasattr(other, \"julianday\"):\n return 
timedelta(days=self.julianday - other.julianday)\n elif isinstance(other, date):\n other_jd = julianday.to_julianday(other.year, other.month, other.day)\n return timedelta(days=self.julianday - other_jd)\n return NotImplemented\n\n def debug(self): # pragma: no cover\n return {\n \"cp\": self.__year0,\n \"horakhun\": self.__horakhun,\n \"kamma\": self.__kammacapon,\n # \"avomanExtra\": self.avomanExtra,\n \"tt\": self.__tithi,\n \"year\": self.__year,\n \"month\": self.__month,\n \"day\": self.__day,\n \"days\": self.__days,\n \"cal_type\": self.__year0.cal_type,\n \"month_style\": self.__month_style,\n \"year0.langsak\": self.__year0.langsak,\n \"year0.offset\": self.__year0.offset,\n }" }, { "identifier": "CS_JULIAN_DAY_OFFSET", "path": "pythaidate/constants.py", "snippet": "CS_JULIAN_DAY_OFFSET = 1954167" } ]
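As a reading aid (not part of the record): the context snippets above hinge on a small amount of epoch arithmetic. The sketch below restates it as standalone Python, using only formulas and the CS_JULIAN_DAY_OFFSET constant that appear in the snippets; the function names are ours, and the 800-year edge case handled by CsDate.fromjulianday is deliberately omitted.

# Minimal sketch of the epoch arithmetic from the snippets above; helper
# names are illustrative, not part of pythaidate's API.
CS_JULIAN_DAY_OFFSET = 1954167  # from pythaidate/constants.py (in the record)

def cs_year_from_julianday(jd: int) -> int:
    # horakhun = elapsed days since the CS epoch; 800 CS years span 292207 days.
    # CsDate.fromjulianday additionally special-cases hk % 292207 == 95333.
    hk = jd - CS_JULIAN_DAY_OFFSET
    return (hk * 800 - 373) // 292207

def tithi_from_horakhun(horakhun: int) -> int:
    # Lunar day (1/30th of a synodic month), exactly as computed in CsDate.
    quot = (horakhun * 11 + 650) // 692
    return (quot + horakhun) % 30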
from datetime import date, timedelta from pythaidate import CsDate, julianday from pythaidate.constants import CS_JULIAN_DAY_OFFSET import json import unittest import os import pathlib import random import logging
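Expanded one statement per line, the flattened import_statement above reads:

from datetime import date, timedelta
from pythaidate import CsDate, julianday
from pythaidate.constants import CS_JULIAN_DAY_OFFSET
import json
import unittest
import os
import pathlib
import random
import logging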
5,699
random.seed() this_path = pathlib.Path(__file__).parent.resolve() for datafile in ("cs.json", "cs.min.json"): datafile = os.path.join(this_path, "data", datafile) if os.path.exists(datafile): break else: raise FileNotFoundError("CS data file not found.") with open(datafile) as fh: TESTDATA = json.load(fh) MIN_YEAR = 0 # 638 AD MAX_YEAR = 1462 # 2100 AD MAX_YEAR = 2362 # 3000 AD RUN_PERCENT = 10 if os.environ.get("RUN_PERCENT"): RUN_PERCENT = int(os.environ.get("RUN_PERCENT")) if RUN_PERCENT > 100: RUN_PERCENT = 100 class Test_CsDate(unittest.TestCase): def random_dates(self, min_year=MIN_YEAR, max_year=MAX_YEAR, sample_rate_pc=None): if sample_rate_pc is None: sample_rate_pc = RUN_PERCENT for y in range(min_year, max_year):
random.seed() this_path = pathlib.Path(__file__).parent.resolve() for datafile in ("cs.json", "cs.min.json"): datafile = os.path.join(this_path, "data", datafile) if os.path.exists(datafile): break else: raise FileNotFoundError("CS data file not found.") with open(datafile) as fh: TESTDATA = json.load(fh) MIN_YEAR = 0 # 638 AD MAX_YEAR = 1462 # 2100 AD MAX_YEAR = 2362 # 3000 AD RUN_PERCENT = 10 if os.environ.get("RUN_PERCENT"): RUN_PERCENT = int(os.environ.get("RUN_PERCENT")) if RUN_PERCENT > 100: RUN_PERCENT = 100 class Test_CsDate(unittest.TestCase): def random_dates(self, min_year=MIN_YEAR, max_year=MAX_YEAR, sample_rate_pc=None): if sample_rate_pc is None: sample_rate_pc = RUN_PERCENT for y in range(min_year, max_year):
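A short, hedged sketch of the round-trip behaviour this test file exercises; the year chosen is our own example, everything else follows the CsDate methods quoted in the context snippets.

# Illustrative usage only -- not part of the test file above.
d = CsDate.fromyd(year=1385, days=0)    # day 0 of CS year 1385 (example value)
jd = d.julianday                        # horakhun + CS_JULIAN_DAY_OFFSET
assert CsDate.fromjulianday(jd) == d    # __eq__ compares Julian Day Numbers

d2 = d + timedelta(days=10)             # __add__ accepts a timedelta
assert d2 - d == timedelta(days=10)     # __sub__ of two CS dates is a timedelta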
yd = CsDate.fromyd(year=y, days=0)
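For context, this gold next_line is the first statement of the loop body cropped above:

    for y in range(min_year, max_year):
        yd = CsDate.fromyd(year=y, days=0)  # <- gold next_line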
1
2023-11-18 21:14:01+00:00
8k