index | package | name | docstring | code | signature |
---|---|---|---|---|---|
16,266 | plum.bitfields | __rsub__ | null | def __rsub__(self, other):
return int.__rsub__(self.__value__, other)
| (self, other) |
16,267 | plum.bitfields | __rtruediv__ | null | def __rtruediv__(self, other):
return int.__rtruediv__(self.__value__, other)
| (self, other) |
16,268 | plum.bitfields | __rxor__ | null | def __rxor__(self, other):
return int.__rxor__(self.__value__, other)
| (self, other) |
16,269 | plum.bitfields | __setattr__ | null | def __setattr__(self, key, value):
if key.startswith("__") or key in self.__fields__:
super().__setattr__(key, value)
else:
raise AttributeError(f"{type(self).__name__!r} has no attribute {key!r}")
| (self, key, value) |
16,270 | plum.bitfields | __setitem__ | null | def __setitem__(self, index, value):
bits = self[:]
nbits = len(bits)
bits[index] = value
if len(bits) != nbits:
raise ValueError("slice and value not same length")
i = 0
mask = 1
for bit in bits:
if bit:
i |= mask
mask <<= 1
self.__value__ = i
| (self, index, value) |
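The packing loop in `__setitem__` above converts a list of bits (least-significant bit first) back into an integer. A minimal standalone sketch of that same idea, independent of the bitfields class:

```python
# Standalone sketch of the bit-list -> integer packing used by __setitem__ above.
# Bits are ordered least-significant first, matching the mask that starts at 1
# and is shifted left once per bit.
def pack_bits(bits):
    value = 0
    mask = 1
    for bit in bits:
        if bit:
            value |= mask
        mask <<= 1
    return value

assert pack_bits([1, 0, 1, 1]) == 0b1101  # bit 0 = 1, bit 1 = 0, bit 2 = 1, bit 3 = 1
```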
16,271 | plum.bitfields | __sub__ | null | def __sub__(self, other):
return int.__sub__(self.__value__, other)
| (self, other) |
16,272 | plum.bitfields | __truediv__ | null | def __truediv__(self, other):
return int.__truediv__(self.__value__, other)
| (self, other) |
16,273 | plum.bitfields | __xor__ | null | def __xor__(self, other):
return int.__xor__(self.__value__, other)
| (self, other) |
16,274 | plum.bitfields | asdict | Return bit field values in dictionary form.
:returns: bit field names/values
:rtype: dict
| def asdict(self):
"""Return bit field values in dictionary form.
:returns: bit field names/values
:rtype: dict
"""
return {name: getattr(self, name) for name in self.__fields__}
| (self) |
16,275 | plum.data | ipack | Pack instance as bytes.
:raises: ``PackError`` if type error, value error, etc.
| def ipack(self) -> bytes:
"""Pack instance as bytes.
:raises: ``PackError`` if type error, value error, etc.
"""
pieces: List[bytes] = []
try:
# None -> dump
self.__pack__(self, pieces, None)
except Exception as exc:
# do it over to include dump in exception message
self.ipack_and_dump()
raise exceptions.ImplementationError() from exc # pragma: no cover
return b"".join(pieces)
| (self) -> bytes |
16,276 | plum.data | ipack_and_dump | Pack instance as bytes and produce bytes summary.
:raises: ``PackError`` if type error, value error, etc.
| def ipack_and_dump(self) -> Tuple[bytes, _dump.Dump]:
"""Pack instance as bytes and produce bytes summary.
:raises: ``PackError`` if type error, value error, etc.
"""
dmp = _dump.Dump()
pieces: List[bytes] = []
try:
self.__pack__(self, pieces, dmp.add_record(fmt=type(self).name))
except Exception as exc:
raise exceptions.PackError(dump=dmp, exception=exc) from exc
return b"".join(pieces), dmp
| (self) -> Tuple[bytes, plum.dump.Dump] |
16,277 | exif._datatypes | FlashMode | Flash mode of the camera. | class FlashMode(IntFlag):
"""Flash mode of the camera."""
UNKNOWN = 0
COMPULSORY_FLASH_FIRING = 1
COMPULSORY_FLASH_SUPPRESSION = 2
AUTO_MODE = 3
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,278 | exif._datatypes | FlashReturn | Flash status of returned light. | class FlashReturn(IntFlag):
"""Flash status of returned light."""
NO_STROBE_RETURN_DETECTION_FUNCTION = 0
RESERVED = 1
STROBE_RETURN_LIGHT_DETECTED = 2
STROBE_RETURN_LIGHT_NOT_DETECTED = 3
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,279 | exif._constants | GpsAltitudeRef | Altitude used as the reference altitude. | class GpsAltitudeRef(IntEnum):
"""Altitude used as the reference altitude."""
ABOVE_SEA_LEVEL = 0
"""Above Sea Level"""
BELOW_SEA_LEVEL = 1
"""Below Sea Level"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,280 | exif._image | Image | Image EXIF metadata interface class.
:param img_file: image file with EXIF metadata
:type img_file: str (file path), bytes (already-read contents), or File
| class Image:
"""Image EXIF metadata interface class.
:param img_file: image file with EXIF metadata
:type img_file: str (file path), bytes (already-read contents), or File
"""
def _parse_segments(self, img_bytes: bytes) -> None:
cursor = 0
# Traverse hexadecimal string until EXIF APP1 segment found.
while img_bytes[cursor : cursor + len(ExifMarkers.APP1)] != ExifMarkers.APP1:
cursor += len(ExifMarkers.APP1)
if cursor > len(img_bytes):
self._has_exif = False
cursor = 2 # should theoretically go after SOI marker (if adding)
break
self._segments["preceding"] = img_bytes[:cursor]
app1_start_index = cursor
if self._has_exif:
# Determine the expected length of the APP1 segment.
app1_len = uint16.unpack(
img_bytes[app1_start_index + 2 : app1_start_index + 4]
)
cursor += app1_len + 2 # skip APP1 marker and all data
# If the expected length stops early, keep traversing until another section is found.
while img_bytes[cursor : cursor + 1] != ExifMarkers.SEG_PREFIX:
cursor += 1
# raise IOError("no subsequent EXIF segment found, is this an EXIF-encoded JPEG?")
if cursor > len(img_bytes):
self._has_exif = False
break
if self._has_exif:
# Instantiate an APP1 segment object to create an EXIF tag interface.
self._segments["APP1"] = App1MetaData(img_bytes[app1_start_index:cursor])
self._segments["succeeding"] = img_bytes[cursor:]
else:
# Store the remainder of the image so that it can be reconstructed when exporting.
self._segments["succeeding"] = img_bytes[app1_start_index:]
def __init__(
self,
img_file: Union[BinaryIO, bytes, str], # pylint: disable=unsubscriptable-object
) -> None:
self._has_exif = True
self._segments: Dict[
str, Union[App1MetaData, bytes] # pylint: disable=unsubscriptable-object
] = {}
if hasattr(img_file, "read"):
img_bytes = img_file.read() # type: ignore
elif isinstance(img_file, bytes):
img_bytes = img_file
elif os.path.isfile(img_file): # type: ignore
with open(img_file, "rb") as file_descriptor: # type: ignore
img_bytes = file_descriptor.read()
else: # pragma: no cover
raise ValueError("expected file object, file path as str, or bytes")
self._parse_segments(img_bytes)
def __dir__(self) -> List[str]:
members = [
"delete",
"delete_all",
"get",
"get_all",
"get_file",
"get_thumbnail",
"has_exif",
"list_all",
"_segments",
]
if self._has_exif:
assert isinstance(self._segments["APP1"], App1MetaData)
members += self._segments["APP1"].get_tag_list()
return members
def __getattr__(self, item):
return getattr(self._segments["APP1"], item)
def __setattr__(self, key, value):
try:
ATTRIBUTE_ID_MAP[key.lower()]
except KeyError:
super(Image, self).__setattr__(key, value)
else:
if not self._has_exif:
self._segments["APP1"] = App1MetaData(generate_empty_app1_bytes())
self._has_exif = True
setattr(self._segments["APP1"], key.lower(), value)
def __delattr__(self, item):
try:
ATTRIBUTE_ID_MAP[item]
except KeyError:
super(Image, self).__delattr__(item)
else:
delattr(self._segments["APP1"], item)
def __getitem__(self, item):
return self.__getattr__(item)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __delitem__(self, key):
self.__delattr__(key)
def delete(self, attribute: str) -> None:
"""Remove the specified attribute from the image.
:param attribute: image EXIF attribute name
"""
delattr(self, attribute)
def delete_all(self) -> None:
"""Remove all EXIF tags from the image."""
for _ in range(
2
): # iterate twice to delete thumbnail tags the second time around
assert isinstance(self._segments["APP1"], App1MetaData)
for tag in self._segments["APP1"].get_tag_list():
if tag not in ["_exif_ifd_pointer", "_gps_ifd_pointer", "exif_version"]:
try:
delattr(self, tag)
except AttributeError:
warnings.warn("could not delete tag " + tag, RuntimeWarning)
self._parse_segments(self.get_file())
def get(self, attribute: str, default: Any = None) -> Any:
"""Return the value of the specified tag.
If the attribute is not available or set, return the value specified by the ``default``
keyword argument.
:param attribute: image EXIF attribute name
:param default: return value if attribute does not exist
:returns: tag value if present, ``default`` otherwise
:rtype: corresponding Python type
"""
try:
retval = getattr(self, attribute)
except (AttributeError, NotImplementedError):
retval = default
return retval
def get_all(self) -> Dict[str, Any]:
"""Return dictionary containing all EXIF tag values keyed by tag name."""
all_tags = {}
for tag_name in self.list_all():
try:
tag_value = getattr(self, tag_name)
except Exception: # pylint: disable=broad-except
logger.warning("unable to read tag %r", tag_name)
else:
all_tags[tag_name] = tag_value
return all_tags
def get_file(self) -> bytes:
"""Generate equivalent binary file contents.
:returns: image binary with EXIF metadata
"""
assert isinstance(self._segments["preceding"], bytes)
img_bytes = self._segments["preceding"]
if self._has_exif:
assert isinstance(self._segments["APP1"], App1MetaData)
img_bytes += self._segments["APP1"].get_segment_bytes()
assert isinstance(self._segments["succeeding"], bytes)
img_bytes += self._segments["succeeding"]
return img_bytes
def get_thumbnail(self) -> bytes:
"""Extract thumbnail binary contained in EXIF metadata.
:returns: thumbnail binary
:raises RuntimeError: image does not contain thumbnail
"""
thumbnail_bytes = None
try:
app1_segment = self._segments["APP1"]
except KeyError:
pass
else:
assert isinstance(app1_segment, App1MetaData)
thumbnail_bytes = app1_segment.thumbnail_bytes
if not thumbnail_bytes:
raise RuntimeError("image does not contain thumbnail")
return thumbnail_bytes
@property
def has_exif(self) -> bool:
"""Report whether or not the image currently has EXIF metadata."""
return self._has_exif
def list_all(self) -> List[str]:
"""List all EXIF tags contained in the image."""
tags_list = []
if self._has_exif:
assert isinstance(self._segments["APP1"], App1MetaData)
tags_list += self._segments["APP1"].get_tag_list(include_unknown=False)
return tags_list
def set(self, attribute: str, value) -> None:
"""Set the value of the specified attribute.
:param attribute: image EXIF attribute name
:param value: tag value
:type value: corresponding Python type
"""
setattr(self, attribute, value)
| (img_file: Union[BinaryIO, bytes, str]) -> None |
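A short usage sketch built only from the methods shown above; `photo.jpg` is a placeholder path and the tag names depend on what the image actually contains (`Image` is the package-level export of `exif._image.Image`):

```python
from exif import Image

# "photo.jpg" is a placeholder path; any JPEG with EXIF metadata works.
with open("photo.jpg", "rb") as f:
    img = Image(f)

if img.has_exif:
    print(img.list_all())                       # tag names present in the image
    print(img.get("model", default="unknown"))  # safe lookup with a fallback
else:
    print("no EXIF metadata found")
```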
16,281 | exif._image | __delattr__ | null | def __delattr__(self, item):
try:
ATTRIBUTE_ID_MAP[item]
except KeyError:
super(Image, self).__delattr__(item)
else:
delattr(self._segments["APP1"], item)
| (self, item) |
16,282 | exif._image | __delitem__ | null | def __delitem__(self, key):
self.__delattr__(key)
| (self, key) |
16,283 | exif._image | __dir__ | null | def __dir__(self) -> List[str]:
members = [
"delete",
"delete_all",
"get",
"get_all",
"get_file",
"get_thumbnail",
"has_exif",
"list_all",
"_segments",
]
if self._has_exif:
assert isinstance(self._segments["APP1"], App1MetaData)
members += self._segments["APP1"].get_tag_list()
return members
| (self) -> List[str] |
16,284 | exif._image | __getattr__ | null | def __getattr__(self, item):
return getattr(self._segments["APP1"], item)
| (self, item) |
16,285 | exif._image | __getitem__ | null | def __getitem__(self, item):
return self.__getattr__(item)
| (self, item) |
16,286 | exif._image | __init__ | null | def __init__(
self,
img_file: Union[BinaryIO, bytes, str], # pylint: disable=unsubscriptable-object
) -> None:
self._has_exif = True
self._segments: Dict[
str, Union[App1MetaData, bytes] # pylint: disable=unsubscriptable-object
] = {}
if hasattr(img_file, "read"):
img_bytes = img_file.read() # type: ignore
elif isinstance(img_file, bytes):
img_bytes = img_file
elif os.path.isfile(img_file): # type: ignore
with open(img_file, "rb") as file_descriptor: # type: ignore
img_bytes = file_descriptor.read()
else: # pragma: no cover
raise ValueError("expected file object, file path as str, or bytes")
self._parse_segments(img_bytes)
| (self, img_file: Union[BinaryIO, bytes, str]) -> NoneType |
16,287 | exif._image | __setattr__ | null | def __setattr__(self, key, value):
try:
ATTRIBUTE_ID_MAP[key.lower()]
except KeyError:
super(Image, self).__setattr__(key, value)
else:
if not self._has_exif:
self._segments["APP1"] = App1MetaData(generate_empty_app1_bytes())
self._has_exif = True
setattr(self._segments["APP1"], key.lower(), value)
| (self, key, value) |
16,288 | exif._image | __setitem__ | null | def __setitem__(self, key, value):
self.__setattr__(key, value)
| (self, key, value) |
16,289 | exif._image | _parse_segments | null | def _parse_segments(self, img_bytes: bytes) -> None:
cursor = 0
# Traverse hexadecimal string until EXIF APP1 segment found.
while img_bytes[cursor : cursor + len(ExifMarkers.APP1)] != ExifMarkers.APP1:
cursor += len(ExifMarkers.APP1)
if cursor > len(img_bytes):
self._has_exif = False
cursor = 2 # should theoretically go after SOI marker (if adding)
break
self._segments["preceding"] = img_bytes[:cursor]
app1_start_index = cursor
if self._has_exif:
# Determine the expected length of the APP1 segment.
app1_len = uint16.unpack(
img_bytes[app1_start_index + 2 : app1_start_index + 4]
)
cursor += app1_len + 2 # skip APP1 marker and all data
# If the expected length stops early, keep traversing until another section is found.
while img_bytes[cursor : cursor + 1] != ExifMarkers.SEG_PREFIX:
cursor += 1
# raise IOError("no subsequent EXIF segment found, is this an EXIF-encoded JPEG?")
if cursor > len(img_bytes):
self._has_exif = False
break
if self._has_exif:
# Instantiate an APP1 segment object to create an EXIF tag interface.
self._segments["APP1"] = App1MetaData(img_bytes[app1_start_index:cursor])
self._segments["succeeding"] = img_bytes[cursor:]
else:
# Store the remainder of the image so that it can be reconstructed when exporting.
self._segments["succeeding"] = img_bytes[app1_start_index:]
| (self, img_bytes: bytes) -> NoneType |
16,290 | exif._image | delete | Remove the specified attribute from the image.
:param attribute: image EXIF attribute name
| def delete(self, attribute: str) -> None:
"""Remove the specified attribute from the image.
:param attribute: image EXIF attribute name
"""
delattr(self, attribute)
| (self, attribute: str) -> NoneType |
16,291 | exif._image | delete_all | Remove all EXIF tags from the image. | def delete_all(self) -> None:
"""Remove all EXIF tags from the image."""
for _ in range(
2
): # iterate twice to delete thumbnail tags the second time around
assert isinstance(self._segments["APP1"], App1MetaData)
for tag in self._segments["APP1"].get_tag_list():
if tag not in ["_exif_ifd_pointer", "_gps_ifd_pointer", "exif_version"]:
try:
delattr(self, tag)
except AttributeError:
warnings.warn("could not delete tag " + tag, RuntimeWarning)
self._parse_segments(self.get_file())
| (self) -> NoneType |
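Stripping metadata with `delete_all` and exporting the result with `get_file`; a sketch that continues from the `img` object created in the earlier example (output path is a placeholder):

```python
# Remove all removable EXIF tags and write a cleaned copy of the image.
img.delete_all()
with open("photo_stripped.jpg", "wb") as out:
    out.write(img.get_file())
```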
16,292 | exif._image | get | Return the value of the specified tag.
If the attribute is not available or set, return the value specified by the ``default``
keyword argument.
:param attribute: image EXIF attribute name
:param default: return value if attribute does not exist
:returns: tag value if present, ``default`` otherwise
:rtype: corresponding Python type
| def get(self, attribute: str, default: Any = None) -> Any:
"""Return the value of the specified tag.
If the attribute is not available or set, return the value specified by the ``default``
keyword argument.
:param attribute: image EXIF attribute name
:param default: return value if attribute does not exist
:returns: tag value if present, ``default`` otherwise
:rtype: corresponding Python type
"""
try:
retval = getattr(self, attribute)
except (AttributeError, NotImplementedError):
retval = default
return retval
| (self, attribute: str, default: Optional[Any] = None) -> Any |
16,293 | exif._image | get_all | Return dictionary containing all EXIF tag values keyed by tag name. | def get_all(self) -> Dict[str, Any]:
"""Return dictionary containing all EXIF tag values keyed by tag name."""
all_tags = {}
for tag_name in self.list_all():
try:
tag_value = getattr(self, tag_name)
except Exception: # pylint: disable=broad-except
logger.warning("unable to read tag %r", tag_name)
else:
all_tags[tag_name] = tag_value
return all_tags
| (self) -> Dict[str, Any] |
16,294 | exif._image | get_file | Generate equivalent binary file contents.
:returns: image binary with EXIF metadata
| def get_file(self) -> bytes:
"""Generate equivalent binary file contents.
:returns: image binary with EXIF metadata
"""
assert isinstance(self._segments["preceding"], bytes)
img_bytes = self._segments["preceding"]
if self._has_exif:
assert isinstance(self._segments["APP1"], App1MetaData)
img_bytes += self._segments["APP1"].get_segment_bytes()
assert isinstance(self._segments["succeeding"], bytes)
img_bytes += self._segments["succeeding"]
return img_bytes
| (self) -> bytes |
16,295 | exif._image | get_thumbnail | Extract thumbnail binary contained in EXIF metadata.
:returns: thumbnail binary
:raises RuntimeError: image does not contain thumbnail
| def get_thumbnail(self) -> bytes:
"""Extract thumbnail binary contained in EXIF metadata.
:returns: thumbnail binary
:raises RuntimeError: image does not contain thumbnail
"""
thumbnail_bytes = None
try:
app1_segment = self._segments["APP1"]
except KeyError:
pass
else:
assert isinstance(app1_segment, App1MetaData)
thumbnail_bytes = app1_segment.thumbnail_bytes
if not thumbnail_bytes:
raise RuntimeError("image does not contain thumbnail")
return thumbnail_bytes
| (self) -> bytes |
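Extracting the embedded thumbnail while guarding against images that have none, again continuing from the `img` object above:

```python
try:
    thumb = img.get_thumbnail()
except RuntimeError:
    print("image does not contain a thumbnail")
else:
    with open("thumbnail.jpg", "wb") as out:  # placeholder output path
        out.write(thumb)
```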
16,296 | exif._image | list_all | List all EXIF tags contained in the image. | def list_all(self) -> List[str]:
"""List all EXIF tags contained in the image."""
tags_list = []
if self._has_exif:
assert isinstance(self._segments["APP1"], App1MetaData)
tags_list += self._segments["APP1"].get_tag_list(include_unknown=False)
return tags_list
| (self) -> List[str] |
16,297 | exif._image | set | Set the value of the specified attribute.
:param attribute: image EXIF attribute name
:param value: tag value
:type value: corresponding Python type
| def set(self, attribute: str, value) -> None:
"""Set the value of the specified attribute.
:param attribute: image EXIF attribute name
:param value: tag value
:type value: corresponding Python type
"""
setattr(self, attribute, value)
| (self, attribute: str, value) -> NoneType |
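Writing tags either through `set` or item assignment, then exporting the modified bytes; a sketch continuing from `img`, where the tag names assume standard EXIF attributes recognized by the library and the values are illustrative:

```python
# Two equivalent ways to write a tag, per __setattr__/__setitem__/set above.
img.set("copyright", "2024 Example Photographer")
img["software"] = "example-pipeline 1.0"

with open("photo_tagged.jpg", "wb") as out:  # placeholder output path
    out.write(img.get_file())
```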
16,298 | exif._constants | LightSource | Kind of light source present when the image was shot. | class LightSource(IntEnum):
    """Kind of light source present when the image was shot."""
UNKNOWN = 0
"""Unknown"""
DAYLIGHT = 1
"""Daylight"""
FLUORESCENT = 2
"""Fluorescent"""
TUNGSTEN = 3
"""Tungsten (Incandescent Light)"""
FLASH = 4
"""Flash"""
FINE_WEATHER = 9
"""Fine Weather"""
CLOUDY_WEATHER = 10
"""Cloudy Weather"""
SHADE = 11
"""Shade"""
DAYLIGHT_FLUORESCENT = 12
"""Daylight Fluorescent (D 5700 - 7100K)"""
DAY_WHITE_FLUORESCENT = 13
"""Day White Fluorescent (N 4600 - 5400K)"""
COOL_WHITE_FLUORESCENT = 14
"""Cool White Fluorescent (W 3900 - 4500K)"""
WHITE_FLUORESCENT = 15
"""White Fluorescent (WW 3200 - 3700K)"""
STANDARD_LIGHT_A = 17
"""Standard Light A"""
STANDARD_LIGHT_B = 18
"""Standard Light B"""
STANDARD_LIGHT_C = 19
"""Standard Light C"""
D55 = 20
"""D55"""
D65 = 21
"""D65"""
D75 = 22
"""D75"""
D50 = 23
"""D50"""
ISO_STUDIO_TUNGSTEN = 24
"""ISO Studio Tungsten"""
OTHER = 255
"""Other Light Source"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,299 | exif._constants | MeteringMode | Metering mode. | class MeteringMode(IntEnum):
"""Metering mode."""
UNKNOWN = 0
"""Unknown"""
AVERAGE = 1
"""Average"""
CENTER_WEIGHTED_AVERAGE = 2
"""Center Weighted Average"""
SPOT = 3
"""Spot"""
MULTI_SPOT = 4
"""Multi Spot"""
PATTERN = 5
"""Pattern"""
PARTIAL = 6
"""Partial"""
OTHER = 255
"""Other"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,300 | exif._constants | Orientation | Image orientation in terms of rows and columns. | class Orientation(IntEnum):
"""Image orientation in terms of rows and columns."""
TOP_LEFT = 1
"""The 0th row is at the visual top of the image and the 0th column is the visual
left-hand side."""
TOP_RIGHT = 2
"""The 0th row is at the visual top of the image and the 0th column is the visual
right-hand side."""
BOTTOM_RIGHT = 3
"""The 0th row is at the visual bottom of the image and the 0th column is the visual
right-hand side."""
BOTTOM_LEFT = 4
"""The 0th row is at the visual bottom of the image and the 0th column is the visual
left-hand side."""
LEFT_TOP = 5
"""The 0th row is the visual left-hand side of the image and the 0th column is the
visual top."""
RIGHT_TOP = 6
"""The 0th row is the visual right-hand side of the image and the 0th column is the
visual bottom."""
RIGHT_BOTTOM = 7
"""The 0th row is the visual right-hand side of the image and the 0th column is the
visual bottom."""
LEFT_BOTTOM = 8
"""The 0th row is the visual left-hand side of the image and the 0th column is the
visual bottom."""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
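A sketch of reacting to the orientation tag; it assumes `Orientation` is re-exported at the package root (recent versions of the library do this; otherwise it can be imported from `exif._constants` as listed above) and continues from the `img` object:

```python
from exif import Orientation  # package-level export assumed; see note above

orientation = img.get("orientation")
if orientation == Orientation.RIGHT_TOP:
    print("image needs a 90 degree clockwise rotation for display")
elif orientation is None:
    print("no orientation tag present")
```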
16,301 | exif._constants | ResolutionUnit | Unit for measuring X resolution and Y resolution tags. | class ResolutionUnit(IntEnum):
"""Unit for measuring X resolution and Y resolution tags."""
INCHES = 2
"""Inches or Unknown"""
CENTIMETERS = 3
"""Centimeters"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,302 | exif._constants | Saturation | Saturation processing applied by camera. | class Saturation(IntEnum):
"""Saturation processing applied by camera."""
NORMAL = 0
"""Normal Saturation"""
LOW = 1
"""Low Saturation"""
HIGH = 2
"""High Saturation"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,303 | exif._constants | SceneCaptureType | Type of scene that was shot or the mode in which the image was shot. | class SceneCaptureType(IntEnum):
"""Type of scene that was shot or the mode in which the image was shot."""
STANDARD = 0
"""Standard"""
LANDSCAPE = 1
"""Landscape"""
PORTRAIT = 2
"""Portrait"""
NIGHT_SCENE = 3
"""Night Scene"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,304 | exif._constants | SensingMethod | Image sensor type on the camera or input device. | class SensingMethod(IntEnum):
"""Image sensor type on the camera or input device."""
NOT_DEFINED = 1
"""Not Defined"""
ONE_CHIP_COLOR_AREA_SENSOR = 2
"""One-Chip Color Area Sensor"""
TWO_CHIP_COLOR_AREA_SENSOR = 3
"""Two-Chip Color Area Sensor"""
THREE_CHIP_COLOR_AREA_SENSOR = 4
"""Three-Chip Color Area Sensor"""
COLOR_SEQUENTIAL_AREA_SENSOR = 5
"""Color Sequential Area Sensor"""
TRILINEAR_SENSOR = 7
"""Trilinear Sensor"""
COLOR_SEQUENTIAL_LINEAR_SENSOR = 8
"""Color Sequential Linear Sensor"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,305 | exif._constants | Sharpness | Sharpness processing applied by camera. | class Sharpness(IntEnum):
"""Sharpness processing applied by camera."""
NORMAL = 0
"""Normal"""
SOFT = 1
"""Soft"""
HARD = 2
"""Hard"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,306 | exif._constants | WhiteBalance | White balance mode set when the image was shot. | class WhiteBalance(IntEnum):
"""White balance mode set when the image was shot."""
AUTO = 0
"""Auto White Balance"""
MANUAL = 1
"""Manual White Balance"""
| (value, names=None, *, module=None, qualname=None, type=None, start=1) |
16,315 | generator_oj_problem | getAppDirectory | null | def getAppDirectory() -> pathlib.Path:
return pathlib.Path(__file__).parent.resolve()
| () -> pathlib.Path |
16,316 | generator_oj_problem | getWorkingDirectory | null | def getWorkingDirectory() -> pathlib.Path:
return pathlib.Path(os.getcwd()).resolve()
| () -> pathlib.Path |
16,319 | presidio_analyzer.analysis_explanation | AnalysisExplanation |
Hold tracing information to explain why PII entities were identified as such.
:param recognizer: name of recognizer that made the decision
:param original_score: recognizer's confidence in result
:param pattern_name: name of pattern
(if decision was made by a PatternRecognizer)
:param pattern: regex pattern that was applied (if PatternRecognizer)
:param validation_result: result of a validation (e.g. checksum)
:param textual_explanation: Free text for describing
a decision of a logic or model
| class AnalysisExplanation:
"""
Hold tracing information to explain why PII entities were identified as such.
:param recognizer: name of recognizer that made the decision
:param original_score: recognizer's confidence in result
:param pattern_name: name of pattern
(if decision was made by a PatternRecognizer)
:param pattern: regex pattern that was applied (if PatternRecognizer)
:param validation_result: result of a validation (e.g. checksum)
:param textual_explanation: Free text for describing
a decision of a logic or model
"""
def __init__(
self,
recognizer: str,
original_score: float,
pattern_name: str = None,
pattern: str = None,
validation_result: float = None,
textual_explanation: str = None,
regex_flags: int = None,
):
self.recognizer = recognizer
self.pattern_name = pattern_name
self.pattern = pattern
self.original_score = original_score
self.score = original_score
self.textual_explanation = textual_explanation
self.score_context_improvement = 0
self.supportive_context_word = ""
self.validation_result = validation_result
self.regex_flags = regex_flags
def __repr__(self):
"""Create string representation of the object."""
return str(self.__dict__)
def set_improved_score(self, score: float) -> None:
"""Update the score and calculate the difference from the original score."""
self.score = score
self.score_context_improvement = self.score - self.original_score
def set_supportive_context_word(self, word: str) -> None:
"""Set the context word which helped increase the score."""
self.supportive_context_word = word
def append_textual_explanation_line(self, text: str) -> None:
"""Append a new line to textual_explanation field."""
if self.textual_explanation is None:
self.textual_explanation = text
else:
self.textual_explanation = "{}\n{}".format(self.textual_explanation, text)
def to_dict(self) -> Dict:
"""
Serialize self to dictionary.
:return: a dictionary
"""
return self.__dict__
| (recognizer: str, original_score: float, pattern_name: str = None, pattern: str = None, validation_result: float = None, textual_explanation: str = None, regex_flags: int = None) |
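A minimal sketch of how an explanation object can be built and enriched, using only the constructor and methods shown above; the recognizer name, pattern, and scores are made up for illustration (the import path follows the module name listed in this row):

```python
from presidio_analyzer.analysis_explanation import AnalysisExplanation

explanation = AnalysisExplanation(
    recognizer="ExamplePatternRecognizer",   # hypothetical recognizer name
    original_score=0.5,
    pattern_name="example_pattern",
    pattern=r"\d{3}-\d{2}-\d{4}",
)
explanation.set_supportive_context_word("ssn")
explanation.set_improved_score(0.85)  # also records the +0.35 context improvement
explanation.append_textual_explanation_line("score raised by nearby context word")
print(explanation.to_dict())
```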
16,320 | presidio_analyzer.analysis_explanation | __init__ | null | def __init__(
self,
recognizer: str,
original_score: float,
pattern_name: str = None,
pattern: str = None,
validation_result: float = None,
textual_explanation: str = None,
regex_flags: int = None,
):
self.recognizer = recognizer
self.pattern_name = pattern_name
self.pattern = pattern
self.original_score = original_score
self.score = original_score
self.textual_explanation = textual_explanation
self.score_context_improvement = 0
self.supportive_context_word = ""
self.validation_result = validation_result
self.regex_flags = regex_flags
| (self, recognizer: str, original_score: float, pattern_name: Optional[str] = None, pattern: Optional[str] = None, validation_result: Optional[float] = None, textual_explanation: Optional[str] = None, regex_flags: Optional[int] = None) |
16,321 | presidio_analyzer.analysis_explanation | __repr__ | Create string representation of the object. | def __repr__(self):
"""Create string representation of the object."""
return str(self.__dict__)
| (self) |
16,322 | presidio_analyzer.analysis_explanation | append_textual_explanation_line | Append a new line to textual_explanation field. | def append_textual_explanation_line(self, text: str) -> None:
"""Append a new line to textual_explanation field."""
if self.textual_explanation is None:
self.textual_explanation = text
else:
self.textual_explanation = "{}\n{}".format(self.textual_explanation, text)
| (self, text: str) -> NoneType |
16,323 | presidio_analyzer.analysis_explanation | set_improved_score | Update the score and calculate the difference from the original score. | def set_improved_score(self, score: float) -> None:
"""Update the score and calculate the difference from the original score."""
self.score = score
self.score_context_improvement = self.score - self.original_score
| (self, score: float) -> NoneType |
16,324 | presidio_analyzer.analysis_explanation | set_supportive_context_word | Set the context word which helped increase the score. | def set_supportive_context_word(self, word: str) -> None:
"""Set the context word which helped increase the score."""
self.supportive_context_word = word
| (self, word: str) -> NoneType |
16,325 | presidio_analyzer.analysis_explanation | to_dict |
Serialize self to dictionary.
:return: a dictionary
| def to_dict(self) -> Dict:
"""
Serialize self to dictionary.
:return: a dictionary
"""
return self.__dict__
| (self) -> Dict |
16,326 | presidio_analyzer.analyzer_engine | AnalyzerEngine |
Entry point for Presidio Analyzer.
Orchestrating the detection of PII entities and all related logic.
:param registry: instance of type RecognizerRegistry
:param nlp_engine: instance of type NlpEngine
(for example SpacyNlpEngine)
:param app_tracer: instance of type AppTracer, used to trace the logic
used during each request for interpretability reasons.
:param log_decision_process: bool,
defines whether the decision process within the analyzer should be logged or not.
:param default_score_threshold: Minimum confidence value
for detected entities to be returned
:param supported_languages: List of possible languages this engine could be run on.
Used for loading the right NLP models and recognizers for these languages.
:param context_aware_enhancer: instance of type ContextAwareEnhancer for enhancing
confidence score based on context words, (LemmaContextAwareEnhancer will be created
by default if None passed)
| class AnalyzerEngine:
"""
Entry point for Presidio Analyzer.
Orchestrating the detection of PII entities and all related logic.
:param registry: instance of type RecognizerRegistry
:param nlp_engine: instance of type NlpEngine
(for example SpacyNlpEngine)
:param app_tracer: instance of type AppTracer, used to trace the logic
used during each request for interpretability reasons.
:param log_decision_process: bool,
defines whether the decision process within the analyzer should be logged or not.
:param default_score_threshold: Minimum confidence value
for detected entities to be returned
:param supported_languages: List of possible languages this engine could be run on.
Used for loading the right NLP models and recognizers for these languages.
:param context_aware_enhancer: instance of type ContextAwareEnhancer for enhancing
confidence score based on context words, (LemmaContextAwareEnhancer will be created
by default if None passed)
"""
def __init__(
self,
registry: RecognizerRegistry = None,
nlp_engine: NlpEngine = None,
app_tracer: AppTracer = None,
log_decision_process: bool = False,
default_score_threshold: float = 0,
supported_languages: List[str] = None,
context_aware_enhancer: Optional[ContextAwareEnhancer] = None,
):
if not supported_languages:
supported_languages = ["en"]
if not nlp_engine:
logger.info("nlp_engine not provided, creating default.")
provider = NlpEngineProvider()
nlp_engine = provider.create_engine()
if not registry:
logger.info("registry not provided, creating default.")
registry = RecognizerRegistry()
if not app_tracer:
app_tracer = AppTracer()
self.app_tracer = app_tracer
self.supported_languages = supported_languages
self.nlp_engine = nlp_engine
if not self.nlp_engine.is_loaded():
self.nlp_engine.load()
self.registry = registry
# load all recognizers
if not registry.recognizers:
registry.load_predefined_recognizers(
nlp_engine=self.nlp_engine, languages=self.supported_languages
)
self.log_decision_process = log_decision_process
self.default_score_threshold = default_score_threshold
if not context_aware_enhancer:
logger.debug(
"context aware enhancer not provided, creating default"
+ " lemma based enhancer."
)
context_aware_enhancer = LemmaContextAwareEnhancer()
self.context_aware_enhancer = context_aware_enhancer
def get_recognizers(self, language: Optional[str] = None) -> List[EntityRecognizer]:
"""
Return a list of PII recognizers currently loaded.
:param language: Return the recognizers supporting a given language.
:return: List of [Recognizer] as a RecognizersAllResponse
"""
if not language:
languages = self.supported_languages
else:
languages = [language]
recognizers = []
for language in languages:
logger.info(f"Fetching all recognizers for language {language}")
recognizers.extend(
self.registry.get_recognizers(language=language, all_fields=True)
)
return list(set(recognizers))
def get_supported_entities(self, language: Optional[str] = None) -> List[str]:
"""
Return a list of the entities that can be detected.
:param language: Return only entities supported in a specific language.
:return: List of entity names
"""
recognizers = self.get_recognizers(language=language)
supported_entities = []
for recognizer in recognizers:
supported_entities.extend(recognizer.get_supported_entities())
return list(set(supported_entities))
def analyze(
self,
text: str,
language: str,
entities: Optional[List[str]] = None,
correlation_id: Optional[str] = None,
score_threshold: Optional[float] = None,
return_decision_process: Optional[bool] = False,
ad_hoc_recognizers: Optional[List[EntityRecognizer]] = None,
context: Optional[List[str]] = None,
allow_list: Optional[List[str]] = None,
nlp_artifacts: Optional[NlpArtifacts] = None,
) -> List[RecognizerResult]:
"""
Find PII entities in text using different PII recognizers for a given language.
:param text: the text to analyze
:param language: the language of the text
:param entities: List of PII entities that should be looked for in the text.
If entities=None then all entities are looked for.
:param correlation_id: cross call ID for this request
:param score_threshold: A minimum value for which
to return an identified entity
:param return_decision_process: Whether the analysis decision process steps
returned in the response.
:param ad_hoc_recognizers: List of recognizers which will be used only
for this specific request.
:param context: List of context words to enhance confidence score if matched
with the recognized entity's recognizer context
:param allow_list: List of words that the user defines as being allowed to keep
in the text
:param nlp_artifacts: precomputed NlpArtifacts
:return: an array of the found entities in the text
:example:
>>> from presidio_analyzer import AnalyzerEngine
>>> # Set up the engine, loads the NLP module (spaCy model by default)
>>> # and other PII recognizers
>>> analyzer = AnalyzerEngine()
>>> # Call analyzer to get results
>>> results = analyzer.analyze(text='My phone number is 212-555-5555', entities=['PHONE_NUMBER'], language='en') # noqa D501
>>> print(results)
[type: PHONE_NUMBER, start: 19, end: 31, score: 0.85]
"""
all_fields = not entities
recognizers = self.registry.get_recognizers(
language=language,
entities=entities,
all_fields=all_fields,
ad_hoc_recognizers=ad_hoc_recognizers,
)
if all_fields:
# Since all_fields=True, list all entities by iterating
# over all recognizers
entities = self.get_supported_entities(language=language)
# run the nlp pipeline over the given text, store the results in
# a NlpArtifacts instance
if not nlp_artifacts:
nlp_artifacts = self.nlp_engine.process_text(text, language)
if self.log_decision_process:
self.app_tracer.trace(
correlation_id, "nlp artifacts:" + nlp_artifacts.to_json()
)
results = []
for recognizer in recognizers:
# Lazy loading of the relevant recognizers
if not recognizer.is_loaded:
recognizer.load()
recognizer.is_loaded = True
# analyze using the current recognizer and append the results
current_results = recognizer.analyze(
text=text, entities=entities, nlp_artifacts=nlp_artifacts
)
if current_results:
# add recognizer name to recognition metadata inside results
# if not exists
self.__add_recognizer_id_if_not_exists(current_results, recognizer)
results.extend(current_results)
results = self._enhance_using_context(
text, results, nlp_artifacts, recognizers, context
)
if self.log_decision_process:
self.app_tracer.trace(
correlation_id,
json.dumps([str(result.to_dict()) for result in results]),
)
# Remove duplicates or low score results
results = EntityRecognizer.remove_duplicates(results)
results = self.__remove_low_scores(results, score_threshold)
if allow_list:
results = self._remove_allow_list(results, allow_list, text)
if not return_decision_process:
results = self.__remove_decision_process(results)
return results
def _enhance_using_context(
self,
text: str,
raw_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
recognizers: List[EntityRecognizer],
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""
Enhance confidence score using context words.
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contains elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
"""
results = []
for recognizer in recognizers:
recognizer_results = [
r
for r in raw_results
if r.recognition_metadata[RecognizerResult.RECOGNIZER_IDENTIFIER_KEY]
== recognizer.id
]
other_recognizer_results = [
r
for r in raw_results
if r.recognition_metadata[RecognizerResult.RECOGNIZER_IDENTIFIER_KEY]
!= recognizer.id
]
# enhance score using context in recognizer level if implemented
recognizer_results = recognizer.enhance_using_context(
text=text,
# each recognizer will get access to all recognizer results
# to allow related entities context enhancement
raw_recognizer_results=recognizer_results,
other_raw_recognizer_results=other_recognizer_results,
nlp_artifacts=nlp_artifacts,
context=context,
)
results.extend(recognizer_results)
# Update results in case surrounding words or external context are relevant to
# the context words.
results = self.context_aware_enhancer.enhance_using_context(
text=text,
raw_results=results,
nlp_artifacts=nlp_artifacts,
recognizers=recognizers,
context=context,
)
return results
def __remove_low_scores(
self, results: List[RecognizerResult], score_threshold: float = None
) -> List[RecognizerResult]:
"""
Remove results for which the confidence is lower than the threshold.
:param results: List of RecognizerResult
:param score_threshold: float value for minimum possible confidence
:return: List[RecognizerResult]
"""
if score_threshold is None:
score_threshold = self.default_score_threshold
new_results = [result for result in results if result.score >= score_threshold]
return new_results
@staticmethod
def _remove_allow_list(
results: List[RecognizerResult], allow_list: List[str], text: str
) -> List[RecognizerResult]:
"""
Remove results which are part of the allow list.
:param results: List of RecognizerResult
:param allow_list: list of allowed terms
:param text: the text to analyze
:return: List[RecognizerResult]
"""
new_results = []
for result in results:
word = text[result.start : result.end]
# if the word is not specified to be allowed, keep in the PII entities
if word not in allow_list:
new_results.append(result)
return new_results
@staticmethod
def __add_recognizer_id_if_not_exists(
results: List[RecognizerResult], recognizer: EntityRecognizer
) -> None:
"""Ensure recognition metadata with recognizer id existence.
Ensure recognizer result list contains recognizer id inside recognition
metadata dictionary, and if not create it. recognizer_id is needed
for context aware enhancement.
:param results: List of RecognizerResult
:param recognizer: Entity recognizer
"""
for result in results:
if not result.recognition_metadata:
result.recognition_metadata = dict()
if (
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
not in result.recognition_metadata
):
result.recognition_metadata[
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
] = recognizer.id
if RecognizerResult.RECOGNIZER_NAME_KEY not in result.recognition_metadata:
result.recognition_metadata[
RecognizerResult.RECOGNIZER_NAME_KEY
] = recognizer.name
@staticmethod
def __remove_decision_process(
results: List[RecognizerResult],
) -> List[RecognizerResult]:
"""Remove decision process / analysis explanation from response."""
for result in results:
result.analysis_explanation = None
return results
| (registry: presidio_analyzer.recognizer_registry.RecognizerRegistry = None, nlp_engine: presidio_analyzer.nlp_engine.nlp_engine.NlpEngine = None, app_tracer: presidio_analyzer.app_tracer.AppTracer = None, log_decision_process: bool = False, default_score_threshold: float = 0, supported_languages: List[str] = None, context_aware_enhancer: Optional[presidio_analyzer.context_aware_enhancers.context_aware_enhancer.ContextAwareEnhancer] = None) |
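Beyond the docstring example, the documented parameters compose naturally; a hedged sketch combining an allow list, a score threshold, and the decision-process output (the sample text is arbitrary):

```python
from presidio_analyzer import AnalyzerEngine

analyzer = AnalyzerEngine()  # default spaCy NLP engine and predefined recognizers
results = analyzer.analyze(
    text="Contact Jane Doe at 212-555-5555",
    language="en",
    allow_list=["Jane Doe"],   # results whose exact matched text is in this list are dropped
    score_threshold=0.4,       # results below this confidence are removed
    return_decision_process=True,
)
for result in results:
    print(result, result.analysis_explanation)
```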
16,327 | presidio_analyzer.analyzer_engine | __add_recognizer_id_if_not_exists | Ensure recognition metadata with recognizer id existence.
Ensure recognizer result list contains recognizer id inside recognition
metadata dictionary, and if not create it. recognizer_id is needed
for context aware enhancement.
:param results: List of RecognizerResult
:param recognizer: Entity recognizer
| @staticmethod
def __add_recognizer_id_if_not_exists(
results: List[RecognizerResult], recognizer: EntityRecognizer
) -> None:
"""Ensure recognition metadata with recognizer id existence.
Ensure recognizer result list contains recognizer id inside recognition
metadata dictionary, and if not create it. recognizer_id is needed
for context aware enhancement.
:param results: List of RecognizerResult
:param recognizer: Entity recognizer
"""
for result in results:
if not result.recognition_metadata:
result.recognition_metadata = dict()
if (
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
not in result.recognition_metadata
):
result.recognition_metadata[
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
] = recognizer.id
if RecognizerResult.RECOGNIZER_NAME_KEY not in result.recognition_metadata:
result.recognition_metadata[
RecognizerResult.RECOGNIZER_NAME_KEY
] = recognizer.name
| (results: List[presidio_analyzer.recognizer_result.RecognizerResult], recognizer: presidio_analyzer.entity_recognizer.EntityRecognizer) -> NoneType |
16,328 | presidio_analyzer.analyzer_engine | __remove_decision_process | Remove decision process / analysis explanation from response. | @staticmethod
def __remove_decision_process(
results: List[RecognizerResult],
) -> List[RecognizerResult]:
"""Remove decision process / analysis explanation from response."""
for result in results:
result.analysis_explanation = None
return results
| (results: List[presidio_analyzer.recognizer_result.RecognizerResult]) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,329 | presidio_analyzer.analyzer_engine | __remove_low_scores |
Remove results for which the confidence is lower than the threshold.
:param results: List of RecognizerResult
:param score_threshold: float value for minimum possible confidence
:return: List[RecognizerResult]
| def __remove_low_scores(
self, results: List[RecognizerResult], score_threshold: float = None
) -> List[RecognizerResult]:
"""
Remove results for which the confidence is lower than the threshold.
:param results: List of RecognizerResult
:param score_threshold: float value for minimum possible confidence
:return: List[RecognizerResult]
"""
if score_threshold is None:
score_threshold = self.default_score_threshold
new_results = [result for result in results if result.score >= score_threshold]
return new_results
| (self, results: List[presidio_analyzer.recognizer_result.RecognizerResult], score_threshold: Optional[float] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,330 | presidio_analyzer.analyzer_engine | __init__ | null | def __init__(
self,
registry: RecognizerRegistry = None,
nlp_engine: NlpEngine = None,
app_tracer: AppTracer = None,
log_decision_process: bool = False,
default_score_threshold: float = 0,
supported_languages: List[str] = None,
context_aware_enhancer: Optional[ContextAwareEnhancer] = None,
):
if not supported_languages:
supported_languages = ["en"]
if not nlp_engine:
logger.info("nlp_engine not provided, creating default.")
provider = NlpEngineProvider()
nlp_engine = provider.create_engine()
if not registry:
logger.info("registry not provided, creating default.")
registry = RecognizerRegistry()
if not app_tracer:
app_tracer = AppTracer()
self.app_tracer = app_tracer
self.supported_languages = supported_languages
self.nlp_engine = nlp_engine
if not self.nlp_engine.is_loaded():
self.nlp_engine.load()
self.registry = registry
# load all recognizers
if not registry.recognizers:
registry.load_predefined_recognizers(
nlp_engine=self.nlp_engine, languages=self.supported_languages
)
self.log_decision_process = log_decision_process
self.default_score_threshold = default_score_threshold
if not context_aware_enhancer:
logger.debug(
"context aware enhancer not provided, creating default"
+ " lemma based enhancer."
)
context_aware_enhancer = LemmaContextAwareEnhancer()
self.context_aware_enhancer = context_aware_enhancer
| (self, registry: Optional[presidio_analyzer.recognizer_registry.RecognizerRegistry] = None, nlp_engine: Optional[presidio_analyzer.nlp_engine.nlp_engine.NlpEngine] = None, app_tracer: Optional[presidio_analyzer.app_tracer.AppTracer] = None, log_decision_process: bool = False, default_score_threshold: float = 0, supported_languages: Optional[List[str]] = None, context_aware_enhancer: Optional[presidio_analyzer.context_aware_enhancers.context_aware_enhancer.ContextAwareEnhancer] = None) |
16,331 | presidio_analyzer.analyzer_engine | _enhance_using_context |
Enhance confidence score using context words.
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contains elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
| def _enhance_using_context(
self,
text: str,
raw_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
recognizers: List[EntityRecognizer],
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""
Enhance confidence score using context words.
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contains elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
"""
results = []
for recognizer in recognizers:
recognizer_results = [
r
for r in raw_results
if r.recognition_metadata[RecognizerResult.RECOGNIZER_IDENTIFIER_KEY]
== recognizer.id
]
other_recognizer_results = [
r
for r in raw_results
if r.recognition_metadata[RecognizerResult.RECOGNIZER_IDENTIFIER_KEY]
!= recognizer.id
]
# enhance score using context in recognizer level if implemented
recognizer_results = recognizer.enhance_using_context(
text=text,
# each recognizer will get access to all recognizer results
# to allow related entities context enhancement
raw_recognizer_results=recognizer_results,
other_raw_recognizer_results=other_recognizer_results,
nlp_artifacts=nlp_artifacts,
context=context,
)
results.extend(recognizer_results)
# Update results in case surrounding words or external context are relevant to
# the context words.
results = self.context_aware_enhancer.enhance_using_context(
text=text,
raw_results=results,
nlp_artifacts=nlp_artifacts,
recognizers=recognizers,
context=context,
)
return results
| (self, text: str, raw_results: List[presidio_analyzer.recognizer_result.RecognizerResult], nlp_artifacts: presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts, recognizers: List[presidio_analyzer.entity_recognizer.EntityRecognizer], context: Optional[List[str]] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,332 | presidio_analyzer.analyzer_engine | _remove_allow_list |
Remove results which are part of the allow list.
:param results: List of RecognizerResult
:param allow_list: list of allowed terms
:param text: the text to analyze
:return: List[RecognizerResult]
| @staticmethod
def _remove_allow_list(
results: List[RecognizerResult], allow_list: List[str], text: str
) -> List[RecognizerResult]:
"""
Remove results which are part of the allow list.
:param results: List of RecognizerResult
:param allow_list: list of allowed terms
:param text: the text to analyze
:return: List[RecognizerResult]
"""
new_results = []
for result in results:
word = text[result.start : result.end]
# if the word is not specified to be allowed, keep in the PII entities
if word not in allow_list:
new_results.append(result)
return new_results
| (results: List[presidio_analyzer.recognizer_result.RecognizerResult], allow_list: List[str], text: str) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,333 | presidio_analyzer.analyzer_engine | analyze |
Find PII entities in text using different PII recognizers for a given language.
:param text: the text to analyze
:param language: the language of the text
:param entities: List of PII entities that should be looked for in the text.
If entities=None then all entities are looked for.
:param correlation_id: cross call ID for this request
:param score_threshold: A minimum value for which
to return an identified entity
:param return_decision_process: Whether the analysis decision process steps
returned in the response.
:param ad_hoc_recognizers: List of recognizers which will be used only
for this specific request.
:param context: List of context words to enhance confidence score if matched
with the recognized entity's recognizer context
:param allow_list: List of words that the user defines as being allowed to keep
in the text
:param nlp_artifacts: precomputed NlpArtifacts
:return: an array of the found entities in the text
:example:
>>> from presidio_analyzer import AnalyzerEngine
>>> # Set up the engine, loads the NLP module (spaCy model by default)
>>> # and other PII recognizers
>>> analyzer = AnalyzerEngine()
>>> # Call analyzer to get results
>>> results = analyzer.analyze(text='My phone number is 212-555-5555', entities=['PHONE_NUMBER'], language='en') # noqa D501
>>> print(results)
[type: PHONE_NUMBER, start: 19, end: 31, score: 0.85]
| def analyze(
self,
text: str,
language: str,
entities: Optional[List[str]] = None,
correlation_id: Optional[str] = None,
score_threshold: Optional[float] = None,
return_decision_process: Optional[bool] = False,
ad_hoc_recognizers: Optional[List[EntityRecognizer]] = None,
context: Optional[List[str]] = None,
allow_list: Optional[List[str]] = None,
nlp_artifacts: Optional[NlpArtifacts] = None,
) -> List[RecognizerResult]:
"""
Find PII entities in text using different PII recognizers for a given language.
:param text: the text to analyze
:param language: the language of the text
:param entities: List of PII entities that should be looked for in the text.
If entities=None then all entities are looked for.
:param correlation_id: cross call ID for this request
:param score_threshold: A minimum value for which
to return an identified entity
:param return_decision_process: Whether the analysis decision process steps
returned in the response.
:param ad_hoc_recognizers: List of recognizers which will be used only
for this specific request.
:param context: List of context words to enhance confidence score if matched
with the recognized entity's recognizer context
:param allow_list: List of words that the user defines as being allowed to keep
in the text
:param nlp_artifacts: precomputed NlpArtifacts
:return: an array of the found entities in the text
:example:
>>> from presidio_analyzer import AnalyzerEngine
>>> # Set up the engine, loads the NLP module (spaCy model by default)
>>> # and other PII recognizers
>>> analyzer = AnalyzerEngine()
>>> # Call analyzer to get results
>>> results = analyzer.analyze(text='My phone number is 212-555-5555', entities=['PHONE_NUMBER'], language='en') # noqa D501
>>> print(results)
[type: PHONE_NUMBER, start: 19, end: 31, score: 0.85]
"""
all_fields = not entities
recognizers = self.registry.get_recognizers(
language=language,
entities=entities,
all_fields=all_fields,
ad_hoc_recognizers=ad_hoc_recognizers,
)
if all_fields:
# Since all_fields=True, list all entities by iterating
# over all recognizers
entities = self.get_supported_entities(language=language)
# run the nlp pipeline over the given text, store the results in
# a NlpArtifacts instance
if not nlp_artifacts:
nlp_artifacts = self.nlp_engine.process_text(text, language)
if self.log_decision_process:
self.app_tracer.trace(
correlation_id, "nlp artifacts:" + nlp_artifacts.to_json()
)
results = []
for recognizer in recognizers:
# Lazy loading of the relevant recognizers
if not recognizer.is_loaded:
recognizer.load()
recognizer.is_loaded = True
# analyze using the current recognizer and append the results
current_results = recognizer.analyze(
text=text, entities=entities, nlp_artifacts=nlp_artifacts
)
if current_results:
# add recognizer name to recognition metadata inside results
# if not exists
self.__add_recognizer_id_if_not_exists(current_results, recognizer)
results.extend(current_results)
results = self._enhance_using_context(
text, results, nlp_artifacts, recognizers, context
)
if self.log_decision_process:
self.app_tracer.trace(
correlation_id,
json.dumps([str(result.to_dict()) for result in results]),
)
# Remove duplicates or low score results
results = EntityRecognizer.remove_duplicates(results)
results = self.__remove_low_scores(results, score_threshold)
if allow_list:
results = self._remove_allow_list(results, allow_list, text)
if not return_decision_process:
results = self.__remove_decision_process(results)
return results
| (self, text: str, language: str, entities: Optional[List[str]] = None, correlation_id: Optional[str] = None, score_threshold: Optional[float] = None, return_decision_process: Optional[bool] = False, ad_hoc_recognizers: Optional[List[presidio_analyzer.entity_recognizer.EntityRecognizer]] = None, context: Optional[List[str]] = None, allow_list: Optional[List[str]] = None, nlp_artifacts: Optional[presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,334 | presidio_analyzer.analyzer_engine | get_recognizers |
Return a list of PII recognizers currently loaded.
:param language: Return the recognizers supporting a given language.
:return: List of [Recognizer] as a RecognizersAllResponse
| def get_recognizers(self, language: Optional[str] = None) -> List[EntityRecognizer]:
"""
Return a list of PII recognizers currently loaded.
:param language: Return the recognizers supporting a given language.
:return: List of [Recognizer] as a RecognizersAllResponse
"""
if not language:
languages = self.supported_languages
else:
languages = [language]
recognizers = []
for language in languages:
logger.info(f"Fetching all recognizers for language {language}")
recognizers.extend(
self.registry.get_recognizers(language=language, all_fields=True)
)
return list(set(recognizers))
| (self, language: Optional[str] = None) -> List[presidio_analyzer.entity_recognizer.EntityRecognizer] |
16,335 | presidio_analyzer.analyzer_engine | get_supported_entities |
Return a list of the entities that can be detected.
:param language: Return only entities supported in a specific language.
:return: List of entity names
| def get_supported_entities(self, language: Optional[str] = None) -> List[str]:
"""
Return a list of the entities that can be detected.
:param language: Return only entities supported in a specific language.
:return: List of entity names
"""
recognizers = self.get_recognizers(language=language)
supported_entities = []
for recognizer in recognizers:
supported_entities.extend(recognizer.get_supported_entities())
return list(set(supported_entities))
| (self, language: Optional[str] = None) -> List[str] |
16,336 | presidio_analyzer.analyzer_request | AnalyzerRequest |
Analyzer request data.
:param req_data: A request dictionary with the following fields:
text: the text to analyze
language: the language of the text
entities: List of PII entities that should be looked for in the text.
If entities=None then all entities are looked for.
correlation_id: cross call ID for this request
score_threshold: A minimum value for which to return an identified entity
log_decision_process: Should the decision points within the analysis
be logged
return_decision_process: Should the decision points within the analysis
be returned as part of the response
| class AnalyzerRequest:
"""
Analyzer request data.
:param req_data: A request dictionary with the following fields:
text: the text to analyze
language: the language of the text
entities: List of PII entities that should be looked for in the text.
If entities=None then all entities are looked for.
correlation_id: cross call ID for this request
score_threshold: A minimum value for which to return an identified entity
log_decision_process: Should the decision points within the analysis
be logged
return_decision_process: Should the decision points within the analysis
be returned as part of the response
"""
def __init__(self, req_data: Dict):
self.text = req_data.get("text")
self.language = req_data.get("language")
self.entities = req_data.get("entities")
self.correlation_id = req_data.get("correlation_id")
self.score_threshold = req_data.get("score_threshold")
self.return_decision_process = req_data.get("return_decision_process")
ad_hoc_recognizers = req_data.get("ad_hoc_recognizers")
self.ad_hoc_recognizers = []
if ad_hoc_recognizers:
self.ad_hoc_recognizers = [
PatternRecognizer.from_dict(rec) for rec in ad_hoc_recognizers
]
self.context = req_data.get("context")
| (req_data: Dict) |
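A small sketch of how a request dictionary maps onto AnalyzerRequest and then onto AnalyzerEngine.analyze; the field values are invented for illustration:
from presidio_analyzer import AnalyzerEngine
from presidio_analyzer.analyzer_request import AnalyzerRequest

request = AnalyzerRequest(
    {
        "text": "Please call me at 212-555-1234",
        "language": "en",
        "score_threshold": 0.4,
        "return_decision_process": False,
    }
)
results = AnalyzerEngine().analyze(
    text=request.text,
    language=request.language,
    entities=request.entities,                      # None -> all entities
    correlation_id=request.correlation_id,
    score_threshold=request.score_threshold,
    return_decision_process=request.return_decision_process,
    ad_hoc_recognizers=request.ad_hoc_recognizers,  # empty list when not supplied
    context=request.context,
)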
16,337 | presidio_analyzer.analyzer_request | __init__ | null | def __init__(self, req_data: Dict):
self.text = req_data.get("text")
self.language = req_data.get("language")
self.entities = req_data.get("entities")
self.correlation_id = req_data.get("correlation_id")
self.score_threshold = req_data.get("score_threshold")
self.return_decision_process = req_data.get("return_decision_process")
ad_hoc_recognizers = req_data.get("ad_hoc_recognizers")
self.ad_hoc_recognizers = []
if ad_hoc_recognizers:
self.ad_hoc_recognizers = [
PatternRecognizer.from_dict(rec) for rec in ad_hoc_recognizers
]
self.context = req_data.get("context")
| (self, req_data: Dict) |
16,338 | presidio_analyzer.batch_analyzer_engine | BatchAnalyzerEngine |
Batch analysis of documents (tables, lists, dicts).
Wrapper class to run Presidio Analyzer Engine on multiple values,
either lists/iterators of strings, or dictionaries.
:param analyzer_engine: AnalyzerEngine instance to use
for handling the values in those collections.
| class BatchAnalyzerEngine:
"""
Batch analysis of documents (tables, lists, dicts).
Wrapper class to run Presidio Analyzer Engine on multiple values,
either lists/iterators of strings, or dictionaries.
:param analyzer_engine: AnalyzerEngine instance to use
for handling the values in those collections.
"""
def __init__(self, analyzer_engine: Optional[AnalyzerEngine] = None):
self.analyzer_engine = analyzer_engine
if not analyzer_engine:
self.analyzer_engine = AnalyzerEngine()
def analyze_iterator(
self,
texts: Iterable[Union[str, bool, float, int]],
language: str,
**kwargs,
) -> List[List[RecognizerResult]]:
"""
Analyze an iterable of strings.
:param texts: An iterable of strings to be analyzed.
:param language: Input language
:param kwargs: Additional parameters for the `AnalyzerEngine.analyze` method.
"""
# validate types
texts = self._validate_types(texts)
# Process the texts as batch for improved performance
nlp_artifacts_batch: Iterator[
Tuple[str, NlpArtifacts]
] = self.analyzer_engine.nlp_engine.process_batch(
texts=texts, language=language
)
list_results = []
for text, nlp_artifacts in nlp_artifacts_batch:
results = self.analyzer_engine.analyze(
text=str(text), nlp_artifacts=nlp_artifacts, language=language, **kwargs
)
list_results.append(results)
return list_results
def analyze_dict(
self,
input_dict: Dict[str, Union[Any, Iterable[Any]]],
language: str,
keys_to_skip: Optional[List[str]] = None,
**kwargs,
) -> Iterator[DictAnalyzerResult]:
"""
Analyze a dictionary of keys (strings) and values/iterable of values.
Non-string values are returned as is.
:param input_dict: The input dictionary for analysis
:param language: Input language
:param keys_to_skip: Keys to ignore during analysis
:param kwargs: Additional keyword arguments
for the `AnalyzerEngine.analyze` method.
Use this to pass arguments to the analyze method,
such as `ad_hoc_recognizers`, `context`, `return_decision_process`.
See `AnalyzerEngine.analyze` for the full list.
"""
context = []
if "context" in kwargs:
context = kwargs["context"]
del kwargs["context"]
if not keys_to_skip:
keys_to_skip = []
for key, value in input_dict.items():
if not value or key in keys_to_skip:
yield DictAnalyzerResult(key=key, value=value, recognizer_results=[])
continue # skip this key as requested
# Add the key as an additional context
specific_context = context[:]
specific_context.append(key)
if type(value) in (str, int, bool, float):
results: List[RecognizerResult] = self.analyzer_engine.analyze(
text=str(value), language=language, context=[key], **kwargs
)
elif isinstance(value, dict):
new_keys_to_skip = self._get_nested_keys_to_skip(key, keys_to_skip)
results = self.analyze_dict(
input_dict=value,
language=language,
context=specific_context,
keys_to_skip=new_keys_to_skip,
**kwargs,
)
elif isinstance(value, Iterable):
# Analyze each value in the iterable
results: List[List[RecognizerResult]] = self.analyze_iterator(
texts=value,
language=language,
context=specific_context,
**kwargs,
)
else:
raise ValueError(f"type {type(value)} is unsupported.")
yield DictAnalyzerResult(key=key, value=value, recognizer_results=results)
@staticmethod
def _validate_types(value_iterator: Iterable[Any]) -> Iterator[Any]:
for val in value_iterator:
if val and not type(val) in (int, float, bool, str):
err_msg = (
"Analyzer.analyze_iterator only works "
"on primitive types (int, float, bool, str). "
"Lists of objects are not yet supported."
)
logger.error(err_msg)
raise ValueError(err_msg)
yield val
@staticmethod
def _get_nested_keys_to_skip(key, keys_to_skip):
new_keys_to_skip = [
k.replace(f"{key}.", "") for k in keys_to_skip if k.startswith(key)
]
return new_keys_to_skip
| (analyzer_engine: Optional[presidio_analyzer.analyzer_engine.AnalyzerEngine] = None) |
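A usage sketch for the batch wrapper; the texts are illustrative, and it assumes BatchAnalyzerEngine is exported from the package root:
from presidio_analyzer import AnalyzerEngine, BatchAnalyzerEngine

batch_analyzer = BatchAnalyzerEngine(analyzer_engine=AnalyzerEngine())
texts = ["My name is Alice", "Reach me at 212-555-1234", 42]  # primitive values are allowed
results_per_text = batch_analyzer.analyze_iterator(texts=texts, language="en")
for text, results in zip(texts, results_per_text):
    print(text, "->", [r.entity_type for r in results])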
16,339 | presidio_analyzer.batch_analyzer_engine | __init__ | null | def __init__(self, analyzer_engine: Optional[AnalyzerEngine] = None):
self.analyzer_engine = analyzer_engine
if not analyzer_engine:
self.analyzer_engine = AnalyzerEngine()
| (self, analyzer_engine: Optional[presidio_analyzer.analyzer_engine.AnalyzerEngine] = None) |
16,340 | presidio_analyzer.batch_analyzer_engine | _get_nested_keys_to_skip | null | @staticmethod
def _get_nested_keys_to_skip(key, keys_to_skip):
new_keys_to_skip = [
k.replace(f"{key}.", "") for k in keys_to_skip if k.startswith(key)
]
return new_keys_to_skip
| (key, keys_to_skip) |
16,341 | presidio_analyzer.batch_analyzer_engine | _validate_types | null | @staticmethod
def _validate_types(value_iterator: Iterable[Any]) -> Iterator[Any]:
for val in value_iterator:
if val and not type(val) in (int, float, bool, str):
err_msg = (
"Analyzer.analyze_iterator only works "
"on primitive types (int, float, bool, str). "
"Lists of objects are not yet supported."
)
logger.error(err_msg)
raise ValueError(err_msg)
yield val
| (value_iterator: Iterable[Any]) -> Iterator[Any] |
16,342 | presidio_analyzer.batch_analyzer_engine | analyze_dict |
Analyze a dictionary of keys (strings) and values/iterable of values.
Non-string values are returned as is.
:param input_dict: The input dictionary for analysis
:param language: Input language
:param keys_to_skip: Keys to ignore during analysis
:param kwargs: Additional keyword arguments
for the `AnalyzerEngine.analyze` method.
Use this to pass arguments to the analyze method,
such as `ad_hoc_recognizers`, `context`, `return_decision_process`.
See `AnalyzerEngine.analyze` for the full list.
| def analyze_dict(
self,
input_dict: Dict[str, Union[Any, Iterable[Any]]],
language: str,
keys_to_skip: Optional[List[str]] = None,
**kwargs,
) -> Iterator[DictAnalyzerResult]:
"""
Analyze a dictionary of keys (strings) and values/iterable of values.
Non-string values are returned as is.
:param input_dict: The input dictionary for analysis
:param language: Input language
:param keys_to_skip: Keys to ignore during analysis
:param kwargs: Additional keyword arguments
for the `AnalyzerEngine.analyze` method.
Use this to pass arguments to the analyze method,
such as `ad_hoc_recognizers`, `context`, `return_decision_process`.
See `AnalyzerEngine.analyze` for the full list.
"""
context = []
if "context" in kwargs:
context = kwargs["context"]
del kwargs["context"]
if not keys_to_skip:
keys_to_skip = []
for key, value in input_dict.items():
if not value or key in keys_to_skip:
yield DictAnalyzerResult(key=key, value=value, recognizer_results=[])
continue # skip this key as requested
# Add the key as an additional context
specific_context = context[:]
specific_context.append(key)
if type(value) in (str, int, bool, float):
results: List[RecognizerResult] = self.analyzer_engine.analyze(
text=str(value), language=language, context=[key], **kwargs
)
elif isinstance(value, dict):
new_keys_to_skip = self._get_nested_keys_to_skip(key, keys_to_skip)
results = self.analyze_dict(
input_dict=value,
language=language,
context=specific_context,
keys_to_skip=new_keys_to_skip,
**kwargs,
)
elif isinstance(value, Iterable):
# Analyze each value in the iterable
results: List[List[RecognizerResult]] = self.analyze_iterator(
texts=value,
language=language,
context=specific_context,
**kwargs,
)
else:
raise ValueError(f"type {type(value)} is unsupported.")
yield DictAnalyzerResult(key=key, value=value, recognizer_results=results)
| (self, input_dict: Dict[str, Union[Any, Iterable[Any]]], language: str, keys_to_skip: Optional[List[str]] = None, **kwargs) -> Iterator[presidio_analyzer.dict_analyzer_result.DictAnalyzerResult] |
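A sketch of analyze_dict on a nested dictionary (keys and values are invented); note that for nested dictionaries the recognizer_results field is itself an iterator of DictAnalyzerResult:
from presidio_analyzer import BatchAnalyzerEngine

input_dict = {
    "name": "Jane Doe",
    "notes": ["Reached at 212-555-1234", "no PII here"],
    "employer": {"company": "Contoso", "city": "Seattle"},
}
batch_analyzer = BatchAnalyzerEngine()
for item in batch_analyzer.analyze_dict(
    input_dict, language="en", keys_to_skip=["employer.city"]
):
    print(item.key, item.value, item.recognizer_results)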
16,343 | presidio_analyzer.batch_analyzer_engine | analyze_iterator |
Analyze an iterable of strings.
:param texts: An iterable of strings to be analyzed.
:param language: Input language
:param kwargs: Additional parameters for the `AnalyzerEngine.analyze` method.
| def analyze_iterator(
self,
texts: Iterable[Union[str, bool, float, int]],
language: str,
**kwargs,
) -> List[List[RecognizerResult]]:
"""
Analyze an iterable of strings.
:param texts: An iterable of strings to be analyzed.
:param language: Input language
:param kwargs: Additional parameters for the `AnalyzerEngine.analyze` method.
"""
# validate types
texts = self._validate_types(texts)
# Process the texts as batch for improved performance
nlp_artifacts_batch: Iterator[
Tuple[str, NlpArtifacts]
] = self.analyzer_engine.nlp_engine.process_batch(
texts=texts, language=language
)
list_results = []
for text, nlp_artifacts in nlp_artifacts_batch:
results = self.analyzer_engine.analyze(
text=str(text), nlp_artifacts=nlp_artifacts, language=language, **kwargs
)
list_results.append(results)
return list_results
| (self, texts: Iterable[Union[str, bool, float, int]], language: str, **kwargs) -> List[List[presidio_analyzer.recognizer_result.RecognizerResult]] |
16,344 | presidio_analyzer.context_aware_enhancers.context_aware_enhancer | ContextAwareEnhancer |
A class representing an abstract context aware enhancer.
Context words might enhance confidence score of a recognized entity,
ContextAwareEnhancer is an abstract class to be inherited by a context aware
enhancer logic.
:param context_similarity_factor: How much to enhance confidence of match entity
:param min_score_with_context_similarity: Minimum confidence score
:param context_prefix_count: how many words before the entity to match context
:param context_suffix_count: how many words after the entity to match context
| class ContextAwareEnhancer:
"""
A class representing an abstract context aware enhancer.
Context words might enhance confidence score of a recognized entity,
ContextAwareEnhancer is an abstract class to be inherited by a context aware
enhancer logic.
:param context_similarity_factor: How much to enhance confidence of match entity
:param min_score_with_context_similarity: Minimum confidence score
:param context_prefix_count: how many words before the entity to match context
:param context_suffix_count: how many words after the entity to match context
"""
MIN_SCORE = 0
MAX_SCORE = 1.0
def __init__(
self,
context_similarity_factor: float,
min_score_with_context_similarity: float,
context_prefix_count: int,
context_suffix_count: int,
):
self.context_similarity_factor = context_similarity_factor
self.min_score_with_context_similarity = min_score_with_context_similarity
self.context_prefix_count = context_prefix_count
self.context_suffix_count = context_suffix_count
@abstractmethod
def enhance_using_context(
self,
text: str,
raw_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
recognizers: List[EntityRecognizer],
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""
Update results in case surrounding words are relevant to the context words.
Using the surrounding words of the actual word matches, look
for specific strings that if found contribute to the score
of the result, improving the confidence that the match is
indeed of that PII entity type
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
"""
return raw_results
| (context_similarity_factor: float, min_score_with_context_similarity: float, context_prefix_count: int, context_suffix_count: int) |
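A minimal, hypothetical subclass sketch showing the intended override point; the boosting rule is deliberately naive and is not part of the library (it also assumes the subpackage re-exports the base class):
from presidio_analyzer.context_aware_enhancers import ContextAwareEnhancer

class WholeTextContextEnhancer(ContextAwareEnhancer):
    """Hypothetical enhancer: boost a result if any context word occurs anywhere in the text."""

    def __init__(self):
        super().__init__(
            context_similarity_factor=0.3,
            min_score_with_context_similarity=0.4,
            context_prefix_count=5,
            context_suffix_count=0,
        )

    def enhance_using_context(
        self, text, raw_results, nlp_artifacts, recognizers, context=None
    ):
        lowered = text.lower()
        words = [w.lower() for w in (context or [])]
        for result in raw_results:  # mutates the results in place, unlike the lemma enhancer
            if any(word in lowered for word in words):
                result.score = min(
                    result.score + self.context_similarity_factor, self.MAX_SCORE
                )
        return raw_results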
16,345 | presidio_analyzer.context_aware_enhancers.context_aware_enhancer | __init__ | null | def __init__(
self,
context_similarity_factor: float,
min_score_with_context_similarity: float,
context_prefix_count: int,
context_suffix_count: int,
):
self.context_similarity_factor = context_similarity_factor
self.min_score_with_context_similarity = min_score_with_context_similarity
self.context_prefix_count = context_prefix_count
self.context_suffix_count = context_suffix_count
| (self, context_similarity_factor: float, min_score_with_context_similarity: float, context_prefix_count: int, context_suffix_count: int) |
16,346 | presidio_analyzer.context_aware_enhancers.context_aware_enhancer | enhance_using_context |
Update results in case surrounding words are relevant to the context words.
Using the surrounding words of the actual word matches, look
for specific strings that if found contribute to the score
of the result, improving the confidence that the match is
indeed of that PII entity type
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
| @abstractmethod
def enhance_using_context(
self,
text: str,
raw_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
recognizers: List[EntityRecognizer],
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""
Update results in case surrounding words are relevant to the context words.
Using the surrounding words of the actual word matches, look
for specific strings that if found contribute to the score
of the result, improving the confidence that the match is
indeed of that PII entity type
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
"""
return raw_results
| (self, text: str, raw_results: List[presidio_analyzer.recognizer_result.RecognizerResult], nlp_artifacts: presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts, recognizers: List[presidio_analyzer.entity_recognizer.EntityRecognizer], context: Optional[List[str]] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,347 | presidio_analyzer.dict_analyzer_result | DictAnalyzerResult |
Data class for holding the output of the Presidio Analyzer on dictionaries.
:param key: key in dictionary
:param value: value to run analysis on (either string or list of strings)
:param recognizer_results: Analyzer output for one value.
Could be either:
- A list of recognizer results if the input is one string
- A list of lists of recognizer results, if the input is a list of strings.
- An iterator of a DictAnalyzerResult, if the input is a dictionary.
In this case the recognizer_results would be the iterator
of the DictAnalyzerResults next level in the dictionary.
| class DictAnalyzerResult:
"""
Data class for holding the output of the Presidio Analyzer on dictionaries.
:param key: key in dictionary
:param value: value to run analysis on (either string or list of strings)
:param recognizer_results: Analyzer output for one value.
Could be either:
- A list of recognizer results if the input is one string
- A list of lists of recognizer results, if the input is a list of strings.
- An iterator of a DictAnalyzerResult, if the input is a dictionary.
In this case the recognizer_results would be the iterator
of the DictAnalyzerResults next level in the dictionary.
"""
key: str
value: Union[str, List[str], dict]
recognizer_results: Union[
List[RecognizerResult],
List[List[RecognizerResult]],
Iterator["DictAnalyzerResult"],
]
| (key: str, value: Union[str, List[str], dict], recognizer_results: Union[List[presidio_analyzer.recognizer_result.RecognizerResult], List[List[presidio_analyzer.recognizer_result.RecognizerResult]], Iterator[presidio_analyzer.dict_analyzer_result.DictAnalyzerResult]]) -> None |
16,348 | presidio_analyzer.dict_analyzer_result | __eq__ | null | from dataclasses import dataclass
from typing import List, Union, Iterator
from presidio_analyzer import RecognizerResult
@dataclass
class DictAnalyzerResult:
"""
Data class for holding the output of the Presidio Analyzer on dictionaries.
:param key: key in dictionary
:param value: value to run analysis on (either string or list of strings)
:param recognizer_results: Analyzer output for one value.
Could be either:
- A list of recognizer results if the input is one string
- A list of lists of recognizer results, if the input is a list of strings.
- An iterator of a DictAnalyzerResult, if the input is a dictionary.
In this case the recognizer_results would be the iterator
of the DictAnalyzerResults next level in the dictionary.
"""
key: str
value: Union[str, List[str], dict]
recognizer_results: Union[
List[RecognizerResult],
List[List[RecognizerResult]],
Iterator["DictAnalyzerResult"],
]
| (self, other) |
16,351 | presidio_analyzer.entity_recognizer | EntityRecognizer |
A class representing an abstract PII entity recognizer.
EntityRecognizer is an abstract class to be inherited by
Recognizers which hold the logic for recognizing specific PII entities.
EntityRecognizer exposes a method called enhance_using_context which
can be overridden in case a custom context aware enhancement is needed
in derived class of a recognizer.
:param supported_entities: the entities supported by this recognizer
(for example, phone number, address, etc.)
:param supported_language: the language supported by this recognizer.
The supported language code is iso6391Name
:param name: the name of this recognizer (optional)
:param version: the recognizer current version
:param context: a list of words which can help boost confidence score
when they appear in context of the matched entity
| class EntityRecognizer:
"""
A class representing an abstract PII entity recognizer.
EntityRecognizer is an abstract class to be inherited by
Recognizers which hold the logic for recognizing specific PII entities.
EntityRecognizer exposes a method called enhance_using_context which
can be overridden in case a custom context aware enhancement is needed
in derived class of a recognizer.
:param supported_entities: the entities supported by this recognizer
(for example, phone number, address, etc.)
:param supported_language: the language supported by this recognizer.
The supported language code is iso6391Name
:param name: the name of this recognizer (optional)
:param version: the recognizer current version
:param context: a list of words which can help boost confidence score
when they appear in context of the matched entity
"""
MIN_SCORE = 0
MAX_SCORE = 1.0
def __init__(
self,
supported_entities: List[str],
name: str = None,
supported_language: str = "en",
version: str = "0.0.1",
context: Optional[List[str]] = None,
):
self.supported_entities = supported_entities
if name is None:
self.name = self.__class__.__name__ # assign class name as name
else:
self.name = name
self._id = f"{self.name}_{id(self)}"
self.supported_language = supported_language
self.version = version
self.is_loaded = False
self.context = context if context else []
self.load()
logger.info("Loaded recognizer: %s", self.name)
self.is_loaded = True
@property
def id(self):
"""Return a unique identifier of this recognizer."""
return self._id
@abstractmethod
def load(self) -> None:
"""
Initialize the recognizer assets if needed.
(e.g. machine learning models)
"""
@abstractmethod
def analyze(
self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
) -> List[RecognizerResult]:
"""
Analyze text to identify entities.
:param text: The text to be analyzed
:param entities: The list of entities this recognizer is able to detect
:param nlp_artifacts: A group of attributes which are the result of
an NLP process over the input text.
:return: List of results detected by this recognizer.
"""
return None
def enhance_using_context(
self,
text: str,
raw_recognizer_results: List[RecognizerResult],
other_raw_recognizer_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""Enhance confidence score using context of the entity.
Override this method in derived class in case a custom logic
is needed, otherwise return value will be equal to
raw_results.
In case a result score is boosted, the derived class needs to update
result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]
:param text: The actual text that was analyzed
:param raw_recognizer_results: This recognizer's results, to be updated
based on recognizer specific context.
:param other_raw_recognizer_results: Other recognizer results matched in
the given text to allow related entity context enhancement
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param context: list of context words
"""
return raw_recognizer_results
def get_supported_entities(self) -> List[str]:
"""
Return the list of entities this recognizer can identify.
:return: A list of the supported entities by this recognizer
"""
return self.supported_entities
def get_supported_language(self) -> str:
"""
Return the language this recognizer can support.
:return: The language supported by this recognizer
"""
return self.supported_language
def get_version(self) -> str:
"""
Return the version of this recognizer.
:return: The current version of this recognizer
"""
return self.version
def to_dict(self) -> Dict:
"""
Serialize self to dictionary.
:return: a dictionary
"""
return_dict = {
"supported_entities": self.supported_entities,
"supported_language": self.supported_language,
"name": self.name,
"version": self.version,
}
return return_dict
@classmethod
def from_dict(cls, entity_recognizer_dict: Dict) -> "EntityRecognizer":
"""
Create EntityRecognizer from a dict input.
:param entity_recognizer_dict: Dict containing keys and values for instantiation
"""
return cls(**entity_recognizer_dict)
@staticmethod
def remove_duplicates(results: List[RecognizerResult]) -> List[RecognizerResult]:
"""
Remove duplicate results.
Remove duplicates in case the two results
have identical starts, ends, and types.
:param results: List[RecognizerResult]
:return: List[RecognizerResult]
"""
results = list(set(results))
results = sorted(results, key=lambda x: (-x.score, x.start, -(x.end - x.start)))
filtered_results = []
for result in results:
if result.score == 0:
continue
to_keep = result not in filtered_results # equals based comparison
if to_keep:
for filtered in filtered_results:
# If result is contained in one of the other results
if (
result.contained_in(filtered)
and result.entity_type == filtered.entity_type
):
to_keep = False
break
if to_keep:
filtered_results.append(result)
return filtered_results
| (supported_entities: List[str], name: str = None, supported_language: str = 'en', version: str = '0.0.1', context: Optional[List[str]] = None) |
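A toy recognizer sketch built on this interface; the entity name, score, and matching logic are invented, and the RecognizerResult constructor is assumed to take (entity_type, start, end, score):
from presidio_analyzer import EntityRecognizer, RecognizerResult

class TitleRecognizer(EntityRecognizer):
    """Hypothetical recognizer that flags a fixed set of honorific titles."""

    TITLES = ("mr.", "mrs.", "dr.", "prof.")

    def __init__(self):
        super().__init__(supported_entities=["TITLE"], name="TitleRecognizer")

    def load(self):
        pass  # no assets to load

    def analyze(self, text, entities, nlp_artifacts):
        results = []
        lowered = text.lower()
        for title in self.TITLES:
            start = lowered.find(title)
            if start >= 0:
                results.append(RecognizerResult("TITLE", start, start + len(title), 0.6))
        return results

# analyzer.registry.add_recognizer(TitleRecognizer())  # assuming a registry exposing add_recognizer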
16,352 | presidio_analyzer.entity_recognizer | __init__ | null | def __init__(
self,
supported_entities: List[str],
name: str = None,
supported_language: str = "en",
version: str = "0.0.1",
context: Optional[List[str]] = None,
):
self.supported_entities = supported_entities
if name is None:
self.name = self.__class__.__name__ # assign class name as name
else:
self.name = name
self._id = f"{self.name}_{id(self)}"
self.supported_language = supported_language
self.version = version
self.is_loaded = False
self.context = context if context else []
self.load()
logger.info("Loaded recognizer: %s", self.name)
self.is_loaded = True
| (self, supported_entities: List[str], name: Optional[str] = None, supported_language: str = 'en', version: str = '0.0.1', context: Optional[List[str]] = None) |
16,353 | presidio_analyzer.entity_recognizer | analyze |
Analyze text to identify entities.
:param text: The text to be analyzed
:param entities: The list of entities this recognizer is able to detect
:param nlp_artifacts: A group of attributes which are the result of
an NLP process over the input text.
:return: List of results detected by this recognizer.
| @abstractmethod
def analyze(
self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
) -> List[RecognizerResult]:
"""
Analyze text to identify entities.
:param text: The text to be analyzed
:param entities: The list of entities this recognizer is able to detect
:param nlp_artifacts: A group of attributes which are the result of
an NLP process over the input text.
:return: List of results detected by this recognizer.
"""
return None
| (self, text: str, entities: List[str], nlp_artifacts: presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,354 | presidio_analyzer.entity_recognizer | enhance_using_context | Enhance confidence score using context of the entity.
Override this method in derived class in case a custom logic
is needed, otherwise return value will be equal to
raw_results.
In case a result score is boosted, the derived class needs to update
result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]
:param text: The actual text that was analyzed
:param raw_recognizer_results: This recognizer's results, to be updated
based on recognizer specific context.
:param other_raw_recognizer_results: Other recognizer results matched in
the given text to allow related entity context enhancement
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param context: list of context words
| def enhance_using_context(
self,
text: str,
raw_recognizer_results: List[RecognizerResult],
other_raw_recognizer_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""Enhance confidence score using context of the entity.
Override this method in derived class in case a custom logic
is needed, otherwise return value will be equal to
raw_results.
In case a result score is boosted, the derived class needs to update
result.recognition_metadata[RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY]
:param text: The actual text that was analyzed
:param raw_recognizer_results: This recognizer's results, to be updated
based on recognizer specific context.
:param other_raw_recognizer_results: Other recognizer results matched in
the given text to allow related entity context enhancement
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param context: list of context words
"""
return raw_recognizer_results
| (self, text: str, raw_recognizer_results: List[presidio_analyzer.recognizer_result.RecognizerResult], other_raw_recognizer_results: List[presidio_analyzer.recognizer_result.RecognizerResult], nlp_artifacts: presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts, context: Optional[List[str]] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,355 | presidio_analyzer.entity_recognizer | get_supported_entities |
Return the list of entities this recognizer can identify.
:return: A list of the supported entities by this recognizer
| def get_supported_entities(self) -> List[str]:
"""
Return the list of entities this recognizer can identify.
:return: A list of the supported entities by this recognizer
"""
return self.supported_entities
| (self) -> List[str] |
16,356 | presidio_analyzer.entity_recognizer | get_supported_language |
Return the language this recognizer can support.
:return: The language supported by this recognizer
| def get_supported_language(self) -> str:
"""
Return the language this recognizer can support.
:return: The language supported by this recognizer
"""
return self.supported_language
| (self) -> str |
16,357 | presidio_analyzer.entity_recognizer | get_version |
Return the version of this recognizer.
:return: The current version of this recognizer
| def get_version(self) -> str:
"""
Return the version of this recognizer.
:return: The current version of this recognizer
"""
return self.version
| (self) -> str |
16,358 | presidio_analyzer.entity_recognizer | load |
Initialize the recognizer assets if needed.
(e.g. machine learning models)
| @abstractmethod
def load(self) -> None:
"""
Initialize the recognizer assets if needed.
(e.g. machine learning models)
"""
| (self) -> NoneType |
16,359 | presidio_analyzer.entity_recognizer | remove_duplicates |
Remove duplicate results.
Remove duplicates in case the two results
have identical starts, ends, and types.
:param results: List[RecognizerResult]
:return: List[RecognizerResult]
| @staticmethod
def remove_duplicates(results: List[RecognizerResult]) -> List[RecognizerResult]:
"""
Remove duplicate results.
Remove duplicates in case the two results
have identical starts, ends, and types.
:param results: List[RecognizerResult]
:return: List[RecognizerResult]
"""
results = list(set(results))
results = sorted(results, key=lambda x: (-x.score, x.start, -(x.end - x.start)))
filtered_results = []
for result in results:
if result.score == 0:
continue
to_keep = result not in filtered_results # equals based comparison
if to_keep:
for filtered in filtered_results:
# If result is contained in one of the other results
if (
result.contained_in(filtered)
and result.entity_type == filtered.entity_type
):
to_keep = False
break
if to_keep:
filtered_results.append(result)
return filtered_results
| (results: List[presidio_analyzer.recognizer_result.RecognizerResult]) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
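A small sketch of the deduplication behavior described above, again assuming the RecognizerResult constructor takes (entity_type, start, end, score):
from presidio_analyzer import EntityRecognizer, RecognizerResult

results = [
    RecognizerResult("PHONE_NUMBER", 10, 22, 0.85),
    RecognizerResult("PHONE_NUMBER", 10, 22, 0.85),  # exact duplicate, removed by set()
    RecognizerResult("PHONE_NUMBER", 12, 20, 0.60),  # contained in the stronger match, removed
    RecognizerResult("PERSON", 0, 4, 0.0),           # zero score, skipped
]
deduped = EntityRecognizer.remove_duplicates(results)
# expected: a single PHONE_NUMBER result spanning characters 10-22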
16,360 | presidio_analyzer.entity_recognizer | to_dict |
Serialize self to dictionary.
:return: a dictionary
| def to_dict(self) -> Dict:
"""
Serialize self to dictionary.
:return: a dictionary
"""
return_dict = {
"supported_entities": self.supported_entities,
"supported_language": self.supported_language,
"name": self.name,
"version": self.version,
}
return return_dict
| (self) -> Dict |
16,361 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | LemmaContextAwareEnhancer |
A class representing a lemma based context aware enhancer logic.
Context words might enhance confidence score of a recognized entity,
LemmaContextAwareEnhancer is an implementation of lemma-based context-aware logic:
it compares the spaCy lemmas of the words surrounding the matched entity with the given
context and the recognizer's context words;
if a match is found, it enhances the recognized entity's confidence score by a given factor.
:param context_similarity_factor: How much to enhance confidence of match entity
:param min_score_with_context_similarity: Minimum confidence score
:param context_prefix_count: how many words before the entity to match context
:param context_suffix_count: how many words after the entity to match context
| class LemmaContextAwareEnhancer(ContextAwareEnhancer):
"""
A class representing a lemma based context aware enhancer logic.
Context words might enhance confidence score of a recognized entity,
LemmaContextAwareEnhancer is an implementation of lemma-based context-aware logic:
it compares the spaCy lemmas of the words surrounding the matched entity with the given
context and the recognizer's context words;
if a match is found, it enhances the recognized entity's confidence score by a given factor.
:param context_similarity_factor: How much to enhance confidence of match entity
:param min_score_with_context_similarity: Minimum confidence score
:param context_prefix_count: how many words before the entity to match context
:param context_suffix_count: how many words after the entity to match context
"""
def __init__(
self,
context_similarity_factor: float = 0.35,
min_score_with_context_similarity: float = 0.4,
context_prefix_count: int = 5,
context_suffix_count: int = 0,
):
super().__init__(
context_similarity_factor=context_similarity_factor,
min_score_with_context_similarity=min_score_with_context_similarity,
context_prefix_count=context_prefix_count,
context_suffix_count=context_suffix_count,
)
def enhance_using_context(
self,
text: str,
raw_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
recognizers: List[EntityRecognizer],
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""
Update results in case the lemmas of surrounding words or input context
words are identical to the context words.
Using the surrounding words of the actual word matches, look
for specific strings that if found contribute to the score
of the result, improving the confidence that the match is
indeed of that PII entity type
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
""" # noqa D205 D400
# create a deep copy of the results object, so we can manipulate it
results = copy.deepcopy(raw_results)
# create recognizer context dictionary
recognizers_dict = {recognizer.id: recognizer for recognizer in recognizers}
# Create an empty list if None, or lowercase all context words in the list
if not context:
context = []
else:
context = [word.lower() for word in context]
# Sanity
if nlp_artifacts is None:
logger.warning("NLP artifacts were not provided")
return results
for result in results:
recognizer = None
# get recognizer matching the result, if found.
if (
result.recognition_metadata
and RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
in result.recognition_metadata.keys()
):
recognizer = recognizers_dict.get(
result.recognition_metadata[
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
]
)
if not recognizer:
logger.debug(
"Recognizer name not found as part of the "
"recognition_metadata dict in the RecognizerResult. "
)
continue
# skip recognizer result if the recognizer doesn't support
# context enhancement
if not recognizer.context:
logger.debug(
"recognizer '%s' does not support context enhancement",
recognizer.name,
)
continue
# skip context enhancement if already boosted by recognizer level
if result.recognition_metadata.get(
RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY
):
logger.debug("result score already boosted, skipping")
continue
# extract lemmatized context from the surrounding of the match
word = text[result.start : result.end]
surrounding_words = self._extract_surrounding_words(
nlp_artifacts=nlp_artifacts, word=word, start=result.start
)
# combine other sources of context with surrounding words
surrounding_words.extend(context)
supportive_context_word = self._find_supportive_word_in_context(
surrounding_words, recognizer.context
)
if supportive_context_word != "":
result.score += self.context_similarity_factor
result.score = max(result.score, self.min_score_with_context_similarity)
result.score = min(result.score, ContextAwareEnhancer.MAX_SCORE)
# Update the explainability object with context information
# helped to improve the score
result.analysis_explanation.set_supportive_context_word(
supportive_context_word
)
result.analysis_explanation.set_improved_score(result.score)
return results
@staticmethod
def _find_supportive_word_in_context(
context_list: List[str], recognizer_context_list: List[str]
) -> str:
"""
Find words in the text which are relevant for context evaluation.
A word is considered a supportive context word if any of the recognizer's
predefined context keywords appears, exactly or as a substring, in the
collected context words.
:param context_list: words before and after the matched entity within
a specified window size
:param recognizer_context_list: a list of words considered as
context keywords manually specified by the recognizer's author
"""
word = ""
# If the context list is empty, no need to continue
if context_list is None or recognizer_context_list is None:
return word
for predefined_context_word in recognizer_context_list:
# result == true only if any of the predefined context words
# is found exactly or as a substring in any of the collected
# context words
result = next(
(
True
for keyword in context_list
if predefined_context_word in keyword
),
False,
)
if result:
logger.debug("Found context keyword '%s'", predefined_context_word)
word = predefined_context_word
break
return word
def _extract_surrounding_words(
self, nlp_artifacts: NlpArtifacts, word: str, start: int
) -> List[str]:
"""Extract words surrounding another given word.
The text from which the context is extracted is given in the nlp
doc.
:param nlp_artifacts: An abstraction layer which holds different
items which are the result of a NLP pipeline
execution on a given text
:param word: The word to look for context around
:param start: The start index of the word in the original text
"""
if not nlp_artifacts.tokens:
logger.info("Skipping context extraction due to lack of NLP artifacts")
# if there are no nlp artifacts we cannot extract
# surrounding context, so we return a valid, yet empty,
# context list
return [""]
# Get the already prepared words in the given text, in their
# LEMMATIZED version
lemmatized_keywords = nlp_artifacts.keywords
# since the list of tokens is not necessarily aligned
# with the actual index of the match, we look for the
# token index which corresponds to the match
token_index = self._find_index_of_match_token(
word, start, nlp_artifacts.tokens, nlp_artifacts.tokens_indices
)
# index i belongs to the PII entity, take the preceding n words
# and the succeeding m words into a context list
backward_context = self._add_n_words_backward(
token_index,
self.context_prefix_count,
nlp_artifacts.lemmas,
lemmatized_keywords,
)
forward_context = self._add_n_words_forward(
token_index,
self.context_suffix_count,
nlp_artifacts.lemmas,
lemmatized_keywords,
)
context_list = []
context_list.extend(backward_context)
context_list.extend(forward_context)
context_list = list(set(context_list))
logger.debug("Context list is: %s", " ".join(context_list))
return context_list
@staticmethod
def _find_index_of_match_token(
word: str, start: int, tokens, tokens_indices: List[int] # noqa ANN001
) -> int:
found = False
# we use the known start index of the original word to find the actual
# token at that index, we are not checking for equivalence since the
# token might be just a substring of that word (e.g. for phone number
# 555-124564 the first token might be just '555' or for a match like '
# rocket' the actual token will just be 'rocket' hence the misalignment
# of indices)
# Note: we are iterating over the original tokens (not the lemmatized)
i = -1
for i, token in enumerate(tokens, 0):
# Either we found a token with the exact location, or
# we take a token whose character indices cover
# the index we are looking for.
if (tokens_indices[i] == start) or (start < tokens_indices[i] + len(token)):
# found the token of interest, the one around which
# we take n words, and we save the matching lemma
found = True
break
if not found:
raise ValueError(
"Did not find word '" + word + "' "
"in the list of tokens although it "
"is expected to be found"
)
return i
@staticmethod
def _add_n_words(
index: int,
n_words: int,
lemmas: List[str],
lemmatized_filtered_keywords: List[str],
is_backward: bool,
) -> List[str]:
"""
Prepare a string of context words.
Return a list of words which surrounds a lemma at a given index.
The words will be collected only if they exist in the filtered array
:param index: index of the lemma that its surrounding words we want
:param n_words: number of words to take
:param lemmas: array of lemmas
:param lemmatized_filtered_keywords: the array of filtered
lemmas from the original sentence,
:param is_backward: if true take the preceding words, if false,
take the succeeding words
"""
i = index
context_words = []
# The entity itself is of no interest to us...however we want to
# consider it anyway for cases where it is attached with no spaces
# to an interesting context word, so we allow it and add 1 to
# the number of collected words
# collect at most n words (in lower case)
remaining = n_words + 1
while 0 <= i < len(lemmas) and remaining > 0:
lower_lemma = lemmas[i].lower()
if lower_lemma in lemmatized_filtered_keywords:
context_words.append(lower_lemma)
remaining -= 1
i = i - 1 if is_backward else i + 1
return context_words
def _add_n_words_forward(
self,
index: int,
n_words: int,
lemmas: List[str],
lemmatized_filtered_keywords: List[str],
) -> List[str]:
return self._add_n_words(
index, n_words, lemmas, lemmatized_filtered_keywords, False
)
def _add_n_words_backward(
self,
index: int,
n_words: int,
lemmas: List[str],
lemmatized_filtered_keywords: List[str],
) -> List[str]:
return self._add_n_words(
index, n_words, lemmas, lemmatized_filtered_keywords, True
)
| (context_similarity_factor: float = 0.35, min_score_with_context_similarity: float = 0.4, context_prefix_count: int = 5, context_suffix_count: int = 0) |
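A usage sketch wiring a customized enhancer into the engine; it assumes AnalyzerEngine accepts a context_aware_enhancer keyword argument and that the import path below is available:
from presidio_analyzer import AnalyzerEngine
from presidio_analyzer.context_aware_enhancers import LemmaContextAwareEnhancer

enhancer = LemmaContextAwareEnhancer(
    context_similarity_factor=0.45,          # boost matched entities more aggressively
    min_score_with_context_similarity=0.4,
)
analyzer = AnalyzerEngine(context_aware_enhancer=enhancer)  # assumed keyword argument
results = analyzer.analyze(
    text="My zip code is 90210",
    language="en",
    context=["zip", "postal"],  # extra context words supplied at request time
)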
16,362 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | __init__ | null | def __init__(
self,
context_similarity_factor: float = 0.35,
min_score_with_context_similarity: float = 0.4,
context_prefix_count: int = 5,
context_suffix_count: int = 0,
):
super().__init__(
context_similarity_factor=context_similarity_factor,
min_score_with_context_similarity=min_score_with_context_similarity,
context_prefix_count=context_prefix_count,
context_suffix_count=context_suffix_count,
)
| (self, context_similarity_factor: float = 0.35, min_score_with_context_similarity: float = 0.4, context_prefix_count: int = 5, context_suffix_count: int = 0) |
16,363 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | _add_n_words |
Prepare a string of context words.
Return a list of words which surrounds a lemma at a given index.
The words will be collected only if they exist in the filtered array
:param index: index of the lemma that its surrounding words we want
:param n_words: number of words to take
:param lemmas: array of lemmas
:param lemmatized_filtered_keywords: the array of filtered
lemmas from the original sentence,
:param is_backward: if true take the preceding words, if false,
take the succeeding words
| @staticmethod
def _add_n_words(
index: int,
n_words: int,
lemmas: List[str],
lemmatized_filtered_keywords: List[str],
is_backward: bool,
) -> List[str]:
"""
Prepare a string of context words.
Return a list of words which surrounds a lemma at a given index.
The words will be collected only if they exist in the filtered array
:param index: index of the lemma that its surrounding words we want
:param n_words: number of words to take
:param lemmas: array of lemmas
:param lemmatized_filtered_keywords: the array of filtered
lemmas from the original sentence,
:param is_backward: if true take the preceding words, if false,
take the succeeding words
"""
i = index
context_words = []
# The entity itself is of no interest to us...however we want to
# consider it anyway for cases where it is attached with no spaces
# to an interesting context word, so we allow it and add 1 to
# the number of collected words
# collect at most n words (in lower case)
remaining = n_words + 1
while 0 <= i < len(lemmas) and remaining > 0:
lower_lemma = lemmas[i].lower()
if lower_lemma in lemmatized_filtered_keywords:
context_words.append(lower_lemma)
remaining -= 1
i = i - 1 if is_backward else i + 1
return context_words
| (index: int, n_words: int, lemmas: List[str], lemmatized_filtered_keywords: List[str], is_backward: bool) -> List[str] |
16,364 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | _add_n_words_backward | null | def _add_n_words_backward(
self,
index: int,
n_words: int,
lemmas: List[str],
lemmatized_filtered_keywords: List[str],
) -> List[str]:
return self._add_n_words(
index, n_words, lemmas, lemmatized_filtered_keywords, True
)
| (self, index: int, n_words: int, lemmas: List[str], lemmatized_filtered_keywords: List[str]) -> List[str] |
16,365 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | _add_n_words_forward | null | def _add_n_words_forward(
self,
index: int,
n_words: int,
lemmas: List[str],
lemmatized_filtered_keywords: List[str],
) -> List[str]:
return self._add_n_words(
index, n_words, lemmas, lemmatized_filtered_keywords, False
)
| (self, index: int, n_words: int, lemmas: List[str], lemmatized_filtered_keywords: List[str]) -> List[str] |
16,366 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | _extract_surrounding_words | Extract words surrounding another given word.
The text from which the context is extracted is given in the nlp
doc.
:param nlp_artifacts: An abstraction layer which holds different
items which are the result of a NLP pipeline
execution on a given text
:param word: The word to look for context around
:param start: The start index of the word in the original text
| def _extract_surrounding_words(
self, nlp_artifacts: NlpArtifacts, word: str, start: int
) -> List[str]:
"""Extract words surrounding another given word.
The text from which the context is extracted is given in the nlp
doc.
:param nlp_artifacts: An abstraction layer which holds different
items which are the result of a NLP pipeline
execution on a given text
:param word: The word to look for context around
:param start: The start index of the word in the original text
"""
if not nlp_artifacts.tokens:
logger.info("Skipping context extraction due to lack of NLP artifacts")
# if there are no nlp artifacts we cannot extract
# surrounding context, so we return a valid, yet empty,
# context list
return [""]
# Get the already prepared words in the given text, in their
# LEMMATIZED version
lemmatized_keywords = nlp_artifacts.keywords
# since the list of tokens is not necessarily aligned
# with the actual index of the match, we look for the
# token index which corresponds to the match
token_index = self._find_index_of_match_token(
word, start, nlp_artifacts.tokens, nlp_artifacts.tokens_indices
)
# index i belongs to the PII entity, take the preceding n words
# and the succeeding m words into a context list
backward_context = self._add_n_words_backward(
token_index,
self.context_prefix_count,
nlp_artifacts.lemmas,
lemmatized_keywords,
)
forward_context = self._add_n_words_forward(
token_index,
self.context_suffix_count,
nlp_artifacts.lemmas,
lemmatized_keywords,
)
context_list = []
context_list.extend(backward_context)
context_list.extend(forward_context)
context_list = list(set(context_list))
logger.debug("Context list is: %s", " ".join(context_list))
return context_list
| (self, nlp_artifacts: presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts, word: str, start: int) -> List[str] |
16,367 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | _find_index_of_match_token | null | @staticmethod
def _find_index_of_match_token(
word: str, start: int, tokens, tokens_indices: List[int] # noqa ANN001
) -> int:
found = False
# we use the known start index of the original word to find the actual
# token at that index, we are not checking for equivalence since the
# token might be just a substring of that word (e.g. for phone number
# 555-124564 the first token might be just '555' or for a match like '
# rocket' the actual token will just be 'rocket' hence the misalignment
# of indices)
# Note: we are iterating over the original tokens (not the lemmatized)
i = -1
for i, token in enumerate(tokens, 0):
# Either we found a token with the exact location, or
# we take a token whose character indices cover
# the index we are looking for.
if (tokens_indices[i] == start) or (start < tokens_indices[i] + len(token)):
# found the token of interest, the one around which
# we take n words, and we save the matching lemma
found = True
break
if not found:
raise ValueError(
"Did not find word '" + word + "' "
"in the list of tokens although it "
"is expected to be found"
)
return i
| (word: str, start: int, tokens, tokens_indices: List[int]) -> int |
16,368 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | _find_supportive_word_in_context |
Find words in the text which are relevant for context evaluation.
A word is considered a supportive context word if any of the recognizer's
predefined context keywords appears, exactly or as a substring, in the
collected context words.
:param context_list: words before and after the matched entity within
a specified window size
:param recognizer_context_list: a list of words considered as
context keywords manually specified by the recognizer's author
| @staticmethod
def _find_supportive_word_in_context(
context_list: List[str], recognizer_context_list: List[str]
) -> str:
"""
Find words in the text which are relevant for context evaluation.
A word is considered a supportive context word if any of the recognizer's
predefined context keywords appears, exactly or as a substring, in the
collected context words.
:param context_list: words before and after the matched entity within
a specified window size
:param recognizer_context_list: a list of words considered as
context keywords manually specified by the recognizer's author
"""
word = ""
# If the context list is empty, no need to continue
if context_list is None or recognizer_context_list is None:
return word
for predefined_context_word in recognizer_context_list:
# result == true only if any of the predefined context words
# is found exactly or as a substring in any of the collected
# context words
result = next(
(
True
for keyword in context_list
if predefined_context_word in keyword
),
False,
)
if result:
logger.debug("Found context keyword '%s'", predefined_context_word)
word = predefined_context_word
break
return word
| (context_list: List[str], recognizer_context_list: List[str]) -> str |
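To illustrate the substring semantics, a tiny sketch calling the static helper directly; the word lists are invented:
surrounding = ["call", "phone", "number"]  # lemmatized words collected around the match
recognizer_keywords = ["phone"]            # context words declared by the recognizer
word = LemmaContextAwareEnhancer._find_supportive_word_in_context(
    surrounding, recognizer_keywords
)
print(word)  # "phone": found exactly or as a substring of a collected context word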
16,369 | presidio_analyzer.context_aware_enhancers.lemma_context_aware_enhancer | enhance_using_context |
Update results in case the lemmas of surrounding words or input context
words are identical to the context words.
Using the surrounding words of the actual word matches, look
for specific strings that if found contribute to the score
of the result, improving the confidence that the match is
indeed of that PII entity type
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
| def enhance_using_context(
self,
text: str,
raw_results: List[RecognizerResult],
nlp_artifacts: NlpArtifacts,
recognizers: List[EntityRecognizer],
context: Optional[List[str]] = None,
) -> List[RecognizerResult]:
"""
Update results in case the lemmas of surrounding words or input context
words are identical to the context words.
Using the surrounding words of the actual word matches, look
for specific strings that if found contribute to the score
of the result, improving the confidence that the match is
indeed of that PII entity type
:param text: The actual text that was analyzed
:param raw_results: Recognizer results which didn't take
context into consideration
:param nlp_artifacts: The nlp artifacts contain elements
such as lemmatized tokens for better
accuracy of the context enhancement process
:param recognizers: the list of recognizers
:param context: list of context words
""" # noqa D205 D400
# create a deep copy of the results object, so we can manipulate it
results = copy.deepcopy(raw_results)
# create recognizer context dictionary
recognizers_dict = {recognizer.id: recognizer for recognizer in recognizers}
# Create an empty list if None, or lowercase all context words in the list
if not context:
context = []
else:
context = [word.lower() for word in context]
# Sanity
if nlp_artifacts is None:
logger.warning("NLP artifacts were not provided")
return results
for result in results:
recognizer = None
# get recognizer matching the result, if found.
if (
result.recognition_metadata
and RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
in result.recognition_metadata.keys()
):
recognizer = recognizers_dict.get(
result.recognition_metadata[
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY
]
)
if not recognizer:
logger.debug(
"Recognizer name not found as part of the "
"recognition_metadata dict in the RecognizerResult. "
)
continue
# skip recognizer result if the recognizer doesn't support
# context enhancement
if not recognizer.context:
logger.debug(
"recognizer '%s' does not support context enhancement",
recognizer.name,
)
continue
# skip context enhancement if already boosted by recognizer level
if result.recognition_metadata.get(
RecognizerResult.IS_SCORE_ENHANCED_BY_CONTEXT_KEY
):
logger.debug("result score already boosted, skipping")
continue
# extract lemmatized context from the surrounding of the match
word = text[result.start : result.end]
surrounding_words = self._extract_surrounding_words(
nlp_artifacts=nlp_artifacts, word=word, start=result.start
)
# combine other sources of context with surrounding words
surrounding_words.extend(context)
supportive_context_word = self._find_supportive_word_in_context(
surrounding_words, recognizer.context
)
if supportive_context_word != "":
result.score += self.context_similarity_factor
result.score = max(result.score, self.min_score_with_context_similarity)
result.score = min(result.score, ContextAwareEnhancer.MAX_SCORE)
# Update the explainability object with context information
# helped to improve the score
result.analysis_explanation.set_supportive_context_word(
supportive_context_word
)
result.analysis_explanation.set_improved_score(result.score)
return results
| (self, text: str, raw_results: List[presidio_analyzer.recognizer_result.RecognizerResult], nlp_artifacts: presidio_analyzer.nlp_engine.nlp_artifacts.NlpArtifacts, recognizers: List[presidio_analyzer.entity_recognizer.EntityRecognizer], context: Optional[List[str]] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
16,370 | presidio_analyzer.local_recognizer | LocalRecognizer | PII entity recognizer which runs on the same process as the AnalyzerEngine. | class LocalRecognizer(ABC, EntityRecognizer):
"""PII entity recognizer which runs on the same process as the AnalyzerEngine."""
| (supported_entities: List[str], name: str = None, supported_language: str = 'en', version: str = '0.0.1', context: Optional[List[str]] = None) |
16,380 | presidio_analyzer.pattern | Pattern |
A class that represents a regex pattern.
:param name: the name of the pattern
:param regex: the regex pattern to detect
:param score: the pattern's strength (value varies between 0 and 1)
| class Pattern:
"""
A class that represents a regex pattern.
:param name: the name of the pattern
:param regex: the regex pattern to detect
:param score: the pattern's strength (value varies between 0 and 1)
"""
def __init__(self, name: str, regex: str, score: float):
self.name = name
self.regex = regex
self.score = score
self.compiled_regex = None
self.compiled_with_flags = None
def to_dict(self) -> Dict:
"""
Turn this instance into a dictionary.
:return: a dictionary
"""
return_dict = {"name": self.name, "score": self.score, "regex": self.regex}
return return_dict
@classmethod
def from_dict(cls, pattern_dict: Dict) -> "Pattern":
"""
Load an instance from a dictionary.
:param pattern_dict: a dictionary holding the pattern's parameters
:return: a Pattern instance
"""
return cls(**pattern_dict)
def __repr__(self):
"""Return string representation of instance."""
return json.dumps(self.to_dict())
def __str__(self):
"""Return string representation of instance."""
return json.dumps(self.to_dict())
| (name: str, regex: str, score: float) |
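A brief, hedged sketch of round-tripping a Pattern through its dict form; the pattern name, regex, and score are illustrative.

from presidio_analyzer import Pattern

phone_pattern = Pattern(name="phone (weak)", regex=r"\d{3}-\d{3}-\d{4}", score=0.3)
as_dict = phone_pattern.to_dict()      # {'name': ..., 'score': ..., 'regex': ...}
restored = Pattern.from_dict(as_dict)  # rebuilt from the plain dict
print(repr(restored))                  # JSON-style representation via __repr__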
16,381 | presidio_analyzer.pattern | __init__ | null | def __init__(self, name: str, regex: str, score: float):
self.name = name
self.regex = regex
self.score = score
self.compiled_regex = None
self.compiled_with_flags = None
| (self, name: str, regex: str, score: float) |
16,382 | presidio_analyzer.pattern | __repr__ | Return string representation of instance. | def __repr__(self):
"""Return string representation of instance."""
return json.dumps(self.to_dict())
| (self) |
16,383 | presidio_analyzer.pattern | __str__ | Return string representation of instance. | def __str__(self):
"""Return string representation of instance."""
return json.dumps(self.to_dict())
| (self) |
16,384 | presidio_analyzer.pattern | to_dict |
Turn this instance into a dictionary.
:return: a dictionary
| def to_dict(self) -> Dict:
"""
Turn this instance into a dictionary.
:return: a dictionary
"""
return_dict = {"name": self.name, "score": self.score, "regex": self.regex}
return return_dict
| (self) -> Dict |
16,385 | presidio_analyzer.pattern_recognizer | PatternRecognizer |
PII entity recognizer using regular expressions or deny-lists.
:param patterns: A list of patterns to detect
:param deny_list: A list of words to detect,
in case our recognizer uses a predefined list of words (deny list)
:param context: list of context words
:param deny_list_score: confidence score for a term
identified using a deny-list
:param global_regex_flags: regex flags to be used in regex matching,
including deny-lists.
| class PatternRecognizer(LocalRecognizer):
"""
PII entity recognizer using regular expressions or deny-lists.
:param patterns: A list of patterns to detect
:param deny_list: A list of words to detect,
in case our recognizer uses a predefined list of words (deny list)
:param context: list of context words
:param deny_list_score: confidence score for a term
identified using a deny-list
:param global_regex_flags: regex flags to be used in regex matching,
including deny-lists.
"""
def __init__(
self,
supported_entity: str,
name: str = None,
supported_language: str = "en",
patterns: List[Pattern] = None,
deny_list: List[str] = None,
context: List[str] = None,
deny_list_score: float = 1.0,
global_regex_flags: Optional[int] = re.DOTALL | re.MULTILINE | re.IGNORECASE,
version: str = "0.0.1",
):
if not supported_entity:
raise ValueError("Pattern recognizer should be initialized with entity")
if not patterns and not deny_list:
raise ValueError(
"Pattern recognizer should be initialized with patterns"
" or with deny list"
)
super().__init__(
supported_entities=[supported_entity],
supported_language=supported_language,
name=name,
version=version,
)
if patterns is None:
self.patterns = []
else:
self.patterns = patterns
self.context = context
self.deny_list_score = deny_list_score
self.global_regex_flags = global_regex_flags
if deny_list:
deny_list_pattern = self._deny_list_to_regex(deny_list)
self.patterns.append(deny_list_pattern)
self.deny_list = deny_list
else:
self.deny_list = []
def load(self): # noqa D102
pass
def analyze(
self,
text: str,
entities: List[str],
nlp_artifacts: Optional[NlpArtifacts] = None,
regex_flags: Optional[int] = None,
) -> List[RecognizerResult]:
"""
Analyzes text to detect PII using regular expressions or deny-lists.
:param text: Text to be analyzed
:param entities: Entities this recognizer can detect
:param nlp_artifacts: Output values from the NLP engine
:param regex_flags: regex flags to be used in regex matching
:return:
"""
results = []
if self.patterns:
pattern_result = self.__analyze_patterns(text, regex_flags)
results.extend(pattern_result)
return results
def _deny_list_to_regex(self, deny_list: List[str]) -> Pattern:
"""
Convert a list of words to a matching regex.
To be analyzed by the analyze method as any other regex patterns.
:param deny_list: the list of words to detect
:return:the regex of the words for detection
"""
# Escape deny list elements as preparation for regex
escaped_deny_list = [re.escape(element) for element in deny_list]
regex = r"(?:^|(?<=\W))(" + "|".join(escaped_deny_list) + r")(?:(?=\W)|$)"
return Pattern(name="deny_list", regex=regex, score=self.deny_list_score)
def validate_result(self, pattern_text: str) -> Optional[bool]:
"""
Validate the pattern logic e.g., by running checksum on a detected pattern.
        :param pattern_text: the text to be validated.
        Only the part of the text that was detected by the regex engine
:return: A bool indicating whether the validation was successful.
"""
return None
def invalidate_result(self, pattern_text: str) -> Optional[bool]:
"""
Logic to check for result invalidation by running pruning logic.
For example, each SSN number group should not consist of all the same digits.
        :param pattern_text: the text to be validated.
        Only the part of the text that was detected by the regex engine
:return: A bool indicating whether the result is invalidated
"""
return None
@staticmethod
def build_regex_explanation(
recognizer_name: str,
pattern_name: str,
pattern: str,
original_score: float,
validation_result: bool,
regex_flags: int,
) -> AnalysisExplanation:
"""
Construct an explanation for why this entity was detected.
:param recognizer_name: Name of recognizer detecting the entity
:param pattern_name: Regex pattern name which detected the entity
:param pattern: Regex pattern logic
:param original_score: Score given by the recognizer
:param validation_result: Whether validation was used and its result
:param regex_flags: Regex flags used in the regex matching
:return: Analysis explanation
"""
explanation = AnalysisExplanation(
recognizer=recognizer_name,
original_score=original_score,
pattern_name=pattern_name,
pattern=pattern,
validation_result=validation_result,
regex_flags=regex_flags,
)
return explanation
def __analyze_patterns(
self, text: str, flags: int = None
) -> List[RecognizerResult]:
"""
        Evaluate all patterns in the provided text,
        including words in the provided deny-list.
:param text: text to analyze
:param flags: regex flags
:return: A list of RecognizerResult
"""
flags = flags if flags else self.global_regex_flags
results = []
for pattern in self.patterns:
match_start_time = datetime.datetime.now()
# Compile regex if flags differ from flags the regex was compiled with
if not pattern.compiled_regex or pattern.compiled_with_flags != flags:
pattern.compiled_with_flags = flags
pattern.compiled_regex = re.compile(pattern.regex, flags=flags)
matches = pattern.compiled_regex.finditer(text)
match_time = datetime.datetime.now() - match_start_time
logger.debug(
"--- match_time[%s]: %s.%s seconds",
pattern.name,
match_time.seconds,
match_time.microseconds,
)
for match in matches:
start, end = match.span()
current_match = text[start:end]
# Skip empty results
if current_match == "":
continue
score = pattern.score
validation_result = self.validate_result(current_match)
description = self.build_regex_explanation(
self.name,
pattern.name,
pattern.regex,
score,
validation_result,
flags,
)
pattern_result = RecognizerResult(
entity_type=self.supported_entities[0],
start=start,
end=end,
score=score,
analysis_explanation=description,
recognition_metadata={
RecognizerResult.RECOGNIZER_NAME_KEY: self.name,
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY: self.id,
},
)
if validation_result is not None:
if validation_result:
pattern_result.score = EntityRecognizer.MAX_SCORE
else:
pattern_result.score = EntityRecognizer.MIN_SCORE
invalidation_result = self.invalidate_result(current_match)
if invalidation_result is not None and invalidation_result:
pattern_result.score = EntityRecognizer.MIN_SCORE
if pattern_result.score > EntityRecognizer.MIN_SCORE:
results.append(pattern_result)
# Update analysis explanation score following validation or invalidation
description.score = pattern_result.score
results = EntityRecognizer.remove_duplicates(results)
return results
def to_dict(self) -> Dict:
"""Serialize instance into a dictionary."""
return_dict = super().to_dict()
return_dict["patterns"] = [pat.to_dict() for pat in self.patterns]
return_dict["deny_list"] = self.deny_list
return_dict["context"] = self.context
return_dict["supported_entity"] = return_dict["supported_entities"][0]
del return_dict["supported_entities"]
return return_dict
@classmethod
def from_dict(cls, entity_recognizer_dict: Dict) -> "PatternRecognizer":
"""Create instance from a serialized dict."""
patterns = entity_recognizer_dict.get("patterns")
if patterns:
patterns_list = [Pattern.from_dict(pat) for pat in patterns]
entity_recognizer_dict["patterns"] = patterns_list
return cls(**entity_recognizer_dict)
| (supported_entity: str, name: str = None, supported_language: str = 'en', patterns: List[presidio_analyzer.pattern.Pattern] = None, deny_list: List[str] = None, context: List[str] = None, deny_list_score: float = 1.0, global_regex_flags: Optional[int] = regex.I|regex.M|regex.S, version: str = '0.0.1') |
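A minimal, hedged usage sketch of a deny-list based recognizer; the TITLE entity and the list of terms are made up for illustration.

from presidio_analyzer import PatternRecognizer

titles_recognizer = PatternRecognizer(
    supported_entity="TITLE",
    deny_list=["Mr.", "Mrs.", "Ms.", "Dr."],  # converted internally to a single regex
)
results = titles_recognizer.analyze(text="Dr. Jane met Mr. Smith", entities=["TITLE"])
for res in results:
    print(res.entity_type, res.start, res.end, res.score)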
16,386 | presidio_analyzer.pattern_recognizer | __analyze_patterns |
Evaluate all patterns in the provided text,
including words in the provided deny-list.
:param text: text to analyze
:param flags: regex flags
:return: A list of RecognizerResult
| def __analyze_patterns(
self, text: str, flags: int = None
) -> List[RecognizerResult]:
"""
        Evaluate all patterns in the provided text,
        including words in the provided deny-list.
:param text: text to analyze
:param flags: regex flags
:return: A list of RecognizerResult
"""
flags = flags if flags else self.global_regex_flags
results = []
for pattern in self.patterns:
match_start_time = datetime.datetime.now()
# Compile regex if flags differ from flags the regex was compiled with
if not pattern.compiled_regex or pattern.compiled_with_flags != flags:
pattern.compiled_with_flags = flags
pattern.compiled_regex = re.compile(pattern.regex, flags=flags)
matches = pattern.compiled_regex.finditer(text)
match_time = datetime.datetime.now() - match_start_time
logger.debug(
"--- match_time[%s]: %s.%s seconds",
pattern.name,
match_time.seconds,
match_time.microseconds,
)
for match in matches:
start, end = match.span()
current_match = text[start:end]
# Skip empty results
if current_match == "":
continue
score = pattern.score
validation_result = self.validate_result(current_match)
description = self.build_regex_explanation(
self.name,
pattern.name,
pattern.regex,
score,
validation_result,
flags,
)
pattern_result = RecognizerResult(
entity_type=self.supported_entities[0],
start=start,
end=end,
score=score,
analysis_explanation=description,
recognition_metadata={
RecognizerResult.RECOGNIZER_NAME_KEY: self.name,
RecognizerResult.RECOGNIZER_IDENTIFIER_KEY: self.id,
},
)
if validation_result is not None:
if validation_result:
pattern_result.score = EntityRecognizer.MAX_SCORE
else:
pattern_result.score = EntityRecognizer.MIN_SCORE
invalidation_result = self.invalidate_result(current_match)
if invalidation_result is not None and invalidation_result:
pattern_result.score = EntityRecognizer.MIN_SCORE
if pattern_result.score > EntityRecognizer.MIN_SCORE:
results.append(pattern_result)
# Update analysis explanation score following validation or invalidation
description.score = pattern_result.score
results = EntityRecognizer.remove_duplicates(results)
return results
| (self, text: str, flags: Optional[int] = None) -> List[presidio_analyzer.recognizer_result.RecognizerResult] |
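A hedged sketch of how validate_result feeds into the pattern analysis above: a True validation promotes the match to MAX_SCORE, a False one drops it to MIN_SCORE so it is filtered out. The entity, regex, and even-digit-sum rule are invented for illustration.

from typing import Optional
from presidio_analyzer import Pattern, PatternRecognizer

class EvenSumRecognizer(PatternRecognizer):
    def validate_result(self, pattern_text: str) -> Optional[bool]:
        # Hypothetical checksum: accept only numbers whose digit sum is even.
        return sum(int(ch) for ch in pattern_text if ch.isdigit()) % 2 == 0

recognizer = EvenSumRecognizer(
    supported_entity="EVEN_NUMBER",
    patterns=[Pattern(name="four digits", regex=r"\b\d{4}\b", score=0.1)],
)
# 1234 sums to 10 (even) -> promoted to MAX_SCORE; 1235 fails validation ->
# dropped because its score falls to MIN_SCORE.
print(recognizer.analyze(text="codes 1234 and 1235", entities=["EVEN_NUMBER"]))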