inputs (stringlengths 312–52k) | targets (stringlengths 1–3.1k, ⌀) | block_type (stringclasses, 11 values) | scenario (stringclasses, 7 values) |
---|---|---|---|
<filename>open-iris/src/iris/nodes/iris_response/probe_schemas/regular_probe_schema.py<fim_prefix>from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, PositiveInt, confloat, fields, validator
from iris.io.errors import ProbeSchemaError
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
class RegularProbeSchema(ProbeSchema):
"""Probe Schema for a regular Grid."""
class RegularProbeSchemaParameters(ProbeSchema.ProbeSchemaParameters):
"""RegularProbeSchema parameters."""
n_rows: int = Field(..., gt=1)
n_cols: int = Field(..., gt=1)
boundary_rho: List[confloat(ge=0.0, lt=1)]
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"],
List[confloat(ge=0.0, lt=1)],
]
image_shape: Optional[List[PositiveInt]]
@validator("boundary_rho", "boundary_phi")
def check_overlap(
cls: type,
v: Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]],
field: fields.ModelField,
) -> Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]:
"""Validate offsets to avoid overlap.
Args:
cls (type): Class type.
v (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ProbeSchemaError: Raised if the offsets on both ends sum to 1 or more, i.e. they overlap.
Returns:
Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]: The value for boundary_rho or boundary_phi respectively
"""
if isinstance(v, List):
if (v[0] + v[1]) >= 1:
raise ProbeSchemaError(
f"Offset for {field.name} on left and right corner must be a sum smaller 1, otherwise, offsets overlap."
)
return v
__parameters_type__ = RegularProbeSchemaParameters
def __init__(
self,
n_rows: int,
n_cols: int,
boundary_rho: List[float] = [0, 0.0625],
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]
] = "periodic-left",
image_shape: Optional[List[PositiveInt]] = None,
) -> None:
"""Assign parameters.
Args:
n_rows (int): Number of rows used, represents the number of different rho
values
n_cols (int): Number of columns used, represents the number of different
phi values
boundary_rho (List[float], optional): List with two values f1 and f2. The sampling goes from 0+f1 to 1-f2. Defaults to [0, 0.0625].
boundary_phi (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]], optional): Boundary conditions for the probing.
They can either be periodic or non-periodic. If they are periodic, the distance
from one column to the next must also hold at the boundaries.
Otherwise, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last column are placed with an offset from the
borders equal to half of the spacing between two columns
- 'periodic-left': the first column is at the bottom border of the image, while
the last column is one spacing apart from the top of the image
- list with two values: an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
Defaults to "periodic-left".
image_shape (list, optional): list containing the desired image dimensions. If provided, generate_schema raises
an error if interpolation would happen, i.e. if a probe position would fall between two pixels. Defaults to None.
"""
super().__init__(
n_rows=n_rows,
n_cols=n_cols,
boundary_rho=boundary_rho,
boundary_phi=boundary_phi,
image_shape=image_shape,
)
def generate_schema(self) -> Tuple[np.ndarray, np.ndarray]:
"""Generate rhos and phis.
Returns:
Tuple[np.ndarray, np.ndarray]: the rhos and phis.
"""
rho = np.linspace(
0 + self.params.boundary_rho[0], 1 - self.params.boundary_rho[1], self.params.n_rows, endpoint=True
)
if self.params.boundary_phi == "periodic-symmetric":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
phi = phi + (phi[1] - phi[0]) / 2
if self.params.bo<fim_suffix>undary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
)
phis, rhos = np.meshgrid(phi, rho)
rhos = rhos.flatten()
phis = phis.flatten()
# if image_shape provided: verify that values lie on pixel values
if self.params.image_shape is not None:
rhos_pixel_values = rhos * self.params.image_shape[0]
phis_pixel_values = phis * self.params.image_shape[1]
rho_pixel_values = np.logical_or(
np.less_equal(rhos_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), rhos_pixel_values % 1),
).all()
phi_pixel_values = np.logical_or(
np.less_equal(phis_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), phis_pixel_values % 1),
).all()
if not rho_pixel_values:
raise ProbeSchemaError(
f"Choice for n_rows {self.params.n_rows} leads to interpolation errors, please change input variables"
)
if not phi_pixel_values:
raise ProbeSchemaError(f"Choice for n_cols {self.params.n_cols} leads to interpolation errors")
return rhos, phis
@staticmethod
def find_suitable_n_rows(
row_min: int,
row_max: int,
length: int,
boundary_condition: Union[
Literal["periodic-symmetric", "periodic-left"],
List[float],
] = "periodic_symmetric",
) -> List[int]:
"""Find proper spacing of rows/columns for given boundary conditions (i.e. image size, offset. etc).
Args:
row_min (int): Starting value for row count
row_max (int): End value for row count
length (int): Pixels in the respective dimension
boundary_condition (Union[Literal["periodic-symmetric", "periodic-left"], List[float]], optional): Boundary conditions for the probing.
They can either be periodic or non-periodic. If they are periodic, the distance
from one row to the next must also hold at the boundaries.
Otherwise, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last row are placed with an offset from the
borders equal to half of the spacing between two rows
- 'periodic-left': the first row is at the bottom border of the image, while
the last row is one spacing apart from the top of the image
- list with two values: an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
Defaults to "periodic-symmetric".
Returns:
List[int]: List of all numbers of rows that do not lead to interpolation errors.
"""
suitable_values: List[int] = []
# loop through all values and validate whether they are suitable
for counter in range(row_min, row_max + 1):
if boundary_condition == "periodic-symmetric":
values = np.linspace(0, 1, counter, endpoint=False)
values = values + (values[1] - values[0]) / 2
if boundary_condition == "periodic-left":
values = np.linspace(0, 1, counter, endpoint=False)
if isinstance(boundary_condition, List):
values = np.linspace(0 + boundary_condition[0], 1 - boundary_condition[1], counter, endpoint=True)
pixel_values = values * length
pixel_values_modulo = pixel_values % 1
no_interpolation = np.less_equal(pixel_values_modulo, 10 ** (-10))
no_interpolation = np.logical_or(no_interpolation, np.less_equal(1 - 10 ** (-10), pixel_values_modulo))
no_interpolation = no_interpolation.all()
if no_interpolation:
suitable_values.append(counter)
return suitable_values
<fim_middle> | null | IF | complete_current_header_empty_completion |
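A minimal usage sketch for the probe schema in the row above, assuming the full open-iris package is installed; the parameter values are illustrative only:

```python
from iris.nodes.iris_response.probe_schemas.regular_probe_schema import RegularProbeSchema

# Row counts between 4 and 32 that avoid interpolation for a 128-pixel dimension.
valid_rows = RegularProbeSchema.find_suitable_n_rows(
    row_min=4, row_max=32, length=128, boundary_condition="periodic-left"
)
print(valid_rows)  # expected: [4, 8, 16, 32]

schema = RegularProbeSchema(n_rows=16, n_cols=256, boundary_phi="periodic-left")
rhos, phis = schema.generate_schema()  # flattened sampling positions in [0, 1)
assert rhos.shape == phis.shape == (16 * 256,)
```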
<filename>open-iris/src/iris/io/validators.py<fim_prefix>from typing import Any, Callable, Dict, Iterable, List
import numpy as np
from pydantic import fields
# ----- validators -----
def is_odd(cls: type, v: int, field: fields.ModelField) -> int:
"""Check that kernel size are odd numbers.
Args:
cls (type): Class type.
v (int): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if number isn't odd.
Returns:
int: `v` sent for further processing.
"""
if (v % 2) == 0:
raise ValueError(f"{cls.__name__}: {field.name} must be odd numbers.")
return v
def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if array has only boolean values, i.e. is binary.
Args:
cls (type): Class type.
v (np.ndarray): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if array doesn't contain bool datatypes.
Returns:
np.ndarray: `v` sent for further processing.
"""
if v.dtype != np.dtype("bool"):
raise ValueError(f"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}")
return v
def is_list_of_points(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if np.ndarray has shape (_, 2).
Args:
cls (type): Class type.
v (np.ndarray): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if array doesn't contain 2D points.
Returns:
np.ndarray: `v` sent for further processing.
"""
if len(v.shape) != 2 or v.shape[1] != 2:
raise ValueError(f"{cls.__name__}: {field.name} must have shape (_, 2).")
return v
def is_not_empty(cls: type, v: List[Any], field: fields.ModelField) -> List[Any]:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (List[Any]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Exception raised if list is empty.
Returns:
List[Any]: `v` sent for further processing.
"""
if len(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} list cannot be empty.")
return v
def is_not_zero_sum(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that both inputs are not empty.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if v sums to 0.
Returns:
Any: `v` sent for further processing.
"""
if np.sum(v) == 0:
raise ValueError(f"{cls.__name__}: {field.name} sum cannot be zero.")
return v
def are_all_positive(cls: type, v: Any, field: fields.ModelField) -> Any:
"""Check that all values are positive.
Args:
cls (type): Class type.
v (Any): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ValueError: Raised if not all values are positive.
Returns:
Any: `v` sent for further processing.
"""
if isinstance(v, Iterable):
if not np.array([value >= 0 for value in v]).all():
raise ValueError(f"{cls.__name__}: all {field.name} must be positive. Received {v}")
elif v < 0.0:
raise ValueError(f"{cls.__name__}: {field.name} must be positive. Received {v}")
return v
def to_dtype_float32(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Convert input np.ndarray to dtype np.float32.
Args:
cls (type): Class type.
v (np.ndarray): Value to convert
field (fields.ModelField): Field descriptor.
Returns:
np.ndarray: `v` sent for further processing.
"""
return v.astype(np.float32)
# ----- root_validators -----
def is_valid_bbox(cls: type, values: Dict[str, float]) -> Dict[str, float]:
"""Check that the bounding box is valid."""
if values["x_min"] >= values["x_max"] or values["y_min"] >= values["y_max"]:
raise ValueError(
f'{cls.__name__}: invalid bbox. x_min={values["x_min"]}, x_max={values["x_max"]},'
f' y_min={values["y_min"]}, y_max={values["y_max"]}'
)
return values
# ----- parametrized validators -----
def is_array_n_dimensions(nb_dimensions: int) -> Callable:
"""Create a pydantic validator checking if an array is n-dimensional.
Args:
nb_dimensions (int): number of dimensions the array must have
Returns:
Callable: the validator.
"""
def validator(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:
"""Check if the array has the right number of dimensions."""
if len(v.shape) != nb_dimensions and (v.shape != (0,) or nb_dimensions != 0):
raise ValueError(
f"{cls.__name__}: wrong number of dimensions for {field.name}. "
f"Expected {nb_dimensions}, got {len(v.shape)}"
)
return v
return validator
# ----- parametrized root_validators -----
def are_lengths_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if the two fields have the same length.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, List[Any]]) -> Dict[str, List[Any]]:
"""Check if len(field1) equals len(field2)."""
if len(values[field1]) != len(val<fim_suffix>ues[field2]):
raise ValueError(
f"{cls.__name__}: {field1} and {field2} length mismatch, "
f"resp. {len(values[field1])} and {len(values[field2])}"
)
return values
return __root_validator
def are_shapes_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if the two fields have the same shape.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""Check if field1.shape equals field2.shape."""
if values[field1].shape != values[field2].shape:
raise ValueError(f"{cls.__name__}: {field1} and {field2} shape mismatch.")
return values
return __root_validator
def are_all_shapes_equal(field1: str, field2: str) -> Callable:
"""Create a pydantic validator checking if two lists of array have the same shape per element.
This function creates a pydantic validator for two lists of np.ndarrays which checks if they have the same length,
and if all of their element have the same shape one by one.
Args:
field1 (str): name of the first field
field2 (str): name of the second field
Returns:
Callable: the validator.
"""
def __root_validator(cls: type, values: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
"""Check if len(field1) equals len(field2) and if every element have the same shape."""
shapes_field_1 = [element.shape for element in values[field1]]
shapes_field_2 = [element.shape for element in values[field2]]
if len(values[field1]) != len(values[field2]) or shapes_field_1 != shapes_field_2:
raise ValueError(
f"{cls.__name__}: {field1} and {field2} shape mismatch, resp. {shapes_field_1} and {shapes_field_2}."
)
return values
return __root_validator
<fim_middle> | null | IF | complete_current_header_empty_completion |
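A minimal sketch of how these reusable validators are wired into a pydantic (v1) model, following the `validator(..., allow_reuse=True)` pattern used elsewhere in this dataset; the `KernelConfig` model is hypothetical:

```python
from typing import List

from pydantic import BaseModel, root_validator, validator

from iris.io.validators import are_lengths_equal, is_odd


class KernelConfig(BaseModel):
    """Hypothetical config reusing the shared validators."""

    kernel_size: int
    xs: List[float]
    ys: List[float]

    # Field-level reuse of is_odd, mirroring the pattern in gabor_filters.py.
    _kernel_size_is_odd = validator("kernel_size", allow_reuse=True)(is_odd)
    # Parametrized root validator: xs and ys must have the same length.
    _same_length = root_validator(allow_reuse=True)(are_lengths_equal("xs", "ys"))


cfg = KernelConfig(kernel_size=21, xs=[0.1, 0.2], ys=[0.3, 0.4])  # validates
# KernelConfig(kernel_size=20, xs=[0.1], ys=[])  # would raise a ValidationError
```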
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with boundary points (pupil_points, iris_points) rotated by the eye_orientation angle.
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h -<fim_suffix> 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle> | null | IF | complete_current_header_empty_completion |
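A short sketch of the bilinear interpolation helper above, assuming the full open-iris package is installed; the 2x2 toy image is hypothetical:

```python
import numpy as np

from iris.nodes.normalization.common import interpolate_pixel_intensity

# The point (0.5, 0.5) sits exactly between the four pixels of this toy image.
image = np.array([[0.0, 10.0], [20.0, 30.0]])
value = interpolate_pixel_intensity(image, pixel_coords=(0.5, 0.5))
print(value)  # 15.0, the mean of the four neighbouring intensities
```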
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than or equal to kernel_size[0], 2) sigma_rho is greater than or equal to kernel_size[1], 3) lambda_phi is greater than or equal to kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if lambda_rho is greater than or equal to kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
i<fim_suffix>f norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle> | null | IF | complete_current_header_empty_completion |
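A brief usage sketch for the GaborFilter class defined in the row above, assuming the full open-iris package is installed; the parameter values are illustrative, not recommended defaults:

```python
from iris.nodes.iris_response.image_filters.gabor_filters import GaborFilter

gabor = GaborFilter(
    kernel_size=(21, 21),  # both sides odd and within (3, 99), per the Parameters model
    sigma_phi=7.0,
    sigma_rho=7.0,
    theta_degrees=0.0,
    lambda_phi=8.0,
    dc_correction=True,
    to_fixpoints=False,
)
kernel = gabor.compute_kernel_values()  # complex np.ndarray with shape equal to kernel_size
```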
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if lambda_rho is greater than or equal to kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
#<fim_suffix> normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
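This row repeats the same filter module with a different completion point; as a companion to the sketch above, the following illustrative construction exercises the LogGaborFilter variant (same assumptions: installed open-iris package, hypothetical parameter values):

```python
import numpy as np

from iris.nodes.iris_response.image_filters.gabor_filters import LogGaborFilter

log_gabor = LogGaborFilter(
    kernel_size=(21, 21),
    sigma_phi=np.pi / 8,  # must lie in (0, pi]
    sigma_rho=0.5,        # must lie in (0.1, 1]
    theta_degrees=90.0,
    lambda_rho=16.0,      # must be > 2 and smaller than kernel_size[1]
    to_fixpoints=False,
)
kernel = log_gabor.compute_kernel_values()  # complex np.ndarray with shape equal to kernel_size
```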
<filename>open-iris/src/iris/utils/math.py<fim_prefix>import math
from typing import Dict, Tuple
import numpy as np
def area(array: np.ndarray) -> float:
"""Shoelace formula for simple polygon area calculation.
WARNING: This formula only works for simple polygons, i.e. planar polygons without self-intersections or holes.
These conditions are not checked within this function.
Args:
array (np.ndarray): np array representing a polygon as a list of points, i.e. of shape (_, 2).
Raises:
ValueError: if the input array does not have shape (_, 2)
Returns:
float: Polygon area
References:
[1] https://en.wikipedia.org/wiki/Shoelace_formula
[2] https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
"""
if len(array.shape) != 2 or array.shape[1] != 2:
raise ValueError(f"Unable to determine the area of a polygon with shape {array.shape}. Expecting (_, 2).")
xs, ys = array.T
area = 0.5 * np.abs(np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))
return float(area)
def estimate_diameter(polygon: np.ndarray) -> float:
"""Estimates the diameter of an arbitrary arc by evaluating the maximum distance between any two points on the arc.
Args:
polygon (np.ndarray): Polygon points.
Returns:
float: Estimated diameter length.
Reference:
[1] https://sparrow.dev/pairwise-distance-in-numpy/
"""
return float(np.linalg.norm(polygon[:, None, :] - polygon[None, :, :], axis=-1).max())
def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:
"""Convert xs and ys cartesian coordinates to polar coordinates.
Args:
xs (np.ndarray): x values.
ys (np.ndarray): y values.
center_x (float): center's x.
center_y (float): center's y.
Returns:
Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).
"""
x_rel: np.ndarray = xs - center_x
y_rel: np.ndarray = ys - center_y
C = np.vectorize(complex)(x_rel, y_rel)
rho = np.abs(C)
phi = np.angle(C) % (2 * np.pi)
return rho, phi
def polar2cartesian(
rhos: np.ndarray, phis: np.ndarray, center_x: float, center_y: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Convert polar coordinates to cartesian coordinates.
Args:
rhos (np.ndarray): rho values.
phis (np.ndarray): phi values.
center_x (float): center's x.
center_y (float): center's y.
Returns:
Tuple[np.ndarray, np.ndarray]: Converted coordinates (xs, ys).
"""
xs = center_x + rhos * np.cos(phis)
ys = center_y + rhos * np.sin(phis)
return xs, ys
def orientation(moments: Dict[str, float]) -> float:
"""Compute the main orientation of a contour or a binary image given its precomputed cv2 moments.
Args:
moments (Dict[str, float]): cv2.moments of the desired binary image or contour.
Returns:
float: Main orientation of the shape. The orientation is a float in [-pi/2, pi/2[ representing the signed angle from the x axis.
"""
# Edge case of null denominator
if (moments["mu20"] - moments["mu02"]) == 0:
if moments["mu11"] == 0:
orientation = 0.0
else:
orientation = math.copysign(np.pi / 4, moments["mu11"])
else:
# General formula
orientation = 0.5 * np.arctan(2 * moments["mu11"] / (moments["mu20"] - moments["mu02"]))
if (moments["mu20"] - moments["mu02"]) < 0:
orientation += np.pi / 2
# Restricting the angle to [-pi/2, pi/2[
orientation = np.mod(orientation + np.pi / 2, np.pi) - np.pi / 2
return orientation
def eccentricity(moments: Dict[str, float]) -> float:
r"""Compute the eccentricity of a contour or a binary image given its precomputed cv2 moments.
The eccentricity is a number in [0, 1] which characterises the "roundness" or "linearity" of a shape.
A perfect circle will have an eccentricity of 0, and an infinite line an eccentricity of 1.
For ellipses, the eccentricity is calculated as :math:`\frac{\sqrt{a^2 - b^2}}{a}`
with a (resp. b) the semi-major (resp. semi-minor) axis of the ellipse.
For `mu20 + mu02 == 0`, i.e. perfect line, the max theoretical value (1.0) is returned
Args:
moments (Dict[str, float]): cv2.moments of the desired binary image or contour.
Returns:
eccentricity (float): the eccentricity of the contour or binary map.
Reference:
[1] https://t1.daumcdn.net/cfile/tistory/15425F4150F4EBFC19
"""
if moments["mu20"] + moments["mu02"] == 0:
return 1.0
# fmt: off
eccentricity = ((moments["mu20"] - moments["mu02"]) ** 2 + 4 * moments["mu11"] ** 2) / (moments["mu20"] + moments["mu02"]) ** 2
# <fim_suffix>fmt: on
return eccentricity
def apply_weights_1d(scores_1d: np.ndarray, weights_1d: np.ndarray) -> float:
"""Apply weights for score fusion.
Args:
scores_1d (np.ndarray): scores to be fused.
weights_1d (np.ndarray): weights.
Raises:
ValueError: if the input 1d arrays do not have the same length.
Returns:
float: fused score.
"""
if len(scores_1d) != len(weights_1d):
raise ValueError("Unable to apply weights. Dimension is different between scores and weights.")
if len(weights_1d) == 0:
raise ValueError("Unable to apply weights. Empty arrays.")
if np.sum(weights_1d) == 0:
raise ValueError("Unable to apply weights. Sum of weights is zero.")
weighted_score = np.sum(np.multiply(scores_1d, weights_1d))
return weighted_score / np.sum(weights_1d)
def polygon_length(polygon: np.ndarray, max_point_distance: int = 20) -> float:
"""Compute the length of a polygon represented as a (_, 2)-dimensionnal numpy array.
One polygon can include several disjoint arcs, which should be identified as separate so that the distance
between them is not counted. If a polygon is made of two small arcs separated by a large distance, then the large
distance between the two arcs will not be counted in the polygon's length.
WARNING: The input polygon is assumed to be non-looped, i.e. the first and last points are not equal,
which is the case for all our GeometryPolygons. The last implicit segment looping back from the
last to the first point is therefore not included in the computed polygon length.
Args:
polygon (np.ndarray): (_, 2) - shaped numpy array representing a polygon.
max_point_distance (int): Maximum distance between two points for them to be considered part of the same arc.
Returns:
float: length of the polygon, in pixels.
"""
if polygon.ndim != 2 or polygon.shape[1] != 2:
raise ValueError(f"This function expects a polygon, i.e. an array of shape (_, 2). Got {polygon.shape}")
inter_point_distances = np.linalg.norm(np.roll(polygon, 1, axis=0) - polygon, axis=1)
inter_point_distances = inter_point_distances[inter_point_distances < max_point_distance]
return inter_point_distances.sum()
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
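A small worked example for the area and coordinate-conversion helpers above, assuming the open-iris package is installed; the unit-square polygon is a hypothetical test input:

```python
import numpy as np

from iris.utils.math import area, cartesian2polar, polar2cartesian

square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
print(area(square))  # 1.0, via the shoelace formula

# Round trip through polar coordinates about the square's centre.
rhos, phis = cartesian2polar(square[:, 0], square[:, 1], center_x=0.5, center_y=0.5)
xs, ys = polar2cartesian(rhos, phis, center_x=0.5, center_y=0.5)
print(np.allclose(np.stack([xs, ys], axis=1), square))  # True
```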
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than or equal to kernel_size[0], 2) sigma_rho is greater than or equal to kernel_size[1], 3) lambda_phi is greater than or equal to kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if lambda_rho is greater than or equal to kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
#<fim_suffix> calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
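The same module appears once more in this row; the short sketch below exercises its two standalone post-processing helpers on a hypothetical toy kernel (installed open-iris package assumed):

```python
import numpy as np

from iris.nodes.iris_response.image_filters.gabor_filters import (
    convert_to_fixpoint_kernelvalues,
    normalize_kernel_values,
)

toy_kernel = np.array([[1.0 + 1.0j, 2.0 - 1.0j], [0.5 + 0.0j, -1.0 + 2.0j]])
normalized = normalize_kernel_values(toy_kernel.copy())
# Real and imaginary parts are scaled separately to unit Frobenius norm.
print(np.linalg.norm(normalized.real, ord="fro"), np.linalg.norm(normalized.imag, ord="fro"))
fixpoint = convert_to_fixpoint_kernelvalues(normalized.copy())  # rounded after scaling by 2**15
```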
<filename>open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import EyeCentersEstimationError
class BisectorsMethod(Algorithm):
"""Implementation of eye's center estimation algorithm using bisectors method for finding a circle center.
This algorithm samples a given number of bisectors from the pupil and iris polygons, and averages their intersection
to produce the polygon center. This method is robust against noise in the polygons, making it a good choice for
non-perfect shapes. It is also robust to polygons missing parts of the circle arc, making it a good choice for
partially-occluded shapes.
LIMITATIONS:
The iris and pupil can be approximated to circles, when the user is properly gazing at the camera.
This requires that the cases of off-gaze have already been filtered out.
"""
class Parameters(Algorithm.Parameters):
"""Default Parameters for BisectorsMethod algorithm."""
num_bisectors: int = Field(..., gt=0)
min_distance_between_sector_points: float = Field(..., gt=0.0, lt=1.0)
max_iterations: int = Field(..., gt=0)
__parameters_type__ = Parameters
def __init__(
self,
num_bisectors: int = 100,
min_distance_between_sector_points: float = 0.75,
max_iterations: int = 50,
) -> None:
"""Assign parameters.
Args:
num_bisectors (int, optional): Number of bisectors. Defaults to 100.
min_distance_between_sector_points (float, optional): Minimum distance between sectors expressed as a fractional value of a circular shape diameter. Defaults to 0.75.
max_iterations (int, optional): Max iterations for bisector search. Defaults to 50.
"""
super().__init__(
num_bisectors=num_bisectors,
min_distance_between_sector_points=min_distance_between_sector_points,
max_iterations=max_iterations,
)
def run(self, geometries: GeometryPolygons) -> EyeCenters:
"""Estimate eye's iris and pupil centers.
Args:
geometries (GeometryPolygons): Geometry polygons.
Returns:
EyeCenters: Eye's centers object.
"""
pupil_center_x, pupil_center_y = self._find_center_coords(geometries.pupil_array, geometries.pupil_diameter)
iris_center_x, iris_center_y = self._find_center_coords(geometries.iris_array, geometries.iris_diameter)
return EyeCenters(pupil_x=pupil_center_x, pupil_y=pupil_center_y, iris_x=iris_center_x, iris_y=iris_center_y)
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
def _calculate_perpendicular_bisectors(
self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.
A pair of points is used if their distance is larger than min_distance_between_sector_points_in_px.
Args:
polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.
min_distance_between_sector_points_in_px (float): Minimum distance between sector points.
Raises:
EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!
Returns:
Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.
"""
np.random.seed(142857)
bisectors_first_points = np.empty([0, 2])
bisectors_second_points = np.empty([0, 2])
for _ in range(self.params.max_iterations):
random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
first_drawn_points = polygon[random_indices[:, 0]]
second_drawn_points = polygon[random_indices[:, 1]]
norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
mask = norms > min_distance_between_sector_points_in_px
bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
if len(bisectors_first_points) >= self.params.num_bisectors:
break
else:
raise EyeCentersEstimationError(
"Not able to find enough random pairs of points on the arc with a large enough distance!"
)
bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]
bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]
bisectors_center = (bisectors_first_points + bisectors_second_points) / 2
# Flip xs with ys and flip sign of one of them to create a 90deg rotation
inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)
inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]
# Add perpendicular vector to center<fim_suffix> and normalize
norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)
inv_bisectors_center_slope[:, 0] /= norm
inv_bisectors_center_slope[:, 1] /= norm
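# step one unit along the bisector direction on either side of the chord midpoint; the two resulting points define each bisector line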
first_bisectors_point = bisectors_center - inv_bisectors_center_slope
second_bisectors_point = bisectors_center + inv_bisectors_center_slope
return first_bisectors_point, second_bisectors_point
def _find_best_intersection(self, fst_points: np.ndarray, sec_points: np.ndarray) -> Tuple[float, float]:
"""fst_points and sec_points are NxD arrays defining N lines. D is the dimension of the space.
This function returns the least squares intersection of the N lines from the system given by eq. 13 in
http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf.
Args:
fst_points (np.ndarray): First bisectors points.
sec_points (np.ndarray): Second bisectors points.
Returns:
Tuple[float, float]: Best intersection point.
Reference:
[1] http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf
"""
norm_bisectors = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
# Generate the array of all projectors I - n*n.T
projections = np.eye(norm_bisectors.shape[1]) - norm_bisectors[:, :, np.newaxis] * norm_bisectors[:, np.newaxis]
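# broadcasting (N, D, 1) * (N, 1, D) gives one DxD projector per line; each projector removes the component along that line's direction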
# Generate R matrix and q vector
R = projections.sum(axis=0)
q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
# Solve the least squares problem for the intersection point p: Rp = q
p = np.linalg.lstsq(R, q, rcond=None)[0]
intersection_x, intersection_y = p
return intersection_x.item(), intersection_y.item()
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
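As a standalone sanity check of the least-squares intersection used in _find_best_intersection, the following NumPy-only sketch (not part of the library) builds several lines through a known point and recovers that point from the same R p = q system.

import numpy as np

center = np.array([3.0, -2.0])
angles = np.linspace(0, np.pi, 8, endpoint=False)
directions = np.stack([np.cos(angles), np.sin(angles)], axis=1)  # unit direction per line
fst_points = center - directions  # one point on each line
sec_points = center + directions  # a second point on each line

n = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
projections = np.eye(2) - n[:, :, np.newaxis] * n[:, np.newaxis]  # (N, 2, 2) projectors I - n n^T
R = projections.sum(axis=0)
q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
p = np.linalg.lstsq(R, q, rcond=None)[0]
print(p.ravel())  # approximately [ 3. -2.]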
<filename>open-iris/src/iris/nodes/geometry_estimation/lsq_ellipse_fit_with_refinement.py<fim_prefix>from typing import List
import cv2
import numpy as np
from pydantic import Field
from iris.callbacks.callback_interface import Callback
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import GeometryPolygons
class LSQEllipseFitWithRefinement(Algorithm):
"""Algorithm that implements least square ellipse fit with iris polygon refinement by finding points to refine by computing euclidean distance.
Algorithm steps:
1) Use OpenCV's fitEllipse method to fit an ellipse to predicted iris and pupil polygons.
2) Refine predicted pupil polygons points to their original location to prevent location precision loss for those points which were predicted by semseg algorithm.
"""
class Parameters(Algorithm.Parameters):
"""Parameters of least square ellipse fit extrapolation algorithm."""
dphi: float = Field(..., gt=0.0, lt=360.0)
__parameters_type__ = Parameters
def __init__(self, dphi: float = 1.0, callbacks: List[Callback] = []) -> None:
"""Assign parameters.
Args:
dphi (float, optional): Angle's delta. Defaults to 1.0.
callbacks (List[Callback], optional): List of callbacks. Defaults to [].
"""
super().__init__(dphi=dphi, callbacks=callbacks)
def run(self, input_polygons: GeometryPolygons) -> GeometryPolygons:
"""Estimate extrapolated polygons with OpenCV's method fitEllipse.
Args:
input_polygons (GeometryPolygons): Smoothed polygons.
Returns:
GeometryPolygons: Extrapolated polygons.
"""
extrapolated_pupil = self._extrapolate(input_polygons.pupil_array)
extrapolated_iris = self._extrapolate(input_polygons.iris_array)
for point in input_polygons.pupil_array:
extrapolated_pupil[self._find_correspondence(point, extrapolated_pupil)] = point
return GeometryPolygons(
pupil_array=extrapolated_pupil, iris_array=extrapolated_iris, eyeball_array=input_polygons.eyeball_array
)
def _extrapolate(self, polygon_points: np.ndarray) -> np.ndarray:
"""Perform extrapolation for points in an array.
Args:
polygon_points (np.ndarray): Smoothed polygon ready for applying extrapolation algorithm on it.
Returns:
np.ndarray: Estimated extrapolated polygon.
"""
(x0, y0), (a, b), theta = cv2.fitEllipse(polygon_points)
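# cv2.fitEllipse returns the centre, the full axis lengths and the rotation in degrees, hence a / 2, b / 2 and np.radians(theta) below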
extrapolated_polygon = LSQEllipseFitWithRefinement.parametric_ellipsis(
a / 2, b / 2, x0, y0, np.radians(theta), round(360 / self.params.dphi)
)
# Rotate such that 0 degree is parallel with x-axis and array is c<fim_suffix>lockwise
roll_amount = round((-theta - 90) / self.params.dphi)
extrapolated_polygon = np.flip(np.roll(extrapolated_polygon, roll_amount, axis=0), axis=0)
return extrapolated_polygon
def _find_correspondence(self, src_point: np.ndarray, dst_points: np.ndarray) -> int:
"""Find correspondence with Euclidean distance.
Args:
src_point (np.ndarray): Source point.
dst_points (np.ndarray): Destination points.
Returns:
int: Index of the destination point closest to the source point.
"""
src_x, src_y = src_point
distance = (dst_points[:, 1] - src_y) ** 2 + (dst_points[:, 0] - src_x) ** 2
idx = np.where(distance == distance.min())[0]
return idx
@staticmethod
def parametric_ellipsis(a: float, b: float, x0: float, y0: float, theta: float, nb_step: int = 100) -> np.ndarray:
"""Given the parameters of a general ellipsis, returns an array of points in this ellipsis.
Args:
a (float): Major axis length.
b (float): Minor axis length.
x0 (float): x offset.
y0 (float): y offset.
theta (float): rotation of the ellipsis.
nb_step (int): number of points on the ellipse.
Returns:
np.ndarray: points within the ellipsis.
"""
t = np.linspace(0, 2 * np.pi, nb_step)
x_coords = x0 + b * np.cos(t) * np.sin(-theta) + a * np.sin(t) * np.cos(-theta)
y_coords = y0 + b * np.cos(t) * np.cos(-theta) - a * np.sin(t) * np.sin(-theta)
return np.array([x_coords, y_coords]).T
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
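A small, hypothetical round-trip check of the static helper above: sample points on a known ellipse with parametric_ellipsis and let cv2.fitEllipse recover the centre. Axis and angle conventions differ between the two, so only the centre is compared.

import cv2
import numpy as np
from iris.nodes.geometry_estimation.lsq_ellipse_fit_with_refinement import LSQEllipseFitWithRefinement

# 360 points on an ellipse centred at (320, 240) with semi-axes 60 and 40.
points = LSQEllipseFitWithRefinement.parametric_ellipsis(
    a=60.0, b=40.0, x0=320.0, y0=240.0, theta=np.radians(30.0), nb_step=360
)
(center_x, center_y), _axes, _angle = cv2.fitEllipse(points.astype(np.float32))
print(round(center_x), round(center_y))  # close to 320 240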
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar co<fim_suffix>ordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
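Analogous to the LogGabor sketch earlier, a minimal GaborFilter instantiation; the values are illustrative and only need to respect the bounds enforced by Parameters and upper_bound_Gabor_parameters (sigma_phi, sigma_rho and lambda_phi each below the matching kernel dimension).

from iris.nodes.iris_response.image_filters.gabor_filters import GaborFilter

gabor = GaborFilter(
    kernel_size=(21, 21),
    sigma_phi=4.0,
    sigma_rho=6.0,
    theta_degrees=45.0,
    lambda_phi=8.0,
    dc_correction=True,
    to_fixpoints=False,
)
kernel = gabor.compute_kernel_values()
print(kernel.shape)  # (21, 21)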
<filename>open-iris/src/iris/nodes/iris_response/probe_schemas/regular_probe_schema.py<fim_prefix>from typing import List, Literal, Optional, Tuple, Union
import numpy as np
from pydantic import Field, PositiveInt, confloat, fields, validator
from iris.io.errors import ProbeSchemaError
from iris.nodes.iris_response.probe_schemas.probe_schema_interface import ProbeSchema
class RegularProbeSchema(ProbeSchema):
"""Probe Schema for a regular Grid."""
class RegularProbeSchemaParameters(ProbeSchema.ProbeSchemaParameters):
"""RegularProbeSchema parameters."""
n_rows: int = Field(..., gt=1)
n_cols: int = Field(..., gt=1)
boundary_rho: List[confloat(ge=0.0, lt=1)]
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"],
List[confloat(ge=0.0, lt=1)],
]
image_shape: Optional[List[PositiveInt]]
@validator("boundary_rho", "boundary_phi")
def check_overlap(
cls: type,
v: Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]],
field: fields.ModelField,
) -> Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]:
"""Validate offsets to avoid overlap.
Args:
cls (type): Class type.
v (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]): Value to check.
field (fields.ModelField): Field descriptor.
Raises:
ProbeSchemaError: Raises warning that offsets are together too large.
Returns:
Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]]: The value for boundary_rho or boundary_phi respectively
"""
if isinstance(v, List):
if (v[0] + v[1]) >= 1:
raise ProbeSchemaError(
f"Offset for {field.name} on left and right corner must be a sum smaller 1, otherwise, offsets overlap."
)
return v
__parameters_type__ = RegularProbeSchemaParameters
def __init__(
self,
n_rows: int,
n_cols: int,
boundary_rho: List[float] = [0, 0.0625],
boundary_phi: Union[
Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]
] = "periodic-left",
image_shape: Optional[List[PositiveInt]] = None,
) -> None:
"""Assign parameters.
Args:
n_rows (int): Number of rows used, represents the number of different rho
values
n_cols (int): Number of columns used, represents the number of different
phi values
boundary_rho (List[float], optional): List with two values f1 and f2. The sampling in rho goes from 0+f1 to 1-f2. Defaults to [0, 0.0625].
boundary_phi (Union[Literal["periodic-symmetric", "periodic-left"], List[confloat(ge=0.0, lt=1)]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one column to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last column are placed with an offset to the
borders, that is half of the spacing of the two columns
- 'periodic-left': the first column is at the border of the bottom of the image, while
the last column is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
Defaults to "periodic-left".
image_shape (list, optional): list containing the desired image dimensions. If provided, the function will throw
a warning if interpolation happens, i.e. if a kernel would be placed in between two pixels. Defaults to None.
"""
super().__init__(
n_rows=n_rows,
n_cols=n_cols,
boundary_rho=boundary_rho,
boundary_phi=boundary_phi,
image_shape=image_shape,
)
def generate_schema(self) -> Tuple[np.ndarray, np.ndarray]:
"""Generate rhos and phis.
Return:
Tuple[np.ndarray, np.ndarray]: the rhos and phis.
"""
rho = np.linspace(
0 + self.params.boundary_rho[0], 1 - self.params.boundary_rho[1], self.params.n_rows, endpoint=True
)
if self.params.boundary_phi == "periodic-symmetric":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
phi = phi + (phi[1] - phi[0]) / 2
if self.params.boundary_phi == "periodic-left":
phi = np.linspace(0, 1, self.params.n_cols, endpoint=False)
if isinstance(self.params.boundary_phi, List):
phi = np.linspace(
0 + self.params.boundary_phi[0], 1 - self.params.boundary_phi[1], self.params.n_cols, endpoint=True
)
phis, rhos = np.meshgrid(phi, rho)
rhos = rhos.flatten()
phis = phis.flatten()
# if image_shap<fim_suffix>e provided: verify that values lie on pixel values
if self.params.image_shape is not None:
rhos_pixel_values = rhos * self.params.image_shape[0]
phis_pixel_values = phis * self.params.image_shape[1]
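# a probe position is pixel-aligned if its fractional part is within 1e-10 of either 0 or 1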
rho_pixel_values = np.logical_or(
np.less_equal(rhos_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), rhos_pixel_values % 1),
).all()
phi_pixel_values = np.logical_or(
np.less_equal(phis_pixel_values % 1, 10 ** (-10)),
np.less_equal(1 - 10 ** (-10), phis_pixel_values % 1),
).all()
if not rho_pixel_values:
raise ProbeSchemaError(
f"Choice for n_rows {self.params.n_rows} leads to interpolation errors, please change input variables"
)
if not phi_pixel_values:
raise ProbeSchemaError(f"Choice for n_cols {self.params.n_cols} leads to interpolation errors")
return rhos, phis
@staticmethod
def find_suitable_n_rows(
row_min: int,
row_max: int,
length: int,
boundary_condition: Union[
Literal["periodic-symmetric", "periodic-left"],
List[float],
] = "periodic_symmetric",
) -> List[int]:
"""Find proper spacing of rows/columns for given boundary conditions (i.e. image size, offset. etc).
Args:
row_min (int): Starting value for row count
row_max (int): End value for row count
length (int): Pixels in the respective dimension
boundary_condition (Union[Literal["periodic-symmetric", "periodic-left"], List[float]], optional): Boundary conditions for the probing
can either be periodic or non-periodic, if they are periodic, the distance
from one row to the next must be the same also for the boundaries.
Else, no conditions for the boundaries are required. Options are:
- 'periodic-symmetric': the first and the last row are placed with an offset to the
borders, that is half of the spacing of the two rows
- 'periodic-left': the first row is at the border of the bottom of the image, while
the last row is one spacing apart from the top of the image
- list with two values: in this case an offset of value f1 and f2 is set on both ends, i.e. the
sampling no longer goes from 0 to 1 ('no-offset') but instead from 0+f1 to 1-f2
Defaults to "periodic-symmetric".
Returns:
list: List of all number of rows that does not lead to interpolation errors
"""
suitable_values: List[int] = []
# loop through all values and validate whether they are suitable
for counter in range(row_min, row_max + 1):
if boundary_condition == "periodic-symmetric":
values = np.linspace(0, 1, counter, endpoint=False)
values = values + (values[1] - values[0]) / 2
if boundary_condition == "periodic-left":
values = np.linspace(0, 1, counter, endpoint=False)
if isinstance(boundary_condition, List):
values = np.linspace(0 + boundary_condition[0], 1 - boundary_condition[1], counter, endpoint=True)
pixel_values = values * length
pixel_values_modulo = pixel_values % 1
no_interpolation = np.less_equal(pixel_values_modulo, 10 ** (-10))
no_interpolation = np.logical_or(no_interpolation, np.less_equal(1 - 10 ** (-10), pixel_values_modulo))
no_interpolation = no_interpolation.all()
if no_interpolation:
suitable_values.append(counter)
return suitable_values
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
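To make the pixel-alignment constraint concrete, a short sketch with illustrative values: find row counts that avoid interpolation for a 128-pixel dimension, then build a schema whose probe positions all land on pixels.

from iris.nodes.iris_response.probe_schemas.regular_probe_schema import RegularProbeSchema

# Row counts between 4 and 20 that keep every probe on a pixel for a 128-pixel dimension.
suitable = RegularProbeSchema.find_suitable_n_rows(4, 20, length=128, boundary_condition="periodic-left")
print(suitable)  # expected: [4, 8, 16]

# With image_shape given, generate_schema() raises ProbeSchemaError if any probe falls between pixels.
schema = RegularProbeSchema(n_rows=16, n_cols=256, image_shape=[128, 1024])
rhos, phis = schema.generate_schema()
print(rhos.shape, phis.shape)  # (4096,) (4096,)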
<filename>open-iris/src/iris/nodes/eye_properties_estimation/bisectors_method.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import Field
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeCenters, GeometryPolygons
from iris.io.errors import EyeCentersEstimationError
class BisectorsMethod(Algorithm):
"""Implementation of eye's center estimation algorithm using bisectors method for finding a circle center.
This algorithm samples a given number of bisectors from the pupil and iris polygons, and averages their intersection
to produce the polygon center. This method is robust against noise in the polygons, making it a good choice for
non-perfect shapes. It is also robust to polygons missing parts of the circle arc, making it a good choice for
partially-occluded shapes.
LIMITATIONS:
The iris and pupil can be approximated as circles when the user is properly gazing at the camera.
This requires that the cases of off-gaze have already been filtered out.
"""
class Parameters(Algorithm.Parameters):
"""Default Parameters for BisectorsMethod algorithm."""
num_bisectors: int = Field(..., gt=0)
min_distance_between_sector_points: float = Field(..., gt=0.0, lt=1.0)
max_iterations: int = Field(..., gt=0)
__parameters_type__ = Parameters
def __init__(
self,
num_bisectors: int = 100,
min_distance_between_sector_points: float = 0.75,
max_iterations: int = 50,
) -> None:
"""Assign parameters.
Args:
num_bisectors (int, optional): Number of bisectors. Defaults to 100.
min_distance_between_sector_points (float, optional): Minimum distance between sectors expressed as a fractional value of a circular shape diameter. Defaults to 0.75.
max_iterations (int, optional): Max iterations for bisector search. Defaults to 50.
"""
super().__init__(
num_bisectors=num_bisectors,
min_distance_between_sector_points=min_distance_between_sector_points,
max_iterations=max_iterations,
)
def run(self, geometries: GeometryPolygons) -> EyeCenters:
"""Estimate eye's iris and pupil centers.
Args:
geometries (GeometryPolygons): Geometry polygons.
Returns:
EyeCenters: Eye's centers object.
"""
pupil_center_x, pupil_center_y = self._find_center_coords(geometries.pupil_array, geometries.pupil_diameter)
iris_center_x, iris_center_y = self._find_center_coords(geometries.iris_array, geometries.iris_diameter)
return EyeCenters(pupil_x=pupil_center_x, pupil_y=pupil_center_y, iris_x=iris_center_x, iris_y=iris_center_y)
def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:
"""Find center coordinates of a polygon.
Args:
polygon (np.ndarray): np.ndarray.
diameter (float): diameter of the polygon.
Returns:
Tuple[float, float]: Tuple with the center location coordinates (x, y).
"""
min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter
first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(
polygon, min_distance_between_sector_points_in_px
)
return self._find_best_intersection(first_bisectors_point, second_bisectors_point)
def _calculate_perpendicular_bisectors(
self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.
A pair of points is used if their distance is larger than min_distance_between_sector_points_in_px.
Args:
polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.
min_distance_between_sector_points_in_px (float): Minimum distance between sector points.
Raises:
EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!
Returns:
Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.
"""
np.random.seed(142857)
bisectors_first_points = np.empty([0, 2])
bisectors_second_points = np.empty([0, 2])
for _ in range(self.params.max_iterations):
random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))
first_drawn_points = polygon[random_indices[:, 0]]
second_drawn_points = polygon[random_indices[:, 1]]
norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)
mask = norms > min_distance_between_sector_points_in_px
bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])
bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])
if len(bisectors_first_points) >= self.params.num_bisectors:
break
else:
raise EyeCentersEstimationError(
"Not able to find enough random pairs of points on the arc with a large enough distance!"
)
bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]
bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]
bisectors_center = (bisectors_first_points + bisectors_second_points) / 2
# Flip xs with ys and flip <fim_suffix>sign of one of them to create a 90deg rotation
inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)
inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]
# Add perpendicular vector to center and normalize
norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)
inv_bisectors_center_slope[:, 0] /= norm
inv_bisectors_center_slope[:, 1] /= norm
first_bisectors_point = bisectors_center - inv_bisectors_center_slope
second_bisectors_point = bisectors_center + inv_bisectors_center_slope
return first_bisectors_point, second_bisectors_point
def _find_best_intersection(self, fst_points: np.ndarray, sec_points: np.ndarray) -> Tuple[float, float]:
"""fst_points and sec_points are NxD arrays defining N lines. D is the dimension of the space.
This function returns the least squares intersection of the N lines from the system given by eq. 13 in
http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf.
Args:
fst_points (np.ndarray): First bisectors points.
sec_points (np.ndarray): Second bisectors points.
Returns:
Tuple[float, float]: Best intersection point.
Reference:
[1] http://cal.cs.illinois.edu/~johannes/research/LS_line_intersecpdf
"""
norm_bisectors = (sec_points - fst_points) / np.linalg.norm(sec_points - fst_points, axis=1)[:, np.newaxis]
# Generate the array of all projectors I - n*n.T
projections = np.eye(norm_bisectors.shape[1]) - norm_bisectors[:, :, np.newaxis] * norm_bisectors[:, np.newaxis]
# Generate R matrix and q vector
R = projections.sum(axis=0)
q = (projections @ fst_points[:, :, np.newaxis]).sum(axis=0)
# Solve the least squares problem for the intersection point p: Rp = q
p = np.linalg.lstsq(R, q, rcond=None)[0]
intersection_x, intersection_y = p
return intersection_x.item(), intersection_y.item()
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
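A hypothetical smoke test for BisectorsMethod, assuming GeometryPolygons accepts plain NumPy arrays for its three polygon fields as in the run() method above: concentric circles should produce centres close to the true one.

import numpy as np
from iris.io.dataclasses import GeometryPolygons
from iris.nodes.eye_properties_estimation.bisectors_method import BisectorsMethod

def circle(cx: float, cy: float, r: float, n: int = 512) -> np.ndarray:
    """Return n points on a circle of radius r centred at (cx, cy)."""
    phi = np.linspace(0, 2 * np.pi, n, endpoint=False)
    return np.stack([cx + r * np.cos(phi), cy + r * np.sin(phi)], axis=1).astype(np.float32)

polygons = GeometryPolygons(
    pupil_array=circle(320.0, 240.0, 40.0),
    iris_array=circle(320.0, 240.0, 120.0),
    eyeball_array=circle(320.0, 240.0, 200.0),
)
centers = BisectorsMethod().run(polygons)
print(round(centers.pupil_x), round(centers.pupil_y))  # close to 320 240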
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envelope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: cal<fim_suffix>culate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/iris_response/image_filters/gabor_filters.py<fim_prefix>from typing import Any, Dict, Tuple
import numpy as np
from pydantic import Field, conint, root_validator, validator
import iris.io.validators as pydantic_v
from iris.io.errors import ImageFilterError
from iris.nodes.iris_response.image_filters.image_filter_interface import ImageFilter
def upper_bound_Gabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bounds of Gabor filter parameters such as sigma_phi, sigma_rho and lambda_phi for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: Raised if 1) sigma_phi is greater than kernel_size[0], 2) sigma_rho is greater than kernel_size[1], 3) lambda_phi greater than kernel_size[0].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, sigma_phi, sigma_rho, lambda_phi = (
values["kernel_size"],
values["sigma_phi"],
values["sigma_rho"],
values["lambda_phi"],
)
if sigma_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: sigma_phi can not be greater than kernel_size[0].")
if sigma_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: sigma_rho can not be greater than kernel_size[1].")
if lambda_phi >= kernel_size[0]:
raise ImageFilterError("Invalid parameters: lambda_phi can not be greater than kernel_size[0].")
return values
def upper_bound_LogGabor_parameters(cls: type, values: Dict[str, Any]) -> Dict[str, Any]:
"""Check upper bound of LogGabor filter parameter lambda_rho for the given kernel_size.
Args:
cls (type): class type.
values (Dict[str, Any]): values to be checked.
Raises:
ImageFilterError: lambda_rho can not be greater than kernel_size[1].
Returns:
Dict[str, Any]: values of checked parameters.
"""
kernel_size, lambda_rho = values["kernel_size"], values["lambda_rho"]
if lambda_rho >= kernel_size[1]:
raise ImageFilterError("Invalid parameters: lambda_rho can not be greater than kernel_size[1].")
return values
def get_xy_mesh(kernel_size: Tuple[int, int]) -> Tuple[np.ndarray, np.ndarray]:
"""Get (x,y) meshgrids for a given kernel size.
Args:
kernel_size (Tuple[int, int]): Kernel width and height.
Returns:
Tuple[np.ndarray, np.ndarray]: meshgrid of (x, y) positions.
"""
ksize_phi_half = kernel_size[0] // 2
ksize_rho_half = kernel_size[1] // 2
y, x = np.meshgrid(
np.arange(-ksize_phi_half, ksize_phi_half + 1),
np.arange(-ksize_rho_half, ksize_rho_half + 1),
indexing="xy",
sparse=True,
)
return x, y
def get_radius(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""Get radius to the image center for a given array of relative positions (x,y).
Args:
x (np.ndarray): x position relative to the image center.
y (np.ndarray): y position relative to the image center.
Returns:
np.ndarray: radius to the image center.
"""
radius = np.sqrt(x**2 + y**2)
return radius
def rotate(x: np.ndarray, y: np.ndarray, angle: float) -> Tuple[np.ndarray, np.ndarray]:
"""Rotate a given array of relative positions (x,y) by a given angle.
Args:
x (np.ndarray): x position.
y (np.ndarray): y position.
angle (float): angle for rotation (in degrees).
Returns:
Tuple[np.ndarray, np.ndarray]: rotated x, y positions.
"""
cos_theta = np.cos(angle * np.pi / 180)
sin_theta = np.sin(angle * np.pi / 180)
rotx = x * cos_theta + y * sin_theta
roty = -x * sin_theta + y * cos_theta
return rotx, roty
def normalize_kernel_values(kernel_values: np.ndarray) -> np.ndarray:
"""Normalize the kernel values so that the square sum is 1.
Args:
kernel_values (np.ndarray): Kernel values (complex numbers).
Returns:
np.ndarray: normalized Kernel values.
"""
norm_real = np.linalg.norm(kernel_values.real, ord="fro")
if norm_real > 0:
kernel_values.real /= norm_real
norm_imag = np.linalg.norm(kernel_values.imag, ord="fro")
if norm_imag > 0:
kernel_values.imag /= norm_imag
return kernel_values
def convert_to_fixpoint_kernelvalues(kernel_values: np.ndarray) -> np.ndarray:
"""Convert the kernel values (both real and imaginary) to fix points.
Args:
kernel_values (np.ndarray): Kernel values.
Returns:
np.ndarray: fix-point Kernel values.
"""
if np.iscomplexobj(kernel_values):
kernel_values.real = np.round(kernel_values.real * 2**15)
kernel_values.imag = np.round(kernel_values.imag * 2**15)
else:
kernel_values = np.round(kernel_values * 2**15)
return kernel_values
class GaborFilter(ImageFilter):
"""Implementation of a 2D Gabor filter.
Reference:
[1] https://inc.ucsd.edu/mplab/75/media//gabor.pdf.
"""
class Parameters(ImageFilter.Parameters):
"""GaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., ge=1)
sigma_rho: float = Field(..., ge=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_phi: float = Field(..., ge=2)
dc_correction: bool
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_Gabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_phi: float,
dc_correction: bool = True,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): phi standard deviation.
sigma_rho (float): rho standard deviation.
theta_degrees (float): orientation of kernel in degrees.
lambda_phi (float): wavelength of the sinusoidal factor, lower value = thinner strip.
dc_correction (bool, optional): whether to enable DC correction. Defaults to True.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_phi=lambda_phi,
dc_correction=dc_correction,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D Gabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
rotx, roty = rotate(x, y, self.params.theta_degrees)
# calculate carrier and envel<fim_suffix>ope
carrier = 1j * 2 * np.pi / self.params.lambda_phi * rotx
envelope = -(rotx**2 / self.params.sigma_phi**2 + roty**2 / self.params.sigma_rho**2) / 2
# calculate kernel values
kernel_values = np.exp(envelope + carrier)
kernel_values /= 2 * np.pi * self.params.sigma_phi * self.params.sigma_rho
# apply DC correction
if self.params.dc_correction:
# Step 1: calculate mean value of Gabor Wavelet
g_mean = np.mean(np.real(kernel_values), axis=-1)
# Step 2: define gaussian offset
correction_term_mean = np.mean(envelope, axis=-1)
# Step 3: subtract gaussian
kernel_values = kernel_values - (g_mean / correction_term_mean)[:, np.newaxis] * envelope
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
class LogGaborFilter(ImageFilter):
"""Implementation of a 2D LogGabor filter.
Reference:
[1] https://en.wikipedia.org/wiki/Log_Gabor_filter.
"""
class Parameters(ImageFilter.Parameters):
"""LogGaborFilter parameters."""
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]
sigma_phi: float = Field(..., gt=0, le=np.pi)
sigma_rho: float = Field(..., gt=0.1, le=1)
theta_degrees: float = Field(..., ge=0, lt=360)
lambda_rho: float = Field(..., gt=2)
to_fixpoints: bool
_upper_bound = root_validator(pre=True, allow_reuse=True)(upper_bound_LogGabor_parameters)
_is_odd = validator("kernel_size", allow_reuse=True, each_item=True)(pydantic_v.is_odd)
__parameters_type__ = Parameters
def __init__(
self,
*,
kernel_size: Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)],
sigma_phi: float,
sigma_rho: float,
theta_degrees: float,
lambda_rho: float,
to_fixpoints: bool = False,
) -> None:
"""Assign parameters.
Args:
kernel_size (Tuple[conint(gt=3, lt=99), conint(gt=3, lt=99)]): Kernel width and height.
sigma_phi (float): bandwidth in phi (frequency domain).
sigma_rho (float): bandwidth in rho (frequency domain).
theta_degrees (float): orientation of filter in degrees.
lambda_rho (float): wavelength in rho.
to_fixpoints (bool, optional): whether to convert kernel values to fixpoints. Defaults to False.
"""
super().__init__(
kernel_size=kernel_size,
sigma_phi=sigma_phi,
sigma_rho=sigma_rho,
theta_degrees=theta_degrees,
lambda_rho=lambda_rho,
to_fixpoints=to_fixpoints,
)
def compute_kernel_values(self) -> np.ndarray:
"""Compute 2D LogGabor filter kernel values.
Returns:
np.ndarray: Kernel values.
"""
# convert to polar coordinates
x, y = get_xy_mesh(self.params.kernel_size)
radius = get_radius(x, y)
# remove 0 radius value in the center
ksize_phi_half = self.params.kernel_size[0] // 2
ksize_rho_half = self.params.kernel_size[1] // 2
radius[ksize_rho_half][ksize_phi_half] = 1
# get angular distance
[rotx, roty] = rotate(x, y, self.params.theta_degrees)
dtheta = np.arctan2(roty, rotx)
# calculate envelope and orientation
envelope = np.exp(
-0.5
* np.log2(radius * self.params.lambda_rho / self.params.kernel_size[1]) ** 2
/ self.params.sigma_rho**2
)
envelope[ksize_rho_half][ksize_phi_half] = 0
orientation = np.exp(-0.5 * dtheta**2 / self.params.sigma_phi**2)
# calculate kernel values
kernel_values = envelope * orientation
kernel_values = np.fft.fftshift(np.fft.ifft2(np.fft.ifftshift(kernel_values)))
# normalize kernel values
kernel_values = normalize_kernel_values(kernel_values)
if self.params.to_fixpoints:
kernel_values = convert_to_fixpoint_kernelvalues(kernel_values)
return kernel_values
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/perspective_normalization.py<fim_prefix>from typing import Collection, List, Tuple
import cv2
import numpy as np
from pydantic import Field, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeOrientation, GeometryPolygons, IRImage, NoiseMask, NormalizedIris
from iris.io.errors import NormalizationError
from iris.nodes.normalization.common import correct_orientation, generate_iris_mask, interpolate_pixel_intensity
class PerspectiveNormalization(Algorithm):
"""Implementation of a normalization algorithm which uses perspective transformation to map image pixels.
Algorithm steps:
1) Create a grid of trapezoids around the iris in the original image based on the following algorithm parameters: res_in_phi, res_in_r, intermediate_radiuses.
2) Create a grid of rectangles in the normalized image, one rectangle corresponding to each trapezoid.
3) For each corresponding trapezoid-rectangle pair, compute a perspective matrix that maps normalized image pixel locations to original image locations.
4) Map each normalized image pixel to original image pixel based on estimated perspective matrix and perform bilinear interpolation if necessary.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for PerspectiveNormalization."""
res_in_phi: int = Field(..., gt=0)
res_in_r: int = Field(..., gt=0)
skip_boundary_points: int = Field(..., gt=0)
intermediate_radiuses: Collection[float]
oversat_threshold: int = Field(..., gt=0)
@validator("intermediate_radiuses")
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if the number of radiuses is invalid, the min value is less than 0.0, or the max value is greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
__parameters_type__ = Parameters
def __init__(
self,
res_in_phi: int = 512,
res_in_r: int = 128,
skip_boundary_points: int = 1,
intermediate_radiuses: Collection[float] = np.linspace(0.0, 1.0, 8),
oversat_threshold: int = 254,
) -> None:
"""Assign parameters.
Args:
res_in_phi (int): Normalized image phi resolution. Defaults to 512.
res_in_r (int): Normalized image r resolution. Defaults to 128.
skip_boundary_points (int, optional): Take every nth point from estimated boundaries when generating correspondences.
Defaults to 1.
intermediate_radiuses (Collection[float], optional): Intermediate rings radiuses used to generate additional points for estimating transformations.
Defaults to np.linspace(0.0, 1.0, 8).
oversat_threshold (int, optional): threshold for masking over-saturated pixels. Defaults to 254.
"""
super().__init__(
res_in_phi=res_in_phi,
res_in_r=res_in_r,
skip_boundary_points=skip_boundary_points,
intermediate_radiuses=intermediate_radiuses,
oversat_threshold=oversat_threshold,
)
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using perspective transformation estimated for every region of an image separately.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points, dst_points = self._generate_correspondences(pupil_points, iris_points)
normalized_iris = NormalizedIris(
normalized_image=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=np.float32),
normalized_mask=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=bool),
)
for angle_point_idx in range(src_points.shape[1] - 1):
for ring_idx in range(src_points.shape[0] - 1):
current_src, current_dst = self._correspondence_rois_coords(
angle_idx=angle_point_idx,
ring_idx=ring_idx,
src_points=src_points,
dst_points=dst_points,
)
xmin, ymin, xmax, ymax = self._bbox_coords(current_dst)
normalized_image_roi, normalized_mask_roi = self._normalize_roi(
original_image=image.img_data,
iris_mask=iris_mask,
src_points=current_src.astype(np.float32),
dst_points=current_dst.astype(np.float32),
normalize_roi_output_shape=(ymax - ymin, xmax - xmin),
)
normalized_iris.normalized_image[ymin:ymax, xmin:xmax] = normalized_image_roi
normalized_iris.normalized_mask[ymin:ymax, xmin:xmax] = normalized_mask_roi
return normalized_iris
def _generate_correspondences(
self, pupil_points: np.ndarray, iris_points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate correspondences between points in original image and normalized image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with generated correspondences.
"""
pupil_points = pupil_points[:: self.params.skip_boundary_points]
iris_points = iris_points[:: self.params.skip_boundary_points]
src_points = []
for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring)
src_points = np.array(src_points)
num_rings, num_ring_points = src_points.shape[:2]
dst_xs, dst_ys = np.meshgrid(
np.linspace(0, self.params.res_in_phi, num_ring_points).astype(int),
np.linspace(0, self.params.res_in_r, num_rings).astype(int),
)
dst_points = np.array([dst_xs, dst_ys]).transpose((1, 2, 0))
return src_points, dst_points
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
t<fim_suffix>ry:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
except IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
def _correspondence_rois_coords(
self,
angle_idx: int,
ring_idx: int,
src_points: np.ndarray,
dst_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single correspondence ROIs between original image and normalized one based on angle index and ring index.
Args:
angle_idx (int): Boundary point angle index.
ring_idx (int): Intermediate ring index.
src_points (np.ndarray): All mapping points from an original image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
dst_points (np.ndarray): All mapping points from an normalized image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with extracted from src_points and dst_points ROIs.
"""
src_roi = src_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
dst_roi = dst_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
return src_roi.reshape(4, 2), dst_roi.reshape(4, 2)
@staticmethod
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
@staticmethod
def homogeneous2cartesian(points: np.ndarray) -> np.ndarray:
"""Convert points in homogeneous coordinates to cartesian coordinates.
Args:
points (np.ndarray): Points in homogeneous coordinates. Array should be in format: [[x values], [y values], [perspective scale values]].
Returns:
np.ndarray: Points in cartesian coordinates. Returned array will have format: [[x values], [y values]].
"""
points /= points[-1]
points = points[:2]
return points
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try<fim_suffix>:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
t<fim_suffix>ry:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except <fim_suffix>IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/perspective_normalization.py<fim_prefix>from typing import Collection, List, Tuple
import cv2
import numpy as np
from pydantic import Field, validator
from iris.io.class_configs import Algorithm
from iris.io.dataclasses import EyeOrientation, GeometryPolygons, IRImage, NoiseMask, NormalizedIris
from iris.io.errors import NormalizationError
from iris.nodes.normalization.common import correct_orientation, generate_iris_mask, interpolate_pixel_intensity
class PerspectiveNormalization(Algorithm):
"""Implementation of a normalization algorithm which uses perspective transformation to map image pixels.
Algorithm steps:
1) Create a grid of trapezoids around iris in original image based on following algorithm parameters: res_in_phi, res_in_r, intermediate_radiuses.
2) Create a grid of corresponding to each trapezoid rectangles in normalized image.
3) For each corresponding trapezoid, rectangle pair compute perspective matrix to estimate normalized image pixel location in an original image location.
4) Map each normalized image pixel to original image pixel based on estimated perspective matrix and perform bilinear interpolation if necessary.
"""
class Parameters(Algorithm.Parameters):
"""Parameters class for PerspectiveNormalization."""
res_in_phi: int = Field(..., gt=0)
res_in_r: int = Field(..., gt=0)
skip_boundary_points: int = Field(..., gt=0)
intermediate_radiuses: Collection[float]
oversat_threshold: int = Field(..., gt=0)
@validator("intermediate_radiuses")
def check_intermediate_radiuses(cls: type, v: Collection[float]) -> Collection[float]:
"""Check intermediate_radiuses parameter.
Args:
cls (type): PerspectiveNormalization.Parameters class.
v (Collection[float]): Variable value to check.
Raises:
NormalizationError: Raised if number of radiuses is invalid or min value is less then 0.0 or greater than 1.0.
Returns:
Collection[float]: intermediate_radiuses value passed for further processing.
"""
if len(v) < 2:
raise NormalizationError(f"Invalid number of intermediate_radiuses: {len(v)}.")
if min(v) < 0.0:
raise NormalizationError(f"Invalid min value of intermediate_radiuses: {min(v)}.")
if max(v) > 1.0:
raise NormalizationError(f"Invalid max value of intermediate_radiuses: {max(v)}.")
return v
__parameters_type__ = Parameters
def __init__(
self,
res_in_phi: int = 512,
res_in_r: int = 128,
skip_boundary_points: int = 1,
intermediate_radiuses: Collection[float] = np.linspace(0.0, 1.0, 8),
oversat_threshold: int = 254,
) -> None:
"""Assign parameters.
Args:
res_in_phi (int): Normalized image phi resolution. Defaults to 512.
res_in_r (int): Normalized image r resolution. Defaults to 128.
skip_boundary_points (int, optional): Take every nth point from estimated boundaries when generating correspondences.
Defaults to 1.
intermediate_radiuses (t.Iterable[float], optional): Intermediate rings radiuses used to generate additional points for estimating transformations.
Defaults to np.linspace(0.0, 1.0, 8).
oversat_threshold (int, optional): threshold for masking over-satuated pixels. Defaults to 254.
"""
super().__init__(
res_in_phi=res_in_phi,
res_in_r=res_in_r,
skip_boundary_points=skip_boundary_points,
intermediate_radiuses=intermediate_radiuses,
oversat_threshold=oversat_threshold,
)
def run(
self,
image: IRImage,
noise_mask: NoiseMask,
extrapolated_contours: GeometryPolygons,
eye_orientation: EyeOrientation,
) -> NormalizedIris:
"""Normalize iris using perspective transformation estimated for every region of an image separately.
Args:
image (IRImage): Input image to normalize.
noise_mask (NoiseMask): Noise mask.
extrapolated_contours (GeometryPolygons): Extrapolated contours.
eye_orientation (EyeOrientation): Eye orientation angle.
Returns:
NormalizedIris: NormalizedIris object containing normalized image and iris mask.
"""
if len(extrapolated_contours.pupil_array) != len(extrapolated_contours.iris_array):
raise NormalizationError("Extrapolated amount of iris and pupil points must be the same.")
pupil_points, iris_points = correct_orientation(
extrapolated_contours.pupil_array,
extrapolated_contours.iris_array,
eye_orientation.angle,
)
iris_mask = generate_iris_mask(extrapolated_contours, noise_mask.mask)
iris_mask[image.img_data >= self.params.oversat_threshold] = False
src_points, dst_points = self._generate_correspondences(pupil_points, iris_points)
normalized_iris = NormalizedIris(
normalized_image=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=np.float32),
normalized_mask=np.zeros((self.params.res_in_r, self.params.res_in_phi), dtype=bool),
)
for angle_point_idx in range(src_points.shape[1] - 1):
for ring_idx in range(src_points.shape[0] - 1):
current_src, current_dst = self._correspondence_rois_coords(
angle_idx=angle_point_idx,
ring_idx=ring_idx,
src_points=src_points,
dst_points=dst_points,
)
xmin, ymin, xmax, ymax = self._bbox_coords(current_dst)
normalized_image_roi, normalized_mask_roi = self._normalize_roi(
original_image=image.img_data,
iris_mask=iris_mask,
src_points=current_src.astype(np.float32),
dst_points=current_dst.astype(np.float32),
normalize_roi_output_shape=(ymax - ymin, xmax - xmin),
)
normalized_iris.normalized_image[ymin:ymax, xmin:xmax] = normalized_image_roi
normalized_iris.normalized_mask[ymin:ymax, xmin:xmax] = normalized_mask_roi
return normalized_iris
def _generate_correspondences(
self, pupil_points: np.ndarray, iris_points: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate correspondences between points in original image and normalized image.
Args:
pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points = 512, xy_coords = 2).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with generated correspondences.
"""
pupil_points = pupil_points[:: self.params.skip_boundary_points]
iris_points = iris_points[:: self.params.skip_boundary_points]
src_points = []
for radius in self.params.intermediate_radiuses:
ring = pupil_points + radius * (iris_points - pupil_points)
ring = np.vstack([ring, ring[0]])
src_points.append(ring)
src_points = np.array(src_points)
num_rings, num_ring_points = src_points.shape[:2]
dst_xs, dst_ys = np.meshgrid(
np.linspace(0, self.params.res_in_phi, num_ring_points).astype(int),
np.linspace(0, self.params.res_in_r, num_rings).astype(int),
)
dst_points = np.array([dst_xs, dst_ys]).transpose((1, 2, 0))
return src_points, dst_points
def _normalize_roi(
self,
original_image: np.ndarray,
iris_mask: np.ndarray,
src_points: np.ndarray,
dst_points: np.ndarray,
normalize_roi_output_shape: Tuple[float, float],
) -> Tuple[np.ndarray, np.ndarray]:
"""Normalize a single ROI of an image.
Args:
original_image (np.ndarray): Entire input image to normalize.
iris_mask (np.ndarray): Iris class segmentation mask.
src_points (np.ndarray): ROI's original input image points.
dst_points (np.ndarray): ROI's normalized image points.
normalize_roi_output_shape (t.Tuple[float, float]): Output shape of normalized ROI.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with normalized image and mask ROIs.
"""
xmin, ymin, xmax, ymax = self._bbox_coords(dst_points)
normalize_image_xs = np.arange(xmin, xmax)
normalize_image_ys = np.arange(ymin, ymax)
normalize_image_points = np.meshgrid(normalize_image_xs, normalize_image_ys)
normalize_image_points = self.cartesian2homogeneous(normalize_image_points)
perspective_mat = cv2.getPerspectiveTransform(dst_points, src_points)
mapped_points = np.matmul(perspective_mat, normalize_image_points)
mapped_points = self.homogeneous2cartesian(mapped_points)
normalized_image_roi = np.zeros(normalize_roi_output_shape, dtype=np.float32)
normalized_mask_roi = np.zeros(normalize_roi_output_shape, dtype=bool)
for image_xy, normalized_xy in zip(mapped_points.T, normalize_image_points.T[..., :2]):
norm_x, norm_y = normalized_xy.astype(int)
shifted_y, shifted_x = norm_y - ymin, norm_x - xmin
normalized_image_roi[shifted_y, shifted_x] = interpolate_pixel_intensity(
original_image, pixel_coords=image_xy
)
try:
img_x, img_y = map(int, image_xy)
normalized_mask_roi[shifted_y, shifted_x] = iris_mask[img_y, img_x]
exce<fim_suffix>pt IndexError:
normalized_mask_roi[shifted_y, shifted_x] = False
return normalized_image_roi / 255.0, normalized_mask_roi
def _bbox_coords(self, norm_dst_points: np.ndarray) -> Tuple[int, int, int, int]:
"""Extract the bounding box of currently processed normalized image ROI.
Args:
norm_dst_points (np.ndarray): Normalized image ROI coordinates.
Returns:
Tuple[int, int, int, int]: Bounding box coordinates in form (xmin, ymin, xmax, ymax).
"""
xmin, ymin = norm_dst_points[0].astype(int)
xmax, ymax = norm_dst_points[-1].astype(int)
return (xmin, ymin, xmax, ymax)
def _correspondence_rois_coords(
self,
angle_idx: int,
ring_idx: int,
src_points: np.ndarray,
dst_points: np.ndarray,
) -> Tuple[np.ndarray, np.ndarray]:
"""Generate a single correspondence ROIs between original image and normalized one based on angle index and ring index.
Args:
angle_idx (int): Boundary point angle index.
ring_idx (int): Intermediate ring index.
src_points (np.ndarray): All mapping points from an original image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
dst_points (np.ndarray): All mapping points from an normalized image.
NumPy array of shape (
num_intermediate_rings = self.intermediate_radiuses,
num_boundary_points = 512 // self.skip_boundary_points,
xy_coords = 2
).
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with extracted from src_points and dst_points ROIs.
"""
src_roi = src_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
dst_roi = dst_points[ring_idx : ring_idx + 2, angle_idx : angle_idx + 2]
return src_roi.reshape(4, 2), dst_roi.reshape(4, 2)
@staticmethod
def cartesian2homogeneous(points: List[np.ndarray]) -> np.ndarray:
"""Convert points in cartesian coordinates to homogeneous coordinates.
Args:
points (List[np.ndarray]): Points in cartesian coordinates. Array should be in format: [[x values], [y values]].
Returns:
np.ndarray: Points in homogeneous coordinates. Returned array will have format: [[x values], [y values], [1 ... 1]].
"""
x_coords, y_coords = points
x_coords = x_coords.reshape(-1, 1)
y_coords = y_coords.reshape(-1, 1)
homogeneous_coords = np.hstack([x_coords, y_coords, np.ones((len(x_coords), 1))])
return homogeneous_coords.T
@staticmethod
def homogeneous2cartesian(points: np.ndarray) -> np.ndarray:
"""Convert points in homogeneous coordinates to cartesian coordinates.
Args:
points (np.ndarray): Points in homogeneous coordinates. Array should be in format: [[x values], [y values], [perspective scale values]].
Returns:
np.ndarray: Points in cartesian coordinates. Returned array will have format: [[x values], [y values]].
"""
points /= points[-1]
points = points[:2]
return points
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexEr<fim_suffix>ror:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def ge<fim_suffix>t_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolation_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle> | null | METHOD | complete_current_header_empty_completion |
<filename>open-iris/src/iris/nodes/normalization/common.py<fim_prefix>from typing import Tuple
import numpy as np
from pydantic import NonNegativeInt
from iris.io.dataclasses import GeometryPolygons
from iris.utils import common
def generate_iris_mask(extrapolated_contours: GeometryPolygons, noise_mask: np.ndarray) -> np.ndarray:
"""Generate iris mask by first finding the intersection region between extrapolated iris contours and eyeball contours. Then remove from the outputted mask those pixels for which noise_mask is equal to True.
Args:
extrapolated_contours (GeometryPolygons): Iris polygon vertices.
noise_mask (np.ndarray): Noise mask.
Returns:
np.ndarray: Iris mask.
"""
img_h, img_w = noise_mask.shape[:2]
iris_mask = common.contour_to_mask(extrapolated_contours.iris_array, (img_w, img_h))
eyeball_mask = common.contour_to_mask(extrapolated_contours.eyeball_array, (img_w, img_h))
iris_mask = iris_mask & eyeball_mask
iris_mask = ~(iris_mask & noise_mask) & iris_mask
return iris_mask
def correct_orientation(
pupil_points: np.ndarray, iris_points: np.ndarray, eye_orientation: float
) -> Tuple[np.ndarray, np.ndarray]:
"""Correct orientation by changing the starting angle in pupil and iris points' arrays.
Args:
pupil_points (np.ndarray): Pupil boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
iris_points (np.ndarray): Iris boundary points' array. NumPy array of shape (num_points = 512, xy_coords = 2).
eye_orientation (float): Eye orientation angle in radians.
Returns:
Tuple[np.ndarray, np.ndarray]: Tuple with rotated based on eye_orientation angle boundary points (pupil_points, iris_points).
"""
orientation_angle = np.degrees(eye_orientation)
num_rotations = -round(orientation_angle * len(pupil_points) / 360.0)
pupil_points = np.roll(pupil_points, num_rotations, axis=0)
iris_points = np.roll(iris_points, num_rotations, axis=0)
return pupil_points, iris_points
def getgrids(res_in_r: NonNegativeInt, p2i_ratio: NonNegativeInt) -> np.ndarray:
"""Generate radius grids for nonlinear normalization based on p2i_ratio (pupil_to_iris ratio).
Args:
res_in_r (NonNegativeInt): Normalized image r resolution.
p2i_ratio (NonNegativeInt): pupil_to_iris ratio, range in [0,100]
Returns:
np.ndarray: nonlinear sampling grids for normalization
"""
p = [np.square(x) for x in np.arange(28, max(74 - p2i_ratio, p2i_ratio - 14), 1)]
q = p - p[0]
q = q / q[-1]
grids = np.interp(np.linspace(0, 1.0, res_in_r + 1), np.linspace(0, 1.0, len(q)), q)
return grids[0:-1] + np.diff(grids) / 2
def interpolate_pixel_intensity(image: np.ndarray, pixel_coords: Tuple[float, float]) -> float:
"""Perform bilinear interpolation to estimate pixel intensity in a given location.
Args:
image (np.ndarray): Original, not normalized image.
pixel_coords (Tuple[float, float]): Pixel coordinates.
Returns:
float: Interpolated pixel intensity.
Reference:
[1] https://en.wikipedia.org/wiki/Bilinear_interpolation
"""
def get_pixel_intensity(image: np.ndarray, pixel_x: float, pixel_y: float) -> float:
"""Get the intensity value of a pixel from an intensity image.
Args:
image (np.ndarray): Intensity image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
float: Pixel value.
"""
try:
return image[int(pixel_y), int(pixel_x)]
except IndexError:
return 0.0
def get_interpolatio<fim_suffix>n_points_coords(
image: np.ndarray, pixel_x: float, pixel_y: float
) -> Tuple[float, float, float, float]:
"""Extract interpolation points coordinates.
Args:
image (np.ndarray): Original, not normalized image.
pixel_x (float): Pixel x coordinate.
pixel_y (float): Pixel y coordinate.
Returns:
Tuple[float, float, float, float]: Tuple with interpolation points coordinates in a format (xmin, ymin, xmax, ymax).
"""
xmin, ymin = np.floor(pixel_x), np.floor(pixel_y)
xmax, ymax = np.ceil(pixel_x), np.ceil(pixel_y)
img_h, img_w = image.shape[:2]
if xmin == xmax and not xmax == img_w - 1:
xmax += 1
if xmin == xmax and xmax == img_w - 1:
xmin -= 1
if ymin == ymax and not ymax == img_h - 1:
ymax += 1
if ymin == ymax and ymax == img_h - 1:
ymin -= 1
return xmin, ymin, xmax, ymax
pixel_x, pixel_y = pixel_coords
xmin, ymin, xmax, ymax = get_interpolation_points_coords(image, pixel_x=pixel_x, pixel_y=pixel_y)
lower_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymax)
lower_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymax)
upper_left_pixel_intensity = get_pixel_intensity(image, pixel_x=xmin, pixel_y=ymin)
upper_right_pixel_intensity = get_pixel_intensity(image, pixel_x=xmax, pixel_y=ymin)
xs_differences = np.array([xmax - pixel_x, pixel_x - xmin])
neighboring_pixel_intensities = np.array(
[
[lower_left_pixel_intensity, upper_left_pixel_intensity],
[lower_right_pixel_intensity, upper_right_pixel_intensity],
]
)
ys_differences = np.array([[pixel_y - ymin], [ymax - pixel_y]])
pixel_intensity = np.matmul(np.matmul(xs_differences, neighboring_pixel_intensities), ys_differences)
return pixel_intensity.item()
<fim_middle> | null | METHOD | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest, log errors to stdout so they show up in captured test output
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
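# Minimal usage sketch (illustrative only; the example strings below are made up):
# index a small corpus with the default whitespace tokenizer, then inspect one
# document's term frequencies via the Terms scalar returned by integer indexing.
#
#   arr = SearchArray.index(["the quick fox", "the lazy dog"])
#   doc = arr[0]               # Terms object for the first document
#   doc.termfreq("quick")      # term frequency of "quick" in that document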
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
        self.postings = postings
        self.encoded = encoded
        self.doc_len = doc_len
        self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
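        # Like positions(), but keyed by integer term id (looked up in term_dict)
        # rather than by the raw term string.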
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
        same_postings = isinstance(other, Terms) and self.postings == other.postings
        return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these were two sparse vectors in the same high-dimensional term space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
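        # Identity is derived from the term -> frequency mapping only; positions
        # and doc_len do not contribute to the hash.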
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
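# Helper used by SearchArray.__getitem__: rebuild a Terms scalar for one document
# from its sparse term-matrix row and the encoded positions stored in the index.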
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
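        # Build an empty array first, then attach the freshly built index structures.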
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
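        # Already an array of Terms (this extension dtype) -- wrap it directly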
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
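                # Support negative indexing by converting to an absolute doc id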
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
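            # Share the positions index when avoid_copies is set; otherwise slice it
            # down to just the selected rows.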
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't assign an array of values to a single position
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each term/doc pair; we can just update them.
            # Otherwise we would have added new terms.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
        # cache it in the DF cache, letting the TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements c<fim_suffix>ontain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
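The `and_query` and `or_query` methods near the end of this row reduce the per-term `match()` masks: AND-ing them for conjunctive search, or summing them against a `min_should_match` threshold. A minimal standalone sketch of that masking logic, using plain NumPy and made-up match masks rather than a real SearchArray:

import numpy as np

# One row per query term, one column per document (illustrative values only).
masks = np.array([[True, False, True],    # docs matching "foo"
                  [True, True,  False]])  # docs matching "bar"

and_mask = masks.all(axis=0)         # same effect as chaining `mask & curr_mask`
or_mask = masks.sum(axis=0) >= 1     # or_query with min_should_match=1
assert list(and_mask) == [True, False, False]
assert list(or_mask) == [True, True, True]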
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
    Each returned array represents a single term, with the key as the MSBs, i.e.:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
        Each returned array represents a single term, with the key as the MSBs, i.e.:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""D<fim_suffix>ecode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
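The RoaringishEncoder in this row packs each (key, position) pair into a 64-bit word: the key in the top bits, the position's chunk index (position // 18) in the middle, and a one-hot bit for position % 18 at the bottom (28/18/18 by default). A hand-rolled sketch of that packing for a single document, independent of the class itself; the helper name and the example values are purely illustrative:

import numpy as np
from typing import Dict, List

KEY_BITS, MSB_BITS, LSB_BITS = 28, 18, 18   # the default split described above
assert KEY_BITS + MSB_BITS + LSB_BITS == 64

def encode_one_doc(doc_id: int, positions: List[int]) -> np.ndarray:
    """Pack one doc's positions into words laid out as key | chunk index | bitmask."""
    words: Dict[int, int] = {}
    for p in positions:
        header = (doc_id << (MSB_BITS + LSB_BITS)) | ((p // LSB_BITS) << LSB_BITS)
        words[header] = words.get(header, 0) | (1 << (p % LSB_BITS))
    return np.asarray([h | bits for h, bits in sorted(words.items())], dtype=np.uint64)

encoded = encode_one_doc(5, [0, 3, 20])
# Positions 0 and 3 share chunk 0 (bits 0 and 3 set); position 20 lands in chunk 1, bit 2.
assert len(encoded) == 2
assert encoded[0] == (5 << 36) | 0b1001
assert encoded[1] == (5 << 36) | (1 << 18) | 0b100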
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
    Each returned array represents a single term, with the key as the MSBs, i.e.:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
        Each returned array represents a single term, with the key as the MSBs, i.e.:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarra<fim_suffix>y of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
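This second copy of roaringish.py ends at convert_keys; its intersect methods work on the headers left after shifting away the 18 payload LSBs, so two encoded lists match wherever they cover the same key and the same 18-position chunk. A rough sketch of that idea, with np.intersect1d standing in for the sortednp galloping intersect the module actually uses, and hand-built, illustrative encoded values:

import numpy as np

LSB_BITS = np.uint64(18)

lhs = np.array([(3 << 36) | (0 << 18) | 0b0010,   # doc 3, chunk 0
                (7 << 36) | (2 << 18) | 0b1000],  # doc 7, chunk 2
               dtype=np.uint64)
rhs = np.array([(3 << 36) | (0 << 18) | 0b0100,   # doc 3, chunk 0 -> shared header
                (9 << 36) | (1 << 18) | 0b0001],  # doc 9, chunk 1
               dtype=np.uint64)

# Dropping the payload LSBs leaves the (key | chunk) header that intersect() matches on.
shared = np.intersect1d(lhs >> LSB_BITS, rhs >> LSB_BITS)
assert len(shared) == 1   # only doc 3 / chunk 0 appears in both lists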
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
    # lhs has its most significant payload bit set and rhs its least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigr<fim_suffix>am matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
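# Illustrative sketch only -- not part of searcharray's API. The core trick inside
# inner_bigram_freqs above: within one payload word, term A at position p followed by
# term B at position p + 1 shows up as a set bit in lhs & (rhs >> 1), and the popcount
# of that overlap is the bigram count for the word.
def _bigram_bitmask_sketch():
    lsb_bits = 18
    lhs = (1 << 2) | (1 << 7)      # term A at positions 2 and 7
    rhs = (1 << 3) | (1 << 9)      # term B at positions 3 and 9
    assert max(2, 3, 7, 9) < lsb_bits     # all positions fit in a single payload word
    overlap = lhs & (rhs >> 1)     # bit p set  <=>  bigram at (p, p + 1)
    assert bin(overlap).count("1") == 1   # exactly one bigram: (2, 3)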
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here the key is probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
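`PosnBitArray._computed_term_freqs` above derives term frequencies by popcounting each word's 18 payload bits and summing per document with `np.add.reduceat`. A compact standalone sketch of that computation on two hand-built encoded words (28/18/18 layout as in the defaults; `bin(...).count` stands in for the library's `bit_count64`):

import numpy as np

LSB_MASK = (1 << 18) - 1

encoded = np.array([(0 << 36) | (0 << 18) | 0b1011,   # doc 0, chunk 0, positions {0, 1, 3}
                    (1 << 36) | (0 << 18) | 0b0100],  # doc 1, chunk 0, position {2}
                   dtype=np.uint64)

doc_ids = (encoded >> np.uint64(36)).astype(np.int64)     # like encoder.keys()
popcounts = np.array([bin(int(w) & LSB_MASK).count("1") for w in encoded])
change = np.concatenate(([0], np.nonzero(np.diff(doc_ids))[0] + 1))
term_freqs = np.add.reduceat(popcounts, change)
assert list(term_freqs) == [3, 1]   # doc 0 has tf=3, doc 1 has tf=1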
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
        # (For testing/assertions) - Confirm every term in positions is also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        if same_postings and self.doc_len == other.doc_len:
            return True
        return False
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each term/doc pair; we can just update them.
            # Otherwise we would have added new terms.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating w<fim_suffix>hich elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let the TF cache know they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
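# A hedged sketch of the phrase APIs shown above (example strings are assumptions;
# SearchArray.index is the indexing constructor defined in postings.py):
# arr = SearchArray.index(["cat in the hat", "the hat is back", "no hats here"])
# arr.match(["the", "hat"])        # boolean mask: which docs contain the phrase
# arr.phrase_freq(["the", "hat"])  # per-doc phrase frequency
# arr.score(["the", "hat"])        # BM25 phrase scores via default_bm25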
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
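# For example, with the default 28-bit key this reproduces DEFAULT_KEY_MASK above:
# n_msb_mask(np.uint64(28)) == np.uint64(0xFFFFFFF000000000)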
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with the key packed into the MSBs, i.e.
(with the default 28-bit key):
| 28 MSBs | 18 bits      | 18 LSBs      |
| key     | payload MSBs | payload LSBs |
(a different split of key / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with the key packed into the MSBs, i.e.
(with the default 28-bit key):
| 28 MSBs | 18 bits      | 18 LSBs      |
| key     | payload MSBs | payload LSBs |
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payl<fim_suffix>oad MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64)
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
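# A hedged round-trip sketch of RoaringishEncoder.encode()/decode() (values are assumptions):
# enc = RoaringishEncoder()
# encoded, _ = enc.encode(payload=np.asarray([1, 5, 3], dtype=np.uint64),
#                         keys=np.asarray([0, 0, 1], dtype=np.uint64))
# enc.decode(encoded)   # groups payloads by key, roughly [(0, array([1, 5])), (1, array([3]))]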
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java mm-parsing code:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
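# A worked sketch of the spec formats handled above (results follow the code paths):
# parse_min_should_match(5, "2")      -> 2   (absolute count)
# parse_min_should_match(5, "-1")     -> 4   (negative: all but one clause)
# parse_min_should_match(4, "75%")    -> 3   (percentage of the clauses)
# parse_min_should_match(3, "2<75%")  -> 2   (conditional: above 2 clauses, require 75%)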
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
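# For example:
# parse_field_boosts(["title^10", "body"]) -> {"title": 10.0, "body": None}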
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Ru<fim_suffix>n edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-row scores and an explanation string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
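# A hedged end-to-end sketch of edismax (column names and data are illustrative assumptions):
# df = pd.DataFrame({"title": ["cat in the hat", "green eggs and ham"],
#                    "body":  ["a classic hat story", "breakfast preferences"]})
# df["title"] = SearchArray.index(df["title"])
# df["body"] = SearchArray.index(df["body"])
# scores, explain = edismax(df, q="cat hat", qf=["title^10", "body"], mm="1", pf=["title"])
# scores is an np.ndarray of per-row scores; explain is a Lucene-style explanation string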
<filename>searcharray/searcharray/utils/row_viewable_matrix.py<fim_prefix>import numbers
import numpy as np
from searcharray.utils.mat_set import SparseMatSet
from typing import Optional, Union, Dict, List
def rowwise_eq(mat: SparseMatSet, other: SparseMatSet) -> Union[bool, np.ndarray]:
"""Check equals on a<fim_suffix> row-by-row basis."""
if len(mat) != len(other):
return False
row_eq = np.zeros(mat.shape[0], dtype=np.dtype('bool'))
for row_idx in range(len(mat)):
if np.all(mat[row_idx] == other[row_idx]):
row_eq[row_idx] = True
return row_eq
class RowViewableMatrix:
"""A slicable matrix that can return views without copying."""
def __init__(self, mat: SparseMatSet, rows: Optional[np.ndarray] = None, subset=False):
self.mat = mat
self.col_cache: Dict[int, np.ndarray] = {}
self.cols_cached: List[int] = []
if rows is None:
self.rows = np.arange(self.mat.shape[0])
elif isinstance(rows, numbers.Integral):
self.rows = np.array([rows])
else:
self.rows = rows
self.subset = subset
def slice(self, keys):
return RowViewableMatrix(self.mat, self.rows[keys], subset=True)
def __setitem__(self, keys, values):
# Replace nan with 0
self.col_cache = {}
self.cols_cached = []
actual_keys = self.rows[keys]
if isinstance(actual_keys, numbers.Integral):
self.mat[actual_keys] = values
elif len(actual_keys) > 0:
self.mat[actual_keys] = values
def copy_row_at(self, row):
return self.mat[self.rows[row]]
def copy(self):
return RowViewableMatrix(self.mat.copy(), self.rows.copy(), subset=self.subset)
def cols_per_row(self):
return self.mat[self.rows].num_cols_per_row()
def copy_col_at(self, col):
if col not in self.col_cache:
self.col_cache[col] = self.mat[self.rows, col]
self.cols_cached.append(col)
if len(self.cols_cached) > 10:
del self.col_cache[self.cols_cached.pop(0)]
return self.col_cache[col]
def __getitem__(self, key):
if isinstance(key, numbers.Integral):
return self.copy_row_at(key)
else:
return self.slice(key)
@property
def nbytes(self):
return self.mat.nbytes + \
self.rows.nbytes
@property
def shape(self):
return (len(self.rows), self.mat.shape[1])
def resize(self, shape):
self.mat.ensure_capacity(shape[0] - 1)
def __len__(self):
return len(self.rows)
def __repr__(self):
return f"RowViewableMatrix({repr(self.mat)}, {repr(self.rows)})"
def __str__(self):
return f"RowViewableMatrix({str(self.mat)}, {str(self.rows)})"
def __eq__(self, other):
return rowwise_eq(self.mat[self.rows], other.mat[other.rows])
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
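# A brief sketch of the view-without-copy behavior above (SparseMatSet construction is assumed):
# viewable = RowViewableMatrix(mat)        # wraps every row of mat
# subset = viewable[np.array([0, 2, 5])]   # a new RowViewableMatrix sharing the same mat
# subset.shape                             # (3, mat.shape[1])
# subset[0]                                # returns row 0 of the subset, i.e. mat row 0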
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java mm-parsing code:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boo<fim_suffix>sts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-row scores and an explanation string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
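# Note on q_op above: when mm is not supplied and q_op="AND", mm is forced to "100%",
# i.e. every query term must match:
# scores, explain = edismax(df, q="cat hat", qf=["title"], q_op="AND")  # df indexed as in the earlier sketch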
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
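# A minimal indexing sketch (example strings assumed):
#   arr = SearchArray.index(["the cat sat", "the dog ran"])
#   len(arr)              # 2
#   arr.termfreqs("the")  # array([1, 1])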
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we already have positions for each (term, doc) pair and can simply update them.
# Otherwise a TermMissingError is raised and the new terms are added below
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Sco<fim_suffix>re each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let the TF cache know they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
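# For example, on an indexed array `arr` (terms assumed present in the index):
# arr.and_query(["cat", "hat"])                     # docs containing both terms
# arr.or_query(["cat", "hat"], min_should_match=1)  # docs containing at least one of them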
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find a<fim_suffix>djacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs has its most significant payload bit set and rhs has its least significant payload bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
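# Taken together: a bigram can either sit inside a single encoded 64-bit word
# (inner_bigram_freqs) or straddle the boundary between two adjacent words
# (adjacent_bigram_freqs); bigram_freqs accumulates both contributions into phrase_freqs.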
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (zeroing docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
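# A hedged build sketch (doc ids, term ids and positions are assumptions):
# builder = PosnBitArrayBuilder()
# builder.add_posns(doc_id=0, term_id=3, posns=[1, 5])
# builder.ensure_capacity(0)
# posn_arr = builder.build(check=True)   # check=True round-trips the encoding via decode()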
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here the key is probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
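A minimal usage sketch of the position-index builder shown in the row above; the import of PosnBitArrayBuilder from searcharray.phrase.middle_out is an assumption (later rows in this dump only confirm that PosnBitArray lives in that module), and the doc/term ids are toy values.

from searcharray.phrase.middle_out import PosnBitArrayBuilder  # assumed export alongside PosnBitArray

builder = PosnBitArrayBuilder()
builder.add_posns(doc_id=0, term_id=0, posns=[1, 5, 9])  # term 0 occurs three times in doc 0
builder.add_posns(doc_id=1, term_id=0, posns=[2])        # and once in doc 1
builder.ensure_capacity(1)                               # record the largest doc id seen
posn_arr = builder.build(check=True)                     # check=True round-trips the encoding with asserts

print(posn_arr.docfreq(0))                 # expected: 2 (both docs contain term 0)
doc_ids, tfs = posn_arr.termfreqs(0)       # expected: doc_ids=[0 1], tfs=[3 1]
print(posn_arr.positions(0, doc_ids=[0]))  # expected: [array([1, 5, 9])]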
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# W<fim_suffix>hen other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
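A short sketch of driving the SearchArray extension type above from pandas; the import path mirrors the one used by searcharray/solr.py later in this dump, and the three toy documents exist only for illustration.

import pandas as pd
from searcharray.postings import SearchArray

df = pd.DataFrame({"title": ["the quick brown fox", "the lazy dog", "quick quick fox"]})

# Index with the default whitespace tokenizer; the column now holds Terms rows.
df["title_indexed"] = SearchArray.index(df["title"])

arr = df["title_indexed"].array
print(arr.termfreqs("quick"))       # per-doc term frequencies, e.g. [1 0 2]
print(arr.docfreq("quick"))         # number of docs containing the term, e.g. 2
print(arr.match(["quick", "fox"]))  # phrase match mask, e.g. [False False True]
print(arr.score("quick"))           # BM25 scores from the default similarity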
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive <fim_suffix>tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
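Because score() above invokes its similarity argument as a plain callable with keyword arguments (term_freqs, doc_freqs, doc_lens, avg_doc_lens, num_docs), a custom ranking function only needs to accept that signature. The TF*IDF formula below is a toy stand-in, not part of searcharray.

import numpy as np

def tf_idf_similarity(term_freqs, doc_freqs, doc_lens, avg_doc_lens, num_docs):
    """Toy TF*IDF scorer matching the keyword signature score() uses."""
    # doc_lens / avg_doc_lens are available for length normalization but unused in this toy.
    idf = np.log1p(num_docs / (1.0 + doc_freqs))  # one IDF per query token
    return term_freqs * idf.sum()                 # weight per-doc (phrase) freqs by the summed IDF

# scores = df["title_indexed"].array.score("quick", similarity=tf_idf_similarity)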
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwis<fim_suffix>e, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
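A quick sketch of the solr.py helpers above, run against a dataframe whose columns were indexed with SearchArray as in the earlier postings.py rows; the import path and column names are assumptions, and the mm examples only restate what parse_min_should_match computes.

from searcharray.solr import edismax, parse_min_should_match  # assumed module path

# Assumes df has SearchArray-indexed columns "title_indexed" and "body_indexed".
scores, explain = edismax(df, q="quick fox",
                          qf=["title_indexed^2", "body_indexed"],  # boost title matches 2x
                          pf=["title_indexed"],                    # reward phrase matches in title
                          mm="2<75%")                              # all terms up to 2 clauses, else 75%
print(explain)                       # human-readable description of the scored query
top10 = scores.argsort()[::-1][:10]  # row indices of the ten best scores

# Solr-style mm parsing, per the function above:
#   parse_min_should_match(2, "2<75%")  -> 2  (2 clauses <= 2, so all must match)
#   parse_min_should_match(4, "2<75%")  -> 3  (4 clauses > 2, so 75% of 4 = 3)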
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # cei<fim_suffix>ling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
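# A usage sketch for PosnBitArrayBuilder above (the doc ids, term ids and
# positions are made up; assumes the searcharray package these samples come from):
builder = PosnBitArrayBuilder()
builder.add_posns(doc_id=0, term_id=7, posns=[1, 5, 9])
builder.add_posns(doc_id=2, term_id=7, posns=[4])
builder.ensure_capacity(doc_id=2)
posn_arr = builder.build(check=True)    # check=True round-trips encode/decode
doc_ids, tfs = posn_arr.termfreqs(7)    # doc ids [0, 2] with term freqs [3, 1]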
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here it's probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
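The per-document term frequency computation in _computed_term_freqs above reduces to two numpy steps: popcount the payload bits of each encoded word, then sum those counts per document with np.add.reduceat wherever the doc id changes. A minimal, self-contained sketch of that reduction, with made-up doc ids and bit counts:

import numpy as np
doc_ids = np.array([0, 0, 3, 7, 7, 7])      # doc id per encoded word, already sorted
bit_counts = np.array([2, 1, 4, 1, 1, 3])   # set payload bits per word
change = np.concatenate(([0], np.nonzero(np.diff(doc_ids))[0] + 1))
term_freqs = np.add.reduceat(bit_counts, change)
print(np.unique(doc_ids), term_freqs)        # [0 3 7] [3 4 5]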
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        if same_postings and self.doc_len == other.doc_len:
            return True
        return False
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
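# A small sketch of the Terms container defined above (values are illustrative):
example_doc = Terms({"quick": 1, "fox": 2}, doc_len=3,
                    posns={"quick": [0], "fox": [1, 2]})
example_doc.termfreq("fox")      # -> 2
example_doc.positions("fox")     # -> [1, 2]
len(example_doc)                 # -> 2 distinct terms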
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each (term, doc) pair. We can just update them.
            # Otherwise a missing term raises TermMissingError and we add the new terms below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we sho<fim_suffix>uld?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first and store it in the DF cache,
        # which lets the TF cache know those term freqs should be cached too
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
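A short usage sketch of the SearchArray from the sample above. The column name and document strings are made up, and this assumes the searcharray package these samples are drawn from is installed; the outputs in the comments follow from those made-up documents.

import pandas as pd
from searcharray.postings import SearchArray
df = pd.DataFrame({"title": ["the quick brown fox", "the lazy dog", "quick quick fox"]})
df["title_indexed"] = SearchArray.index(df["title"])
df["title_indexed"].array.termfreqs("quick")       # -> array([1, 0, 2])
df["title_indexed"].array.match(["quick", "fox"])  # phrase "quick fox" -> [False, False, True]
df["title_indexed"].array.score("fox")             # BM25 scores, one per row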
<filename>searcharray/searcharray/similarity.py<fim_prefix>"""Similarity functions given term stats."""
from typing import Protocol
import numpy as np
class Similarity(Protocol):
"""Similarity function protocol."""
def __call__(self, term_freqs: np.ndarray, doc_freqs: np.ndarray, doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate similarity scores."""
...
def bm25_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity function, as in Lucene 9."""
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freq<fim_suffix>s
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = term_freqs / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
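# A tiny worked example of the closure above (all numbers are illustrative):
example_bm25 = bm25_similarity()                          # defaults: k1=1.2, b=0.75
example_scores = example_bm25(term_freqs=np.array([3.0, 0.0, 1.0]),
                              doc_freqs=np.array([2]),    # term occurs in 2 of 3 docs
                              doc_lens=np.array([10.0, 8.0, 12.0]),
                              avg_doc_lens=10,
                              num_docs=3)
# idf = ln(1 + (3 - 2 + 0.5) / (2 + 0.5)) = ln(1.6); docs with tf == 0 score 0.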
def bm25_legacy_similarity(k1: float = 1.2, b: float = 0.75) -> Similarity:
"""BM25 similarity prior to LUCENE-8563 with k1 + 1 in numerator."""
# (freq * (k1 + 1)) / (freq + k1 * (1 - b + b * fieldLength / avgFieldLength))
def bm25(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate BM25 scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf
idf = np.log(1 + (num_docs - sum_dfs + 0.5) / (sum_dfs + 0.5))
# Calculate tf
tf = (term_freqs * (k1 + 1)) / (term_freqs + k1 * (1 - b + b * doc_lens / avg_doc_lens))
return idf * tf
return bm25
def classic_similarity() -> Similarity:
"""Classic Lucene TF-IDF similarity function."""
def classic(term_freqs: np.ndarray, doc_freqs: np.ndarray,
doc_lens: np.ndarray,
avg_doc_lens: int, num_docs: int) -> np.ndarray:
"""Calculate classic TF-IDF scores."""
# Sum doc freqs
sum_dfs = np.sum(doc_freqs, axis=0)
# Calculate idf as log((docCount+1)/(docFreq+1)) + 1
idf = np.log((num_docs + 1) / (sum_dfs + 1)) + 1
length_norm = 1.0 / np.sqrt(doc_lens)
# Calculate tf
tf = np.sqrt(term_freqs)
return idf * tf * length_norm
return classic
default_bm25 = bm25_similarity()
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
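Because score accepts any callable matching the Similarity protocol above, swapping BM25 parameters or the whole scoring scheme is a one-liner. A hedged sketch, reusing the illustrative title_indexed column from the earlier example:

from searcharray.similarity import bm25_similarity, classic_similarity
flat_bm25 = bm25_similarity(k1=0.9, b=0.4)    # weaker length normalization than the default
df["title_indexed"].array.score("fox", similarity=flat_bm25)
df["title_indexed"].array.score("fox", similarity=classic_similarity())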
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
        if same_postings and self.doc_len == other.doc_len:
            return True
        return False
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each (term, doc) pair. We can just update them.
            # Otherwise a missing term raises TermMissingError and we add the new terms below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Co<fim_suffix>unt number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first and store it in the DF cache,
        # which lets the TF cache know those term freqs should be cached too
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
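The boolean helpers and the slop parameter in this sample compose in the obvious way. Another hedged sketch against the same illustrative title_indexed column from the earlier example:

arr = df["title_indexed"].array
both = arr.and_query(["quick", "fox"])                    # rows containing both terms
either = arr.or_query(["quick", "dog"], min_should_match=1)
loose = arr.phrase_freq(["quick", "fox"], slop=2) > 0     # looser phrase matching via slop
hits = df[both & loose]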
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# Pre-made uint64 constants so we don't constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
        adjacents -= -np.floor_divide(adjacents, -2)  # subtract ceil(n/2), keeping floor(n/2)
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
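# A self-contained illustration of the bit trick above: within one payload word,
# the lhs term at position p is immediately followed by the rhs term at p + 1
# exactly where lhs & (rhs >> 1) has a set bit (positions here are made up).
_lhs_bits = (1 << 2) | (1 << 5)     # lhs term at positions 2 and 5
_rhs_bits = (1 << 3) | (1 << 9)     # rhs term at positions 3 and 9
_adjacent = _lhs_bits & (_rhs_bits >> 1)
assert bin(_adjacent).count("1") == 1   # only the (2, 3) pair is adjacent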
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
    # match where the lhs payload's most significant bit and the rhs payload's least significant bit are both set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two str<fim_suffix>ategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
        # Only keep the count of the final bigram (zero out docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here it's probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
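# A quick sketch of index_range's dispatch above (inputs are illustrative):
index_range(range(10), None)                    # -> range(0, 10), untouched
index_range(np.arange(10), slice(2, 5))         # -> array([2, 3, 4])
index_range(range(10), np.asarray([1, 3, 4]))   # -> array([1, 3, 4]) (copies via list())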
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
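A minimal sketch of the np.add.reduceat counting pattern that _computed_term_freqs above relies on: with one sorted doc id per encoded word and the number of set position bits in each word, summing the bit counts within each run of equal doc ids yields a term frequency per document. Toy arrays only; this is not the searcharray API.

import numpy as np

doc_ids = np.array([0, 0, 2, 2, 2, 5])      # doc id of each encoded word (sorted)
bit_counts = np.array([3, 1, 2, 2, 1, 4])   # number of set position bits per word

# Start index of every run of equal doc ids, then reduce within each run.
change = np.concatenate(([0], np.nonzero(np.diff(doc_ids))[0] + 1))
term_freqs = np.add.reduceat(bit_counts, change)
unique_docs = doc_ids[change]
print(unique_docs, term_freqs)   # [0 2 5] [4 5 4]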
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
        # Position 0 within a block sets the lowest payload bit,
        # position 1 sets the next bit, and so on
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# U<fim_suffix>NFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle> | null | LINE_COMMENT | complete_current_header_empty_completion |
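A hand-packed illustration of the key/MSB/LSB layout the encoder above documents, assuming the default 28/18/18 split: the top 28 bits hold the key, the next 18 bits say which block of 18 positions the payload belongs to, and the low 18 bits are a bitmask over positions within that block. Illustrative only; the real RoaringishEncoder packs whole arrays and ORs together words that share the same key and MSB block.

import numpy as np

MSB_BITS = np.uint64(18)
LSB_BITS = np.uint64(18)   # the remaining top 28 bits hold the key

def pack_one(key: int, posn: int) -> np.uint64:
    msb_block = np.uint64(posn) // LSB_BITS                   # which block of 18 positions
    lsb_bit = np.uint64(1) << (np.uint64(posn) % LSB_BITS)    # bit within the block
    return (np.uint64(key) << (MSB_BITS + LSB_BITS)) | (msb_block << LSB_BITS) | lsb_bit

def unpack_key(word: np.uint64) -> int:
    return int(word >> (MSB_BITS + LSB_BITS))

word = pack_one(key=5, posn=40)   # position 40 -> block 2, bit 4
assert unpack_key(word) == 5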
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have a positions for each term, doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first and cache it in the
        # DF cache, so the TF cache knows those tokens should be cached too
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set<fim_suffix>(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | IF | complete_current_header_empty_completion |
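The phrase_freq path above ultimately asks, per document, how many positions of one term are immediately followed by a position of the next term. A toy two-term version over plain position arrays (not the encoded representation searcharray actually works on):

import numpy as np

def two_term_phrase_freq(posns_first: np.ndarray, posns_second: np.ndarray) -> int:
    # Count positions p of the first term such that p + 1 is a position of the second.
    return int(np.intersect1d(posns_first + 1, posns_second).size)

# "to be" in "to be or not to be": "to" at [0, 4], "be" at [1, 5] -> 2 matches
assert two_term_phrase_freq(np.array([0, 4]), np.array([1, 5])) == 2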
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have a positions for each term, doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance<fim_suffix>(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first and cache it in the
        # DF cache, so the TF cache knows those tokens should be cached too
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | IF | complete_current_header_empty_completion |
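A usage sketch of the public surface shown in the sample above (SearchArray.index, match, score), assuming the searcharray package is installed and behaves as that code suggests:

import pandas as pd
from searcharray.postings import SearchArray

df = pd.DataFrame({"title": ["the cat sat", "a dog barked", "the cat barked"]})
df["title_indexed"] = SearchArray.index(df["title"])   # default whitespace tokenizer

has_cat = df["title_indexed"].array.match("cat")       # boolean mask, one entry per doc
bm25 = df["title_indexed"].array.score("barked")       # BM25 scores with the default similarity
print(has_cat, bm25)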
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
        # Position 0 within a block sets the lowest payload bit,
        # position 1 sets the next bit, and so on
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if<fim_suffix> isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
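# A minimal, self-contained sketch of the bit layout documented in encode() above.
# The 32/16/16 split below mirrors the docstring; the real widths come from the
# key_bits / payload_msb_bits / payload_lsb_bits attributes, so treat these
# constants and the helper name as illustrative assumptions, not the class API.
def _sketch_pack_position(key: int, position: int) -> int:
    key_bits, lsb_bits = 32, 16
    header = position // lsb_bits        # which group of 16 positions (payload MSBs)
    bit = 1 << (position % lsb_bits)     # one-hot bit inside the group (payload LSBs)
    return (key << (64 - key_bits)) | (header << lsb_bits) | bit
# Positions that share a (key, header) pair OR together into one packed word,
# which is what np.bitwise_or.reduceat over change_indices does in encode().
assert _sketch_pack_position(5, 0) & 0xFFFF == 0b0001
assert _sketch_pack_position(5, 3) & 0xFFFF == 0b1000
assert (_sketch_pack_position(5, 17) >> 16) & 0xFFFF == 1  # header group 1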
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors of the same (large) dimensionality
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
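# A quick sketch of the ordering defined by Terms.__lt__ above: the union of term
# keys is walked lexically and term frequencies are compared as if the two bags
# were sparse vectors, with missing terms counting as frequency 0. The _sketch
# names below are throwaway illustrations, not part of the library.
_a_sketch = Terms({"cat": 1})
_b_sketch = Terms({"cat": 1, "dog": 2})
assert _a_sketch < _b_sketch                   # "dog" has freq 0 in _a_sketch, 2 in _b_sketch
assert min(_b_sketch, _a_sketch) is _a_sketch  # min()/sorted() only need __lt__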
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can just update them.
# Otherwise we would have added new terms.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str)<fim_suffix>:
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token, slop=slop)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let the TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
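# A minimal end-to-end sketch of the search API defined above. The column names
# and documents are illustrative; only SearchArray.index and the methods shown
# earlier in this file are assumed.
if __name__ == "__main__":
    df = pd.DataFrame({"title": ["red fox", "lazy dog", "red dog"]})
    df["title_indexed"] = SearchArray.index(df["title"])
    print(df["title_indexed"].array.termfreqs("red"))       # term frequency per document
    print(df["title_indexed"].array.match(["red", "dog"]))  # phrase match mask for "red dog"
    print(df["title_indexed"].array.score("dog"))           # BM25 (default_bm25) score per document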
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java code for parsing the mm spec:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses<fim_suffix> <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
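# A few worked examples of the mm grammar handled above, kept as lightweight
# self-checks; they only exercise parse_min_should_match as defined in this file.
assert parse_min_should_match(3, "2") == 2       # absolute count
assert parse_min_should_match(3, "-1") == 2      # all but one clause
assert parse_min_should_match(4, "75%") == 3     # percentage, rounded down
assert parse_min_should_match(2, "3<90%") == 2   # <= 3 clauses: require them all
assert parse_min_should_match(5, "3<90%") == 4   # > 3 clauses: fall back to 90%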
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
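# For example, the Solr "field^boost" syntax parses as below; unboosted fields map
# to None so callers can fall back to a boost of 1.
assert parse_field_boosts(["title^10", "body"]) == {"title": 10.0, "body": None}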
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and an explain string describing the scored query
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
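# A minimal usage sketch of edismax() above, run against a throwaway two-field
# frame. The field names, documents, and boosts are illustrative assumptions.
if __name__ == "__main__":
    frame = pd.DataFrame({"title": ["red fox", "lazy dog"],
                          "body": ["the quick red fox", "the lazy brown dog"]})
    frame["title"] = SearchArray.index(frame["title"])
    frame["body"] = SearchArray.index(frame["body"])
    scores, explain = edismax(frame, q="red fox",
                              qf=["title^10", "body"], mm="2", pf=["title"])
    print(scores)   # combined per-document scores
    print(explain)  # explain string describing the scored query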
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java code for parsing the mm spec:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
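# A sketch of the term-centric / field-centric switch above: fields whose tokenizers
# produce different term counts for the same query push edismax into field-centric
# scoring. The bigram tokenizer and field names below are illustrative assumptions.
if __name__ == "__main__":
    def bigram_tokenizer(text):
        tokens = text.split()
        return [" ".join(pair) for pair in zip(tokens, tokens[1:])]
    frame = pd.DataFrame({"title": ["red fox"], "body": ["the red fox jumps"]})
    frame["title"] = SearchArray.index(frame["title"])                        # whitespace tokens
    frame["body"] = SearchArray.index(frame["body"], tokenizer=bigram_tokenizer)
    _, _, term_centric = parse_query_terms(frame, "red fox", ["title", "body"])
    print(term_centric)  # False: 2 whitespace terms in title vs 1 bigram in body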
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and an explain string describing the scored query
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is N<fim_suffix>one:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can just update them.
# Otherwise we would have added new terms.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_ma<fim_suffix>t.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token, slop=slop)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let the TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
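# The `similarity` argument used by score() above is just a callable taking the
# keyword arguments score() passes; a TF-only sketch (not the shipped BM25) looks
# like this, with the name being an illustrative assumption:
def tf_only_similarity(term_freqs, doc_freqs, doc_lens, avg_doc_lens, num_docs):
    """Score documents by raw term frequency, ignoring IDF and length normalization."""
    return term_freqs.astype(float)
# Usage sketch: SearchArray.index(["red fox", "lazy dog"]).score("red", similarity=tf_only_similarity)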
<fim_middle> | null | IF | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of Solr's Java code for parsing the mm spec:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and an explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
i<fim_suffix>f len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | IF | complete_current_header_empty_completion |
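As a quick reference for the parse_min_should_match helper defined in the row above, a few illustrative cases follow (a sketch assuming the module is importable as searcharray.solr; the expected values follow the Solr mm semantics implemented above):

from searcharray.solr import parse_min_should_match

assert parse_min_should_match(4, "2") == 2        # absolute number of clauses
assert parse_min_should_match(4, "-1") == 3       # all but one clause must match
assert parse_min_should_match(4, "75%") == 3      # percentage of clauses, truncated
assert parse_min_should_match(4, "2<-25%") == 3   # conditional: with more than 2 clauses, 75% must match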
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
i<fim_suffix>f '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and an explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | IF | complete_current_header_empty_completion |
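The _edismax_term_centric helper in the row above scores each query term as the maximum over fields and then sums those per-term maxima across the query (subject to mm). A small numpy illustration of that max-then-sum combination, independent of the library and using toy numbers:

import numpy as np

# One score matrix per field: rows are query terms, columns are documents.
title_scores = np.array([[1.0, 0.0],
                         [0.5, 0.2]])
body_scores = np.array([[0.3, 0.4],
                        [0.0, 0.6]])

per_term_best = np.maximum(title_scores, body_scores)  # best field per term, per doc
doc_scores = per_term_best.sum(axis=0)                 # sum over query terms
print(doc_scores)  # -> [1.5 1.0]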
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if ph<fim_suffix>rase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | IF | complete_current_header_empty_completion |
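The inner_bigram_freqs function in the row above counts "term B immediately follows term A" by storing token positions as set bits in a 64-bit payload and intersecting A's bits with B's bits shifted down by one. A self-contained illustration of that idea with plain Python integers (not the library's roaringish encoder):

def count_adjacent(lhs_positions, rhs_positions):
    """Count positions p where lhs occurs at p and rhs occurs at p + 1."""
    lhs_bits = 0
    for p in lhs_positions:
        lhs_bits |= 1 << p
    rhs_bits = 0
    for p in rhs_positions:
        rhs_bits |= 1 << p
    overlap = lhs_bits & (rhs_bits >> 1)  # rhs at p + 1 aligns with lhs at p
    return bin(overlap).count("1")

print(count_adjacent([0, 5, 9], [1, 3, 6]))  # -> 2 (pairs 0->1 and 5->6)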
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool<fim_suffix>)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, letting the TF cache know they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
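A short usage sketch for the SearchArray extension array defined in the row above (toy data; assumes the searcharray package is installed and that the default whitespace tokenizer is acceptable):

import pandas as pd
from searcharray.postings import SearchArray

df = pd.DataFrame({"title": ["the quick brown fox",
                             "lazy dogs sleep",
                             "quick quick fox"]})
df["title_indexed"] = SearchArray.index(df["title"])

arr = df["title_indexed"].array
print(arr.termfreqs("quick"))        # term frequency of "quick" per document
print(arr.score("quick"))            # BM25 scores per document
print(arr.match(["quick", "fox"]))   # boolean mask of docs containing the phrase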
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.<fim_suffix>zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and an explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
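And a hedged end-to-end sketch of calling the edismax helper from the row above (field names, boosts, and the query are made up for illustration):

import pandas as pd
from searcharray.postings import SearchArray
from searcharray.solr import edismax

df = pd.DataFrame({"title": ["quick brown fox", "lazy dog"],
                   "body": ["the fox jumps", "the dog sleeps"]})
df["title"] = SearchArray.index(df["title"])
df["body"] = SearchArray.index(df["body"])

scores, explain = edismax(df, q="quick fox",
                          qf=["title^2", "body"],
                          pf=["title"], mm="1")
print(explain)  # textual breakdown of how the query was scored
print(scores)   # one combined score per row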
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can simply update them.
# Otherwise new terms were added, which is handled below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token, slop=slop)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first so it lands in the DF cache,
# which lets the TF cache know the term should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mas<fim_suffix>k & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors over the same high-dimensional term space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
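# Usage sketch (illustrative; the variable names and texts are examples, not library API):
#   texts = ["cat toys", "squeaky cat toys", "dog beds"]
#   arr = SearchArray.index(texts)   # whitespace-tokenized by default
#   arr.termfreqs("cat")             # -> array([1, 1, 0])
#   arr.docfreq("cat")               # -> 2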
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can simply update them.
# Otherwise new terms were added, which is handled below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
ret<fim_suffix>urn matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token, slop=slop)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first so it lands in the DF cache,
# which lets the TF cache know the term should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
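# Query sketch (comments only), reusing the three-document example above:
#   arr.and_query(["cat", "toys"])                    # -> array([ True,  True, False])
#   arr.or_query(["cat", "dog"], min_should_match=1)  # -> array([ True,  True,  True])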
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
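# Worked examples (comments only; values follow the parsing rules above):
#   parse_min_should_match(4, "2")      # -> 2  (absolute count)
#   parse_min_should_match(4, "-1")     # -> 3  (all but one clause)
#   parse_min_should_match(4, "75%")    # -> 3  (percentage of clauses, truncated)
#   parse_min_should_match(4, "2<75%")  # -> 3  (more than 2 clauses, so 75% applies)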
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
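# Example (comments only): fields are split on '^', and a missing boost becomes None:
#   parse_field_boosts(["title^2.0", "body"])  # -> {'title': 2.0, 'body': None}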
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_score<fim_suffix>s = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and a query explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
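# Minimal usage sketch (assumes a dataframe whose "title" and "body" columns were
# indexed with SearchArray; the column names and data are illustrative only):
#   df = pd.DataFrame({"title": ["cat toys", "dog beds"],
#                      "body": ["squeaky cat toys", "large dog beds"]})
#   df["title"] = SearchArray.index(df["title"])
#   df["body"] = SearchArray.index(df["body"])
#   scores, explain = edismax(df, q="cat toys", qf=["title^2", "body"], mm="2")
#   # scores: numpy array of length len(df); explain: human-readable query summary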
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
return same_postings and self.doc_len == other.doc_len
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors over the same high-dimensional term space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can simply update them.
# Otherwise new terms were added, which is handled below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_ter<fim_suffix>m_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token, slop=slop)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first so it lands in the DF cache,
# which lets the TF cache know the term should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
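# Scoring sketch (illustrative): BM25 (the default similarity) per document, for a
# single token or for a phrase given as a list of tokens.
#   arr = SearchArray.index(["cat toys", "squeaky cat toys", "dog beds"])
#   arr.score("cat")            # BM25 score per doc; 0.0 where "cat" is absent
#   arr.score(["cat", "toys"])  # phrase scoring, driven by phrase frequencies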
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
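A minimal numpy sketch (values invented for illustration) of the mask arithmetic behind or_query above: per-term boolean match masks are summed and compared against min_should_match.

import numpy as np

# Three hypothetical per-term match masks over five documents
masks = [np.array([True, False, True, False, True]),
         np.array([True, True, False, False, True]),
         np.array([False, False, False, False, True])]
min_should_match = 2
# A document qualifies when at least min_should_match of the terms match it
mask = np.sum(masks, axis=0) >= min_should_match
print(mask)  # [ True False False False  True]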
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lh<fim_suffix>s_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
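A minimal round-trip sketch for the RoaringishEncoder defined above; the doc ids and positions are invented, and the import path is assumed from the <filename> header.

import numpy as np
from searcharray.utils.roaringish import RoaringishEncoder

enc = RoaringishEncoder()
doc_ids = np.asarray([0, 0, 1], dtype=np.uint64)  # keys, grouped together
posns = np.asarray([0, 1, 18], dtype=np.uint64)   # sorted positions per key
encoded, _ = enc.encode(payload=posns, keys=doc_ids)
# Each uint64 packs the key (MSBs), the payload MSBs, and a bitset of positions (LSBs)
decoded = enc.decode(encoded)
print(decoded)  # key 0 -> positions [0, 1], key 1 -> position [18]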
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encod<fim_suffix>ed & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64) + keys[0]
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
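A small sketch exercising two helpers from the module above, n_msb_mask and convert_keys; the values are illustrative only and the import path is assumed from the <filename> header.

import numpy as np
from searcharray.utils.roaringish import n_msb_mask, convert_keys

# 28 most significant bits set reproduces DEFAULT_KEY_MASK
assert n_msb_mask(np.uint64(28)) == np.uint64(0xFFFFFFF000000000)
# convert_keys normalizes scalar or list doc ids to uint64 arrays
print(convert_keys(5))                # [5]
print(convert_keys([1, 2, 3]).dtype)  # uint64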
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2)  # subtract ceil(adjacents / 2), i.e. keep floor(adjacents / 2)
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant position bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.a<fim_suffix>rray([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
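A minimal sketch of building a PosnBitArray with the PosnBitArrayBuilder defined above; the doc ids, term id, and positions are invented, and the import path is assumed from the <filename> header.

import numpy as np
from searcharray.phrase.middle_out import PosnBitArrayBuilder

builder = PosnBitArrayBuilder()
builder.add_posns(doc_id=0, term_id=0, posns=[1, 5])  # term 0 at positions 1 and 5 in doc 0
builder.add_posns(doc_id=1, term_id=0, posns=[0])     # term 0 at position 0 in doc 1
builder.ensure_capacity(1)
posn_arr = builder.build()
print(posn_arr.docfreq(0))                     # 2 docs contain term 0
print(posn_arr.termfreqs(0))                   # doc ids [0, 1] with freqs [2, 1]
print(posn_arr.positions(0, np.asarray([0])))  # positions of term 0 in doc 0 -> [1, 5]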
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2)  # subtract ceil(adjacents / 2), i.e. keep floor(adjacents / 2)
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant position bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here probably an Ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
retu<fim_suffix>rn decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | STATEMENT | complete_current_header_empty_completion |
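A minimal sketch wiring RoaringishEncoder output into bigram_freqs above; the positions are invented and describe a single document (doc id 0), with import paths assumed from the <filename> headers.

import numpy as np
from searcharray.utils.roaringish import RoaringishEncoder
from searcharray.phrase.middle_out import bigram_freqs

enc = RoaringishEncoder()
# Doc 0: term A at positions [0, 4], term B at positions [1, 9] -> one adjacent pair (0, 1)
lhs, _ = enc.encode(payload=np.asarray([0, 4], dtype=np.uint64), keys=np.asarray([0, 0], dtype=np.uint64))
rhs, _ = enc.encode(payload=np.asarray([1, 9], dtype=np.uint64), keys=np.asarray([0, 0], dtype=np.uint64))
phrase_freqs = np.zeros(1)  # one slot per doc id
phrase_freqs, _ = bigram_freqs(lhs, rhs, phrase_freqs)
print(phrase_freqs)  # [1.]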
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fi<fim_suffix>elds.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and an explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | FOR | complete_current_header_empty_completion |
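A small sketch of the Solr-style helpers above; the mm specs and boost strings are examples only, with the import path assumed from the <filename> header.

from searcharray.solr import parse_min_should_match, parse_field_boosts

# "2<75%": with 2 or fewer clauses all must match, above 2 clauses 75% must match
print(parse_min_should_match(2, "2<75%"))  # 2
print(parse_min_should_match(4, "2<75%"))  # 3
print(parse_min_should_match(4, "-1"))     # 3 (all but one clause)
# qf / pf style boosts use "field^boost", with the boost optional
print(parse_field_boosts(["title^10", "body"]))  # {'title': 10.0, 'body': None}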
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in que<fim_suffix>ry_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and an explain string describing the scoring
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
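# Minimal usage sketch (added for illustration; not part of the original module),
# assuming a DataFrame whose text columns were indexed with SearchArray.index:
#
#     df = pd.DataFrame({"title": ["cat dog", "dog"], "body": ["a cat", "a dog"]})
#     df["title"] = SearchArray.index(df["title"])
#     df["body"] = SearchArray.index(df["body"])
#     scores, explain = edismax(df, q="cat dog", qf=["title^10", "body"],
#                               pf=["title"], mm="2")
#     # scores: one float per row; explain: a Lucene-style description of the query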
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
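# Illustrative expectations (added for clarity; not in the original source):
#     parse_min_should_match(3, "2")            -> 2   (absolute count)
#     parse_min_should_match(5, "-1")           -> 4   (all but one clause)
#     parse_min_should_match(4, "75%")          -> 3   (75% of 4, rounded down)
#     parse_min_should_match(10, "2<-25% 9<3")  -> 3   (conditional spec: more than 9 clauses uses "3")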
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
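# For example, parse_field_boosts(["title^10", "body"]) -> {"title": 10.0, "body": None}.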
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and an explain string describing the scoring
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.ite<fim_suffix>ms():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for po<fim_suffix>sn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and an explain string describing the scoring
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s <fim_suffix>in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and an explain string describing the scoring
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_<fim_suffix>search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and an explain string describing the scoring
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/utils/roaringish.py<fim_prefix>"""Roaring-ish bit array for storing sorted integers in numpy array.
See - https://softwaredoug.com/blog/2024/01/21/search-array-phrase-algorithm
"""
import numpy as np
import sortednp as snp
import logging
import numbers
from typing import Optional, Tuple, List, Union
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
DEFAULT_KEY_MASK = np.uint64(0xFFFFFFF000000000)
DEFAULT_KEY_BITS = np.uint64(28)
DEFAULT_PAYLOAD_MSB_MASK = np.uint64(0x0000000FFFFC0000)
DEFAULT_PAYLOAD_MSB_BITS = np.uint64(18)
DEFAULT_PAYLOAD_LSB_MASK = np.uint64(0x000000000003FFFF)
DEFAULT_PAYLOAD_LSB_BITS = np.uint64(18)
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
_algorithm = snp.GALLOPING_SEARCH
def n_msb_mask(n: np.uint64) -> np.uint64:
"""Return the n most significant bits of num."""
return np.uint64(~(np.uint64(_1 << (_64 - n))) + _1)
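# For example, n_msb_mask(np.uint64(28)) == DEFAULT_KEY_MASK and
# n_msb_mask(np.uint64(16)) == np.uint64(0xFFFF000000000000).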
def sorted_unique(arr: np.ndarray) -> np.ndarray:
return snp.intersect(arr, arr, duplicates=snp.DROP)
class RoaringishEncoder:
"""An encoder for key->integer sets as a numpy array.
Each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs | payload
(different number of MSBs / payload bits can be specified)
"""
def __init__(self, key_bits: np.uint64 = DEFAULT_KEY_BITS):
payload_bits = _64 - key_bits
self.payload_msb_bits = payload_bits // _2
self.payload_lsb_bits = np.uint64(payload_bits - self.payload_msb_bits)
self.key_bits = key_bits
assert self.key_bits.dtype == np.uint64
# key bits MSB of 64 bits
self.key_mask = n_msb_mask(key_bits)
self.payload_msb_mask = n_msb_mask(np.uint64(self.payload_msb_bits + key_bits)) & ~self.key_mask
assert self.payload_msb_bits.dtype == np.uint64, f"MSB bits dtype was {self.payload_msb_bits.dtype}"
assert self.payload_msb_mask.dtype == np.uint64, f"MSB mask dtype was {self.payload_msb_mask.dtype}"
self.payload_lsb_mask = (_1 << self.payload_lsb_bits) - np.uint64(1)
assert self.payload_lsb_bits.dtype == np.uint64, f"LSB bits dtype was {self.payload_lsb_bits.dtype}"
assert self.payload_lsb_mask.dtype == np.uint64, f"LSB mask dtype was {self.payload_lsb_mask.dtype}"
if key_bits == DEFAULT_KEY_BITS:
assert self.key_mask == DEFAULT_KEY_MASK
assert self.payload_msb_mask == DEFAULT_PAYLOAD_MSB_MASK
assert self.payload_lsb_mask == DEFAULT_PAYLOAD_LSB_MASK
self.max_payload = np.uint64(2**self.payload_lsb_bits - 1)
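        # With the default 28 key bits, the remaining 36 payload bits split into
        # 18 "block" MSBs and 18 LSBs, so max_payload is 2**18 - 1 = 262143.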
def validate_payload(self, payload: np.ndarray):
"""Optional validation of payload."""
if np.any(payload > self.max_payload):
raise ValueError(f"Positions must be less than {2**self.payload_lsb_bits}")
def encode(self, payload: np.ndarray,
keys: Optional[np.ndarray] = None,
boundaries: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""Pack a sorted array of integers into compact bit numpy array.
each returned array represents a single term, with key as MSBS, ie:
| 32 MSBs | 16 LSBs | 16 LSBs |
key | bits msbs| payload
for later easy intersection of 32+16 msbs, then checking for adjacent
positions
If boundaries are provided, then we consider multiple distinct payloads
being encoded simultaneously, and we return the boundaries of each
"""
cols = np.floor_divide(payload, self.payload_lsb_bits, dtype=np.uint64) # Header of bit to use
cols <<= self.payload_msb_bits
if keys is not None:
cols |= keys.astype(np.uint64) << (_64 - self.key_bits)
values = payload % self.payload_lsb_bits # Value to encode
change_indices_one_doc = np.nonzero(np.diff(cols))[0] + 1
change_indices_one_doc = np.concatenate([[0], change_indices_one_doc])
if boundaries is not None:
change_indices = snp.merge(change_indices_one_doc, boundaries,
duplicates=snp.DROP)
new_boundaries = snp.intersect(boundaries, change_indices, indices=True)[1][1]
new_boundaries = np.concatenate([new_boundaries, [len(change_indices)]])
else:
change_indices = change_indices_one_doc
new_boundaries = None
# 0 as a position, goes in bit 1,
# 1 as a position, goes in bit 2, etc
values = _1 << values
cols |= values
encoded = cols
if len(encoded) == 0:
return encoded, new_boundaries
reduced = np.bitwise_or.reduceat(encoded, change_indices)
return reduced, new_boundaries
def decode(self, encoded: np.ndarray, get_keys: bool = True) -> Union[List[Tuple[np.uint64, np.ndarray]], List[np.ndarray]]:
"""Decode an encoded bit array into keys / payloads."""
keys = (encoded & self.key_mask) >> (_64 - self.key_bits)
msbs = (encoded & self.payload_msb_mask) >> self.payload_msb_bits
to_concat = []
for bit in range(self.payload_<fim_suffix>lsb_bits):
mask = 1 << bit
lsbs = encoded & mask
set_lsbs = (lsbs != 0)
this_keys = keys[set_lsbs]
payload = bit + (msbs[set_lsbs] * self.payload_lsb_bits)
doc_with_posn = np.dstack([this_keys, payload])[0]
to_concat.append(doc_with_posn)
stacked = np.vstack(to_concat)
# Sort by doc_id, then posn
sorted_payload = stacked[np.lexsort((stacked[:, 1], stacked[:, 0]))]
keys, idx = np.unique(sorted_payload[:, 0], return_index=True)
grouped = np.split(sorted_payload[:, 1], idx[1:])
if get_keys:
return list(zip(keys, grouped))
else:
return grouped
def keys(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
return (encoded & self.key_mask) >> (_64 - self.key_bits)
def keys_unique(self, encoded: np.ndarray) -> np.ndarray:
"""Return keys from encoded."""
keys = self.keys(encoded)
intersected = sorted_unique(keys)
return intersected
def payload_msb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload MSBs from encoded."""
return (encoded & self.payload_msb_mask) >> self.payload_msb_bits
def payload_lsb(self, encoded: np.ndarray) -> np.ndarray:
"""Return payload LSBs from encoded."""
return encoded & self.payload_lsb_mask
def intersect_rshift(self, lhs: np.ndarray, rhs: np.ndarray,
rshift: np.int64 = _neg1) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
rshift : int how much to shift rhs by to the right
"""
rhs_int = rhs
assert rshift < 0, "rshift must be negative"
rhs_int = rhs[self.payload_msb(rhs) >= np.abs(rshift)]
rshft = rshift.view(np.uint64)
rhs_shifted = (rhs_int >> self.payload_lsb_bits) + rshft
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs_shifted,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs_int[rhs_idx]
def intersect(self, lhs: np.ndarray, rhs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Return the MSBs that are common to both lhs and rhs (same keys, same MSBs)
Parameters
----------
lhs : np.ndarray of uint64 (encoded) values
rhs : np.ndarray of uint64 (encoded) values
"""
# assert np.all(np.diff(rhs_shifted) >= 0), "not sorted"
_, (lhs_idx, rhs_idx) = snp.intersect(lhs >> self.payload_lsb_bits,
rhs >> self.payload_lsb_bits,
indices=True,
algorithm=_algorithm)
return lhs[lhs_idx], rhs[rhs_idx]
def slice(self, encoded: np.ndarray, keys: np.ndarray) -> np.ndarray:
"""Get list of encoded that have values in keys."""
assert len(keys.shape) == 1
assert len(encoded.shape) == 1
encoded_keys = encoded.view(np.uint64) >> (_64 - self.key_bits)
_, (idx_docs, idx_enc) = snp.intersect(keys, encoded_keys, indices=True,
duplicates=snp.KEEP_MAX_N,
algorithm=_algorithm)
return encoded[idx_enc]
def convert_keys(keys) -> np.ndarray:
"""Convert keys to range or np.ndarray of uint64."""
if isinstance(keys, numbers.Number):
return np.asarray([keys], dtype=np.uint64)
elif isinstance(keys, list):
return np.asarray(keys, dtype=np.uint64)
elif isinstance(keys, np.ndarray):
return keys.astype(np.uint64)
elif isinstance(keys, range) and len(keys) > 0:
# UNFORTUNATE COPY
        return np.arange(keys[0], keys[-1] + 1, dtype=np.uint64)
elif isinstance(keys, range):
return np.asarray([], dtype=np.uint64)
raise ValueError(f"Unknown type for keys: {type(keys)}")
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
            return True
        return False
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Cant set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have a positions for each term, doc pair. We can just update it.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask <fim_suffix>in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
    # lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encod<fim_suffix>ed_posns[1:]:
        # Only keep the count of the last bigram (ignoring docs where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
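# Illustrative sketch, not part of the original module: a naive reference for
# the quantity the Roaringish bigram chaining above approximates, written over
# plain per-document position arrays. It ignores the repeated-term and slop
# handling of the real implementation; names below are assumptions.
def _naive_phrase_freqs(term_posns_per_doc: List[List[np.ndarray]]) -> np.ndarray:
    """Count exact adjacent runs term0, term1, ... in each document.

    term_posns_per_doc[t][d] holds the positions of term t in document d.
    """
    num_terms = len(term_posns_per_doc)
    num_docs = len(term_posns_per_doc[0])
    freqs = np.zeros(num_docs)
    for doc in range(num_docs):
        # Positions where a run of the first t+1 terms currently ends
        ends = np.asarray(term_posns_per_doc[0][doc])
        for t in range(1, num_terms):
            nxt = np.asarray(term_posns_per_doc[t][doc])
            ends = ends[np.isin(ends + 1, nxt)] + 1
        freqs[doc] = len(ends)
    return freqs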
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
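    # Illustrative note (assumed toy values, not from the original source): the
    # builder expects a 3 x N array laid out as described in the class docstring,
    # e.g.
    #   flat = np.asarray([[0, 0, 1],   # term ids
    #                      [0, 1, 1],   # doc ids
    #                      [0, 2, 5]])  # positions
    #   posn_arr = PosnBitArrayFromFlatBuilder(flat).build()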
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here probably ellipses or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
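# Illustrative sketch, not part of the original module: a few expectations for
# the Solr-style mm parsing above, based on the logic of parse_min_should_match.
def _mm_spec_examples():
    assert parse_min_should_match(3, "2") == 2       # absolute count
    assert parse_min_should_match(5, "-1") == 4      # all clauses but one
    assert parse_min_should_match(4, "75%") == 3     # percentage, truncated
    assert parse_min_should_match(3, "2<75%") == 2   # conditional: >2 clauses use 75%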
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for<fim_suffix> field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
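# Illustrative sketch (assumed field names, not from the original module): the
# parser above turns a Solr-style qf/pf list into a field -> boost mapping,
# with None when no boost is given.
def _field_boost_example():
    assert parse_field_boosts(["title^2", "body"]) == {"title": 2.0, "body": None}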
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
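# Illustrative usage sketch (assumed column names and toy documents, not part
# of the original module). It shows how edismax is intended to be called on a
# dataframe whose text columns have been indexed as SearchArray.
def _edismax_usage_sketch():
    df = pd.DataFrame({"title": ["red shoes", "blue shoes"],
                       "body": ["cheap red shoes", "fancy blue shoes"]})
    df["title_idx"] = SearchArray.index(df["title"])
    df["body_idx"] = SearchArray.index(df["body"])
    scores, explain = edismax(df, q="red shoes",
                              qf=["title_idx^2", "body_idx"],
                              mm="2", pf=["body_idx"])
    return scores, explain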
<fim_middle> | null | FOR | complete_current_header_empty_completion |
<filename>searcharray/searcharray/term_dict.py<fim_prefix>import sys
class TermMissingError(KeyError):
def __init__(self, msg):
super().__init__(msg)
class TermDict:
def __init__(self):
self.term_to_ids = {}
self.id_to_terms = {}
def add_term(self, term):
if term in self.term_to_ids:
return self.term_to_ids[term]
term_id = len(self.term_to_ids)
self.term_to_ids[term] = term_id
self.id_to_terms[term_id] = term
return term_id
def copy(self):
new_dict = TermDict()
new_dict.term_to_ids = dict(self.term_to_ids)
new_dict.id_to_terms = dict(self.id_to_terms.copy())
return new_dict
def get_term_id(self, term):
try<fim_suffix>:
return self.term_to_ids[term]
except KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
def compatible(self, other) -> bool:
# Intersect the terms in both dictionaries
terms_self = list(self.term_to_ids.keys())
terms_other = list(other.term_to_ids.keys())
shortest = min(len(terms_self), len(terms_other))
return terms_self[:shortest] == terms_other[:shortest]
        # Compatible means the shorter dict's terms are a prefix of the longer dict's terms, so shared term ids line up
def __len__(self):
return len(self.term_to_ids)
def __repr__(self):
return repr(self.term_to_ids)
@property
def nbytes(self):
bytes_used = sys.getsizeof(self.term_to_ids) + sys.getsizeof(self.id_to_terms)
return bytes_used
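    # Illustrative sketch (assumed toy terms, not from the original source):
    #   td = TermDict()
    #   td.add_term("red")     # -> 0
    #   td.add_term("shoes")   # -> 1
    #   td.get_term(1)         # -> "shoes"
    #   td.get_term_id("hat")  # raises TermMissingError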
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try<fim_suffix>:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
np.ndarray
The search results
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
        # Iterate as if these are two sparse vectors of the same large dimensionality
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each (term, doc) pair, so we can just update them in place.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
# cache them in the DF cache, to let TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
t<fim_suffix>ry:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
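    # Illustrative usage sketch (assumed toy corpus, not from the original
    # class) for the search API defined above:
    #   arr = SearchArray.index(["red shoes", "blue shoes", "red hat"])
    #   arr.termfreqs("red")         # -> array([1, 0, 1])
    #   arr.match(["red", "shoes"])  # phrase match -> [True, False, False]
    #   arr.score("red")             # BM25 scores as a float ndarray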
<fim_middle> | null | TRY | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
tr<fim_suffix>y:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair and can simply update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, compute doc freq first so it lands in the DF cache,
# which lets the TF cache know this term's term freqs should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | TRY | complete_current_header_empty_completion |
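SearchArray registers a pandas extension dtype ("tokenized_text"), so an indexed column can live inside an ordinary DataFrame. A hedged sketch of how that is typically wired up; the data and column names are made up for illustration.

# Hedged sketch: SearchArray as a DataFrame column.
import pandas as pd
from searcharray.postings import SearchArray

df = pd.DataFrame({"title": ["red shoes", "blue shoes", "red hat"]})
df["title_indexed"] = SearchArray.index(df["title"])

mask = df["title_indexed"].array.match("red")           # boolean numpy mask
df["bm25"] = df["title_indexed"].array.score("shoes")   # default_bm25 similarity
print(df[mask].sort_values("bm25", ascending=False))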
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair and can simply update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try<fim_suffix>:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, compute doc freq first so it lands in the DF cache,
# which lets the TF cache know this term's term freqs should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | TRY | complete_current_header_empty_completion |
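Each row of this table stores its inputs column as a filename plus a fill-in-the-middle split of a source file, with the remaining columns (targets, block_type, scenario) printed after the <fim_middle> marker. The sketch below is a guess at how such an input string could be assembled; it is not the dataset's actual construction code, and the helper and offsets are hypothetical.

# Hedged sketch: building a fill-in-the-middle input in the shape of the rows above.
def make_fim_example(filename: str, source: str, hole_start: int, hole_end: int):
    prefix = source[:hole_start]
    middle = source[hole_start:hole_end]   # the span the model is asked to generate
    suffix = source[hole_end:]
    inputs = f"<filename>{filename}<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"
    return inputs, middle

inputs, target = make_fim_example(
    "searcharray/searcharray/postings.py",
    "def docfreq(self, token):\n    try:\n        ...\n",
    hole_start=31,
    hole_end=34,   # illustrative offsets only
)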
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs's most significant payload bit set and rhs's least significant bit set (adjacent across the word boundary)
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only keep the count of the last bigram (ignore docs where the earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here the key is probably an Ellipsis or a tuple of mixed indexers
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try<fim_suffix>:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | TRY | complete_current_header_empty_completion |
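The middle_out.py row above hinges on one bit trick: term positions inside a 64-bit payload word are stored as set bits, so "term A immediately followed by term B" reduces to a mask and a popcount (see inner_bigram_freqs). A hedged, standalone illustration with hand-picked positions; it uses plain numpy rather than the library's encoder or bit_count64 helpers.

# Hedged illustration of the adjacency counting used in inner_bigram_freqs above.
import numpy as np

def popcount(x: int) -> int:
    # simple popcount for this sketch; the library uses a vectorized bit_count64
    return bin(x).count("1")

# One document: term A at positions {1, 4}, term B at positions {2, 7}.
lhs = np.uint64((1 << 1) | (1 << 4))
rhs = np.uint64((1 << 2) | (1 << 7))

# Bit p survives the AND only when A sits at position p and B at position p + 1.
overlap = lhs & (rhs >> np.uint64(1))
print(popcount(int(overlap)))   # -> 1 (the A@1, B@2 bigram)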
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two sparse vectors in the same high-dimensional space
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair and can simply update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# its an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
tr<fim_suffix>y:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str to search for (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freq first so it lands in the DF cache,
# which lets the TF cache know those term freqs should be cached as well
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
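# For example, with two terms, or_query(["red", "shoes"], min_should_match=2)
# produces the same mask as and_query(["red", "shoes"]).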
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | TRY | complete_current_header_empty_completion |
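A minimal usage sketch of the SearchArray API exercised in the row above (index, score, match, and_query). The sample documents and column name are invented for illustration, and the snippet assumes the package behaves as those methods suggest.

import pandas as pd
from searcharray.postings import SearchArray

# Index a tiny corpus with the default whitespace tokenizer (ws_tokenizer).
df = pd.DataFrame({"title": ["red shoes", "blue shoes", "red hat"]})
df["title_indexed"] = SearchArray.index(df["title"])

idx = df["title_indexed"].array
bm25 = idx.score("shoes")               # BM25 score per document (np.ndarray)
has_red = idx.match("red")              # boolean mask of docs containing "red"
phrase = idx.match(["red", "shoes"])    # phrase match via phrase_freq > 0
both = idx.and_query(["red", "shoes"])  # docs containing both terms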
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
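# (For non-negative integers, -(-x // 2) == ceil(x / 2), so this line computes
# adjacents - ceil(adjacents / 2), i.e. floor(adjacents / 2).)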
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the last bigram (ignoring counts where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here it is probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
t<fim_suffix>ry:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | TRY | complete_current_header_empty_completion |
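A minimal sketch of the position index built by this module, assuming the PosnBitArrayBuilder/PosnBitArray API shown above; the term ids, doc ids, and positions are invented for illustration.

import numpy as np
from searcharray.phrase.middle_out import PosnBitArrayBuilder

# doc 0 = "red shoes red shoes", doc 1 = "blue shoes"; term ids: red=0, shoes=1, blue=2
builder = PosnBitArrayBuilder()
builder.add_posns(0, 0, [0, 2])   # "red" at positions 0 and 2 of doc 0
builder.add_posns(0, 1, [1, 3])   # "shoes" at positions 1 and 3 of doc 0
builder.add_posns(1, 2, [0])      # "blue" at position 0 of doc 1
builder.add_posns(1, 1, [1])      # "shoes" at position 1 of doc 1
builder.ensure_capacity(1)
posns = builder.build()

freqs = posns.phrase_freqs([0, 1], phrase_freqs=np.zeros(2),
                           doc_ids=np.arange(2, dtype=np.uint64))
print(freqs)  # expected to be [2., 0.]: "red shoes" occurs twice in doc 0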
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
t<fim_suffix>ry:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
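# A few illustrative values, derived by tracing the logic above (not from the
# original source):
#   parse_min_should_match(5, "2")      -> 2  (absolute clause count)
#   parse_min_should_match(5, "-1")     -> 4  (all but one clause)
#   parse_min_should_match(4, "75%")    -> 3  (percentage, fractional part dropped)
#   parse_min_should_match(3, "2<75%")  -> 2  (conditional: above 2 clauses, 75% applies)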
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
Tuple[np.ndarray, str]
The per-document scores and a query explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | TRY | complete_current_header_empty_completion |
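A minimal end-to-end sketch of edismax over SearchArray-backed columns, assuming the SearchArray.index and edismax APIs shown above; the frame, column names, and query are invented for illustration.

import pandas as pd
from searcharray.postings import SearchArray
from searcharray.solr import edismax

df = pd.DataFrame({
    "title": ["red shoes", "blue shoes", "red hat"],
    "body": ["comfortable red shoes", "new blue shoes", "a plain red hat"],
})
df["title_idx"] = SearchArray.index(df["title"])
df["body_idx"] = SearchArray.index(df["body"])

# qf/pf use the same caret boost syntax handled by parse_field_boosts above.
scores, explain = edismax(df, q="red shoes",
                          qf=["title_idx^2", "body_idx"],
                          pf=["body_idx"],
                          mm="2")
print(explain)  # human-readable breakdown of how the score was assembled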
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
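# (For non-negative integers, -(-x // 2) == ceil(x / 2), so this line computes
# adjacents - ceil(adjacents / 2), i.e. floor(adjacents / 2).)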
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs lsb set and rhs lsb's most significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the last bigram (ignoring counts where earlier bigrams did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here it is probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
tr<fim_suffix>y:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | TRY | complete_current_header_empty_completion |
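A small round-trip sketch of the Roaringish encoder used throughout this module, assuming the encode/decode behaviour exercised by PosnBitArrayBuilder.build's check path above; the doc ids and positions are invented.

import numpy as np
from searcharray.utils.roaringish import RoaringishEncoder

enc = RoaringishEncoder()
doc_ids = np.asarray([0, 0, 1, 1, 1], dtype=np.uint32)  # one entry per (doc, position)
posns = np.asarray([3, 7, 1, 2, 64], dtype=np.uint32)   # positions within each doc
encoded = enc.encode(keys=doc_ids, payload=posns)

# decode() yields (doc_id, positions) pairs grouped per document.
for doc_id, decoded_posns in enc.decode(encoded):
    print(doc_id, decoded_posns)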
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
# Not equal: return False explicitly rather than falling through to None
return False
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each (term, doc) pair and can simply update them.
            # Otherwise a TermMissingError is raised and the new terms are added below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
            term_freq = self.phrase_freq(token, slop=slop)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
        # cache them in the DF cache, letting the TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
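    # Editor's note: added usage sketch (not part of the original source); the
    # terms are made up.
    #   arr.and_query(["cat", "toys"])                    # docs containing both terms
    #   arr.or_query(["cat", "dog"], min_should_match=1)  # docs containing either term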
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMi<fim_suffix>ssingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
    See this ChatGPT translation of Solr's Java code for parsing the mm spec:
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except Va<fim_suffix>lueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
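# Editor's note: the worked examples below are an added sketch, not part of the
# original source; values were traced by hand from the implementation above.
#   parse_min_should_match(3, "2")      -> 2  (absolute count)
#   parse_min_should_match(4, "75%")    -> 3  (75% of 4, truncated)
#   parse_min_should_match(5, "-1")     -> 4  (all but one clause)
#   parse_min_should_match(2, "2<75%")  -> 2  (<= 2 clauses: all required)
#   parse_min_should_match(4, "2<75%")  -> 3  (> 2 clauses: 75% required)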
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
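# Editor's note: added example (not part of the original source) of the Solr
# "field^boost" syntax handled above:
#   parse_field_boosts(["title^10", "body"])  -> {"title": 10.0, "body": None}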
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
    similarity : Similarity, optional
        How to score the documents, by default BM25
    Returns
    -------
    Tuple[np.ndarray, str]
        The per-document scores and an "explain" string for the query
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
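# Editor's note: a hypothetical end-to-end sketch (not part of the original
# source); the column names and query text below are made up for illustration.
#   import pandas as pd
#   from searcharray.postings import SearchArray
#
#   df = pd.DataFrame({"title": ["cat toys", "dog toys"],
#                      "body": ["squeaky cat toys", "chew toys for dogs"]})
#   df["title"] = SearchArray.index(df["title"])
#   df["body"] = SearchArray.index(df["body"])
#   scores, explain = edismax(df, q="cat toys", qf=["title^2", "body"], mm="2")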
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
            return True
        return False
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexErr<fim_suffix>or:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
        # Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
            # Assume we have positions for each (term, doc) pair and can simply update them.
            # Otherwise a TermMissingError is raised and the new terms are added below.
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also. This is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
        # it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
            term_freq = self.phrase_freq(token, slop=slop)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
        # cache them in the DF cache, letting the TF cache know it should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
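    # Editor's note: added usage sketch (not part of the original source); the
    # terms are made up.
    #   arr.score("cat")            # BM25 score of a single term, per document
    #   arr.score(["cat", "toys"])  # scores the phrase "cat toys"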
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
    # lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
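# Editor's note (added clarification, not part of the original source): two
# passes are needed because a bigram can either sit entirely inside one 64-bit
# payload word (inner_bigram_freqs) or straddle the boundary between two
# adjacent words (adjacent_bigram_freqs); bigram_freqs combines both counts.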
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
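# Editor's note: a made-up sketch (not part of the original source) of the
# flat_array layout this builder expects, assuming a uint64-compatible dtype:
#   flat_array = np.asarray([[0, 0, 0, 1],   # row 0: term ids
#                            [0, 0, 1, 0],   # row 1: doc ids
#                            [1, 5, 2, 3]],  # row 2: positions
#                           dtype=np.uint64)
#   posns = PosnBitArrayFromFlatBuilder(flat_array).build()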
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
    # Here probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except<fim_suffix> KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
            return True
        return False
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also, this is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except TermMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingE<fim_suffix>rror:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
# For expensive tokens, we compute doc freqs first so they land in the DF
# cache, which the TF cache then uses to decide which term freqs to cache
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
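A short usage sketch for the postings.py sample above. This is an illustration only: it assumes the searcharray package is installed, uses made-up example strings, and relies on SearchArray.index / termfreqs / match / score / and_query behaving as defined in the code above.

import pandas as pd
from searcharray.postings import SearchArray

docs = pd.Series(["mary had a little lamb",
                  "little lamb little lamb",
                  "mary mary quite contrary"])
arr = SearchArray.index(docs)            # whitespace tokenization by default

print(arr.termfreqs("little"))           # per-doc term frequency, e.g. [1 2 0]
print(arr.match(["little", "lamb"]))     # phrase match mask, e.g. [True True False]
print(arr.score("mary"))                 # BM25 scores via the default_bm25 similarity
print(arr.and_query(["mary", "lamb"]))   # mask of docs containing both terms

Since SearchArray is a pandas ExtensionArray, the indexed result can also be stored as a DataFrame column and queried the same way.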
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here key is probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
except KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except <fim_suffix>KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
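A minimal, self-contained sketch of the bit trick behind inner_bigram_freqs in the sample above. It is a simplification: here a single 64-bit word is assumed to hold all positions of a term in one document, whereas the real RoaringishEncoder also packs the doc id into the most significant bits and handles positions that spill into adjacent words (adjacent_bigram_freqs).

import numpy as np

def pack(posns):
    """Set one bit per token position in a single 64-bit payload."""
    word = np.uint64(0)
    for p in posns:
        word |= np.uint64(1) << np.uint64(p)
    return word

lhs = pack([3, 7, 20])                  # positions of term A in the doc
rhs = pack([4, 9, 21])                  # positions of term B in the doc
overlap = lhs & (rhs >> np.uint64(1))   # bit set wherever B directly follows A
bigrams = bin(int(overlap)).count("1")  # popcount, as bit_count64 does vectorized
print(bigrams)                          # 2 -> "A B" occurs at 3->4 and 20->21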
<filename>searcharray/searcharray/phrase/middle_out.py<fim_prefix>"""Encode positions in bits along with some neighboring information for wrapping.
See this notebook for motivation:
https://colab.research.google.com/drive/10tIEkdlCE_1J_CcgEcV0jkLfBc-0H4am?authuser=1#scrollTo=XWzy-n9dF3PG
"""
import numpy as np
import sortednp as snp
from copy import deepcopy
from typing import List, Tuple, Dict, Union, cast, Optional
from searcharray.utils.roaringish import RoaringishEncoder, convert_keys, sorted_unique
import numbers
import logging
from collections import defaultdict
from searcharray.utils.bitcount import bit_count64
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
encoder = RoaringishEncoder()
# To not constantly type coerce
_64 = np.uint64(64)
_2 = np.uint64(2)
_1 = np.uint64(1)
_0 = np.uint64(0)
_neg1 = np.int64(-1)
MAX_POSN = encoder.max_payload
def inner_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays, within a 64 bit word with same MSBs.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect(lhs, rhs)
lhs_doc_ids = encoder.keys(lhs_int)
if len(lhs_int) != len(rhs_int):
raise ValueError("Encoding error, MSBs apparently are duplicated among your encoded posn arrays.")
if len(lhs_int) == 0:
return phrase_freqs, rhs_int
same_term = (len(lhs_int) == len(rhs_int) and lhs_int[0] == rhs_int[0])
if same_term:
# Find adjacent matches
rhs_shift = rhs_int << _1
overlap = lhs_int & rhs_shift
overlap = encoder.payload_lsb(overlap)
adjacents = bit_count64(overlap).astype(np.int64)
adjacents -= -np.floor_divide(adjacents, -2) # ceiling divide
phrase_freqs[lhs_doc_ids] += adjacents
return phrase_freqs, rhs_int
overlap_bits = (lhs_int & encoder.payload_lsb_mask) & ((rhs_int & encoder.payload_lsb_mask) >> _1)
rhs_next2 = (overlap_bits << _1) & encoder.payload_lsb_mask
rhs_next2 |= (rhs_int & (encoder.key_mask | encoder.payload_msb_mask))
phrase_freqs2 = phrase_freqs.copy()
matches2 = overlap_bits > 0
if np.any(matches2):
transitions = np.argwhere(np.diff(lhs_doc_ids[matches2]) != 0).flatten() + 1
transitions = np.insert(transitions, 0, 0)
counted_bits = bit_count64(overlap_bits[matches2])
reduced = np.add.reduceat(counted_bits,
transitions)
phrase_freqs2[np.unique(lhs_doc_ids[matches2])] += reduced
return phrase_freqs2, rhs_next2
def adjacent_bigram_freqs(lhs: np.ndarray, rhs: np.ndarray,
phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays where they occur in adjacent 64 bit words.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
lhs_int, rhs_int = encoder.intersect_rshift(lhs, rhs, rshift=_neg1)
lhs_doc_ids = encoder.keys(lhs_int)
# lhs payload's most significant bit set and rhs payload's least significant bit set
upper_bit = _1 << (encoder.payload_lsb_bits - _1)
matches = ((lhs_int & upper_bit) != 0) & ((rhs_int & _1) != 0)
unique, counts = np.unique(lhs_doc_ids[matches], return_counts=True)
phrase_freqs[unique] += counts
rhs_next = rhs_int
rhs_next[~matches] |= ~encoder.payload_lsb_mask
rhs_next[matches] |= (encoder.payload_lsb_mask & _1)
return phrase_freqs, rhs_next
def bigram_freqs(lhs: np.ndarray, rhs: np.ndarray, phrase_freqs: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Count bigram matches between two encoded arrays.
Returns:
--------
count: number of matches per doc
rhs_next: the next rhs array to continue matching
"""
# Combine lhs and rhs matches from two strategies
phrase_freqs, rhs_next_inner = inner_bigram_freqs(lhs, rhs, phrase_freqs)
phrase_freqs, rhs_next_adj = adjacent_bigram_freqs(lhs, rhs, phrase_freqs)
rhs_next = np.sort(np.concatenate([rhs_next_inner, rhs_next_adj]))
# Combine
return phrase_freqs, rhs_next
def trim_phrase_search(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> List[np.ndarray]:
"""Trim long phrases by searching the rarest terms first."""
# Start with rarest term
shortest_keys = None
shortest_idx = None
min_len = 1e100
max_len = 0
for idx, enc_posn in enumerate(encoded_posns):
if len(enc_posn) < min_len:
shortest_keys = encoder.keys(enc_posn)
shortest_idx = idx
min_len = len(enc_posn)
if len(enc_posn) > max_len:
max_len = len(enc_posn)
if shortest_keys is None:
return encoded_posns
for enc_posn_idx in range(len(encoded_posns)):
if enc_posn_idx == shortest_idx:
continue
if len(encoded_posns[enc_posn_idx]) > (10 * min_len):
encoded_posns[enc_posn_idx] = encoder.slice(encoded_posns[enc_posn_idx],
shortest_keys)
return encoded_posns
def compute_phrase_freqs(encoded_posns: List[np.ndarray],
phrase_freqs: np.ndarray) -> np.ndarray:
if len(encoded_posns) < 2:
raise ValueError("phrase must have at least two terms")
# Trim long phrases by searching the rarest terms first
if len(encoded_posns) > 3:
encoded_posns = trim_phrase_search(encoded_posns, phrase_freqs)
mask = np.ones(len(phrase_freqs), dtype=bool)
lhs = encoded_posns[0]
for rhs in encoded_posns[1:]:
# Only count the count of the last bigram (ignoring the ones where priors did not match)
phrase_freqs[mask] = 0
phrase_freqs, lhs = bigram_freqs(lhs, rhs, phrase_freqs)
mask &= (phrase_freqs > 0)
phrase_freqs[~mask] = 0
return phrase_freqs
class PosnBitArrayFromFlatBuilder:
""" Build from sorted array shape num terms x 3.
0th is term id
1st is doc id
2nd is posn
Sorted by term id then posns
"""
def __init__(self, flat_array: np.ndarray):
self.flat_array = flat_array
def build(self):
"""Slice the flat array into a 2d array of doc ids and posns."""
term_boundaries = np.argwhere(np.diff(self.flat_array[0]) > 0).flatten() + 1
term_boundaries = np.concatenate([[0], term_boundaries, [len(self.flat_array[1])]])
encoded, enc_term_boundaries = encoder.encode(keys=self.flat_array[1].view(np.uint64),
boundaries=term_boundaries[:-1],
payload=self.flat_array[2].view(np.uint64))
term_ids = self.flat_array[0][term_boundaries[:-1]]
encoded_term_posns = {}
for into_terms, (beg_idx, end_idx) in enumerate(zip(enc_term_boundaries[:-1], enc_term_boundaries[1:])):
sliced = encoded[beg_idx:end_idx]
encoded_term_posns[term_ids[into_terms]] = sliced
return PosnBitArray(encoded_term_posns, self.flat_array[1].max())
class PosnBitArrayBuilder:
def __init__(self):
self.term_posns = defaultdict(list)
self.term_posn_doc_ids = defaultdict(list)
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns: List[int]):
doc_ids = [doc_id] * len(posns)
self.term_posns[term_id].extend(posns)
self.term_posn_doc_ids[term_id].extend(doc_ids)
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
encoded_term_posns = {}
for term_id, posns in self.term_posns.items():
if len(posns) == 0:
posns = np.asarray([], dtype=np.uint32).flatten()
elif isinstance(posns, list):
posns_arr = np.asarray(posns, dtype=np.uint32).flatten()
posns = posns_arr
doc_ids = self.term_posn_doc_ids[term_id]
if isinstance(doc_ids, list):
doc_ids = np.asarray(doc_ids, dtype=np.uint32)
encoded = encoder.encode(keys=doc_ids, payload=posns)
if check:
decode_again = encoder.decode(encoded)
docs_to_posns = dict(decode_again)
doc_ids_again = []
posns_again = []
for doc_id, posns_dec in docs_to_posns.items():
for posn in posns_dec:
doc_ids_again.append(doc_id)
posns_again.append(posn)
assert np.array_equal(doc_ids_again, doc_ids)
assert np.array_equal(posns, posns_again)
encoded_term_posns[term_id] = encoded
return PosnBitArray(encoded_term_posns, self.max_doc_id)
class PosnBitArrayAlreadyEncBuilder:
def __init__(self):
self.encoded_term_posns = {}
self.max_doc_id = 0
def add_posns(self, doc_id: int, term_id: int, posns):
self.encoded_term_posns[term_id] = posns
def ensure_capacity(self, doc_id):
self.max_doc_id = max(self.max_doc_id, doc_id)
def build(self, check=False):
return PosnBitArray(self.encoded_term_posns, self.max_doc_id)
def index_range(rng, key):
if key is None:
return rng
if isinstance(rng, np.ndarray):
return rng[key]
if isinstance(key, slice):
return rng[key]
elif isinstance(key, numbers.Number):
return rng[key]
elif isinstance(key, np.ndarray):
try:
# UNFORTUNATE COPY
r_val = np.asarray(list(rng))[key]
return r_val
except IndexError as e:
raise e
# Last resort
# UNFORTUNATE COPY
# Here key is probably an ellipsis or a tuple of various things
return np.asarray(list(rng))[key]
class PosnBitArray:
def __init__(self, encoded_term_posns, max_doc_id: int):
self.encoded_term_posns = encoded_term_posns
self.max_doc_id = max_doc_id
self.docfreq_cache : Dict[int, np.uint64] = {}
self.termfreq_cache : Dict[int, Tuple[np.ndarray, np.ndarray]] = {}
def copy(self):
new = PosnBitArray(deepcopy(self.encoded_term_posns), self.max_doc_id)
return new
def concat(self, other):
"""Merge other into self.
Assumes other's doc ids are not overlapping with self's doc ids.
"""
# Shared terms
shared_terms = set(self.encoded_term_posns.keys()).intersection(set(other.encoded_term_posns.keys()))
for term_id in shared_terms:
# Append then sort
self.encoded_term_posns[term_id] = np.concatenate([self.encoded_term_posns[term_id], other.encoded_term_posns[term_id]])
self.encoded_term_posns[term_id].sort()
only_other_terms = set(other.encoded_term_posns.keys()).difference(set(self.encoded_term_posns.keys()))
for term_id in only_other_terms:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
self.max_doc_id = max(self.max_doc_id, other.max_doc_id)
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def slice(self, key):
sliced_term_posns = {}
doc_ids = convert_keys(key)
max_doc_id = np.max(doc_ids)
for term_id, posns in self.encoded_term_posns.items():
encoded = self.encoded_term_posns[term_id]
assert len(encoded.shape) == 1
sliced_term_posns[term_id] = encoder.slice(encoded, keys=doc_ids)
return PosnBitArray(sliced_term_posns, max_doc_id)
def __getitem__(self, key):
return self.slice(key)
def merge(self, other):
# Unique terms
unique_terms = set(self.encoded_term_posns.keys()).union(set(other.encoded_term_posns.keys()))
for term_id in unique_terms:
if term_id not in other.encoded_term_posns:
continue
elif term_id not in self.encoded_term_posns:
self.encoded_term_posns[term_id] = other.encoded_term_posns[term_id]
else:
posns_self = self.encoded_term_posns[term_id]
posns_other = other.encoded_term_posns[term_id]
self.encoded_term_posns[term_id] = snp.merge(posns_self, posns_other)
self.max_doc_id = self.max_doc_id + other.max_doc_id
# Empty caches
self.termfreq_cache = {}
self.docfreq_cache = {}
def doc_encoded_posns(self, term_id: int, doc_id: int) -> np.ndarray:
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np.asarray([doc_id], dtype=np.uint64))
return term_posns
def phrase_freqs(self, term_ids: List[int], phrase_freqs: np.ndarray,
doc_ids: np.ndarray) -> np.ndarray:
if len(term_ids) < 2:
raise ValueError("Must have at least two terms")
if phrase_freqs.shape[0] == self.max_doc_id + 1:
enc_term_posns = [self.encoded_term_posns[term_id] for term_id in term_ids]
else:
enc_term_posns = [encoder.slice(self.encoded_term_posns[term_id],
keys=doc_ids.view(np.uint64)) for term_id in term_ids]
return compute_phrase_freqs(enc_term_posns, phrase_freqs)
def positions(self, term_id: int, doc_ids) -> Union[List[np.ndarray], np.ndarray]:
if isinstance(doc_ids, numbers.Number):
doc_ids = np.asarray([doc_ids])
try:
np_doc_ids = convert_keys(doc_ids)
term_posns = encoder.slice(self.encoded_term_posns[term_id],
keys=np_doc_ids)
e<fim_suffix>xcept KeyError:
r_val = [np.array([], dtype=np.uint32) for doc_id in doc_ids]
if len(r_val) == 1 and len(doc_ids) == 1 and isinstance(doc_ids[0], numbers.Number):
return [r_val[0]]
return r_val
decoded = encoder.decode(encoded=term_posns, get_keys=True)
if len(decoded) == 0:
return [np.array([], dtype=np.uint32)]
if len(decoded) != len(doc_ids):
# Fill non matches
decoded = cast(List[Tuple[np.uint64, np.ndarray]], decoded)
as_dict: Dict[np.uint64, np.ndarray] = dict(decoded)
decs = []
for doc_id in doc_ids:
if doc_id in as_dict:
decs.append(as_dict[doc_id])
else:
decs.append(np.array([], dtype=np.uint32))
return decs
else:
decs = [dec[1] for dec in decoded]
return decs
def termfreqs(self, term_id: int, doc_ids: Optional[np.ndarray] = None) -> Tuple[np.ndarray, np.ndarray]:
"""Count term freqs using unique positions."""
if doc_ids is None:
return self._termfreqs_with_cache(term_id)
encoded = self.encoded_term_posns[term_id]
term_posns = encoded
term_posns = encoder.slice(encoded,
keys=doc_ids.astype(np.uint64))
return self._computed_term_freqs(term_posns)
def _computed_term_freqs(self, term_posns) -> Tuple[np.ndarray, np.ndarray]:
doc_ids = encoder.keys(term_posns)
change_indices = np.nonzero(np.diff(doc_ids))[0]
change_indices = np.concatenate((np.asarray([0]), change_indices + 1))
posns = term_posns & encoder.payload_lsb_mask
bit_counts = bit_count64(posns)
term_freqs = np.add.reduceat(bit_counts, change_indices)
return sorted_unique(doc_ids), term_freqs
def _termfreqs_with_cache(self, term_id: int) -> Tuple[np.ndarray, np.ndarray]:
try:
return self.termfreq_cache[term_id]
except KeyError:
term_posns = self.encoded_term_posns[term_id]
doc_ids, term_freqs = self._computed_term_freqs(term_posns)
if self._is_cached(term_id):
self.termfreq_cache[term_id] = (doc_ids, term_freqs)
return doc_ids, term_freqs
def _is_cached(self, term_id: int) -> bool:
return term_id in self.docfreq_cache
def _docfreq_from_cache(self, term_id: int) -> np.uint64:
return self.docfreq_cache[term_id]
def _maybe_cache_docfreq(self, term_id: int, docfreq: np.uint64):
if self.max_doc_id >= 100000 and docfreq > (self.max_doc_id // 100):
self.docfreq_cache[term_id] = docfreq
def docfreq(self, term_id: int) -> np.uint64:
try:
return self.docfreq_cache[term_id]
except KeyError:
encoded = self.encoded_term_posns[term_id]
docfreq = np.uint64(encoder.keys_unique(encoded).size)
self._maybe_cache_docfreq(term_id, docfreq)
return docfreq
def insert(self, key, term_ids_to_posns, is_encoded=False):
new_posns = PosnBitArrayBuilder()
if is_encoded:
new_posns = PosnBitArrayAlreadyEncBuilder()
max_doc_id = 0
for doc_id, new_posns_row in enumerate(term_ids_to_posns):
for term_id, positions in new_posns_row:
new_posns.add_posns(doc_id, term_id, positions)
max_doc_id = max(doc_id, max_doc_id)
new_posns.max_doc_id = max_doc_id
ins_arr = new_posns.build()
self.merge(ins_arr)
@property
def nbytes(self):
arr_bytes = 0
for doc_id, posn in self.encoded_term_posns.items():
arr_bytes += posn.nbytes
for term_id, (doc_ids, term_freqs) in self.termfreq_cache.items():
arr_bytes += doc_ids.nbytes
arr_bytes += term_freqs.nbytes
for term_id, docfreq in self.docfreq_cache.items():
arr_bytes += docfreq.nbytes
return arr_bytes
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
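A rough end-to-end sketch of the builder and query path from the two middle_out.py samples above. Hedged assumptions: searcharray is installed, term ids 0 and 1 stand in for two hypothetical tokens, and the "expected" comments reflect what the code above suggests rather than verified output.

import numpy as np
from searcharray.phrase.middle_out import PosnBitArrayBuilder

builder = PosnBitArrayBuilder()
builder.add_posns(doc_id=0, term_id=0, posns=[0, 4, 9])   # term 0 in doc 0
builder.add_posns(doc_id=1, term_id=0, posns=[2])         # term 0 in doc 1
builder.add_posns(doc_id=1, term_id=1, posns=[3])         # term 1 in doc 1
builder.ensure_capacity(1)                                 # highest doc id seen
posns = builder.build()                                    # -> PosnBitArray

doc_ids, tfs = posns.termfreqs(0)        # docs containing term 0, and their tfs
print(doc_ids, tfs)                      # expected: [0 1] [3 1]
print(posns.docfreq(0))                  # expected: 2

# Phrase frequency of the bigram (term 0, term 1) across docs 0 and 1
freqs = posns.phrase_freqs([0, 1],
                           phrase_freqs=np.zeros(2),
                           doc_ids=np.asarray([0, 1], dtype=np.uint64))
print(freqs)                             # expected: [0. 1.]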
<filename>searcharray/searcharray/postings.py<fim_prefix>"""Tokenized, searchable text as a pandas dtype."""
import pandas as pd
import numbers
from pandas.api.extensions import ExtensionDtype, ExtensionArray, register_extension_dtype
from pandas.api.types import is_list_like
from pandas.api.extensions import take
import json
from collections import Counter
import warnings
import logging
from typing import List, Union, Optional, Iterable
import numpy as np
from searcharray.phrase.scan_merge import scan_merge_ins
from searcharray.phrase.posn_diffs import compute_phrase_freqs
from searcharray.phrase.middle_out import PosnBitArray
from searcharray.similarity import Similarity, default_bm25
from searcharray.indexing import build_index_from_tokenizer, build_index_from_terms_list
from searcharray.term_dict import TermMissingError
logger = logging.getLogger(__name__)
# When running in pytest
import sys # noqa
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.ERROR)
formatter = logging.Formatter("[%(filename)s:%(lineno)s - %(funcName)20s() ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.ERROR)
class Terms:
"""An indexed search doc - a single bag of tokenized words and positions."""
def __init__(self,
postings,
doc_len: int = 0,
posns: Optional[dict] = None,
encoded=False):
self.postings = postings
self.posns = None
self.encoded = encoded
self.doc_len = doc_len
self.posns = posns
def _validate_posns(self):
# (For testing/assertions) - Confirm every term in positions also in postings
if self.posns is None:
return
for term in self.posns:
if term not in self.postings:
raise ValueError(f"Term {term} in positions but not in postings. ")
def termfreq(self, token):
return self.postings[token]
def terms(self):
return self.postings.items()
def positions(self, term=None):
if self.posns is None:
return {}
if term is None:
posns = self.posns.items()
else:
posns = self.posns[term]
return posns
def raw_positions(self, term_dict, term=None):
if self.posns is None:
return {}
if term is None:
posns = [(term_dict.get_term_id(term), posns) for term, posns in self.posns.items()]
else:
posns = [(term_dict.get_term_id(term), self.posns[term])]
return posns
def tf_to_dense(self, term_dict):
"""Convert to a dense vector of term frequencies."""
dense = np.zeros(len(term_dict))
for term, freq in self.terms():
dense[term_dict.get_term_id(term)] = freq
return dense
def __len__(self):
return len(self.postings)
def __repr__(self):
posting_keys = set(self.postings.keys())
rval = f"Terms({posting_keys})"
return rval
def __str__(self):
return repr(self)
def __eq__(self, other):
# Flip to the other implementation if we're comparing to a SearchArray
# to get a boolean array back
if isinstance(other, SearchArray):
return other == self
same_postings = isinstance(other, Terms) and self.postings == other.postings
if same_postings and self.doc_len == other.doc_len:
return True
def __lt__(self, other):
# return isinstance(other, Terms) and hash(self) < hash(other)
keys_both = set(self.postings.keys()).union(set(other.postings.keys()))
# Sort lexically
keys_both = sorted(keys_both)
# Iterate as if these are two vectors of the same large dimensional vector sparse
for key in keys_both:
lhs_val = 0
rhs_val = 0
try:
lhs_val = self.postings[key]
except KeyError:
pass
try:
rhs_val = other.postings[key]
except KeyError:
pass
if lhs_val < rhs_val:
return True
elif lhs_val > rhs_val:
return False
else:
continue
return False
def __le__(self, other):
return self < other or self == other
def __gt__(self, other):
return not (self < other) and self != other
def __hash__(self):
return hash(json.dumps(self.postings, sort_keys=True))
class TermsDtype(ExtensionDtype):
"""Pandas dtype for terms."""
name = 'tokenized_text'
type = Terms
kind = 'O'
@classmethod
def construct_from_string(cls, string):
if not isinstance(string, str):
raise TypeError(
"'construct_from_string' expects a string, got {}".format(type(string))
)
elif string == cls.name:
return cls()
else:
raise TypeError(
"Cannot construct a '{}' from '{}'".format(cls.__name__, string)
)
@classmethod
def construct_array_type(cls):
return SearchArray
def __repr__(self):
return 'TermsDtype()'
@property
def na_value(self):
return Terms({})
def valid_value(self, value):
return isinstance(value, dict) or pd.isna(value) or isinstance(value, Terms)
register_extension_dtype(TermsDtype)
def ws_tokenizer(string):
if pd.isna(string):
return []
if not isinstance(string, str):
raise ValueError("Expected a string")
return string.split()
def _row_to_postings_row(doc_id, row, doc_len, term_dict, posns: PosnBitArray):
tfs = {}
labeled_posns = {}
for term_idx in row.cols:
term = term_dict.get_term(term_idx)
tfs[term] = 1
enc_term_posns = posns.doc_encoded_posns(term_idx, doc_id=doc_id)
labeled_posns[term] = enc_term_posns
result = Terms(tfs, posns=labeled_posns,
doc_len=doc_len, encoded=True)
return result
class SearchArray(ExtensionArray):
"""An array of tokenized text (Termss)."""
dtype = TermsDtype()
def __init__(self, postings, tokenizer=ws_tokenizer, avoid_copies=True):
# Check dtype, raise TypeError
if not is_list_like(postings):
raise TypeError("Expected list-like object, got {}".format(type(postings)))
self.avoid_copies = avoid_copies
self.tokenizer = tokenizer
self.term_mat, self.posns, \
self.term_dict, self.avg_doc_length, \
self.doc_lens = build_index_from_terms_list(postings, Terms)
@classmethod
def index(cls, array: Iterable, tokenizer=ws_tokenizer,
truncate=False, batch_size=100000, avoid_copies=True) -> 'SearchArray':
"""Index an array of strings using tokenizer."""
if not is_list_like(array):
raise TypeError("Expected list-like object, got {}".format(type(array)))
term_mat, posns, term_dict, avg_doc_length, doc_lens =\
build_index_from_tokenizer(array, tokenizer, batch_size=batch_size,
truncate=truncate)
postings = cls([], tokenizer=tokenizer, avoid_copies=avoid_copies)
postings.term_mat = term_mat
postings.posns = posns
postings.term_dict = term_dict
postings.avg_doc_length = avg_doc_length
postings.doc_lens = doc_lens
return postings
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
"""Construct a new SearchArray from a sequence of scalars (PostingRow or convertible into)."""
if dtype is not None:
if not isinstance(dtype, TermsDtype):
return scalars
if isinstance(scalars, np.ndarray) and scalars.dtype == TermsDtype():
return cls(scalars)
# String types
elif isinstance(scalars, np.ndarray) and scalars.dtype.kind in 'US':
return cls(scalars)
# Other objects
elif isinstance(scalars, np.ndarray) and scalars.dtype != object:
return scalars
return cls(scalars)
def memory_usage(self, deep=False):
"""Return memory usage of this array in bytes."""
return self.nbytes
@property
def nbytes(self):
return self.term_mat.nbytes + self.posns.nbytes + self.doc_lens.nbytes + self.term_dict.nbytes
def __getitem__(self, key):
key = pd.api.indexers.check_array_indexer(self, key)
# Want to take rows of term freqs
if isinstance(key, numbers.Integral):
try:
rows = self.term_mat[key]
doc_len = self.doc_lens[key]
doc_id = key
if doc_id < 0:
doc_id += len(self)
return _row_to_postings_row(doc_id, rows[0], doc_len,
self.term_dict, self.posns)
except IndexError:
raise IndexError("index out of bounds")
else:
# Construct a sliced view of this array
sliced_tfs = self.term_mat.slice(key)
sliced_posns = self.posns.slice(sliced_tfs.rows) if not self.avoid_copies else self.posns
arr = SearchArray([], tokenizer=self.tokenizer)
arr.term_mat = sliced_tfs
arr.doc_lens = self.doc_lens[key]
arr.posns = sliced_posns
arr.term_dict = self.term_dict
arr.avg_doc_length = self.avg_doc_length
return arr
def __setitem__(self, key, value):
"""Set an item in the array."""
key = pd.api.indexers.check_array_indexer(self, key)
if isinstance(value, pd.Series):
value = value.values
if isinstance(value, pd.DataFrame):
value = value.values.flatten()
if isinstance(value, SearchArray):
value = value.to_numpy()
if isinstance(value, list):
value = np.asarray(value, dtype=object)
if not isinstance(value, np.ndarray) and not self.dtype.valid_value(value):
raise ValueError(f"Cannot set non-object array to SearchArray -- you passed type:{type(value)} -- {value}")
# Can't set a single value to an array
if isinstance(key, numbers.Integral) and isinstance(value, np.ndarray):
raise ValueError("Cannot set a single value to an array")
try:
is_encoded = False
posns = None
term_mat = np.asarray([])
doc_lens = np.asarray([])
if isinstance(value, float):
term_mat = np.asarray([value])
doc_lens = np.asarray([0])
elif isinstance(value, Terms):
term_mat = np.asarray([value.tf_to_dense(self.term_dict)])
doc_lens = np.asarray([value.doc_len])
is_encoded = value.encoded
posns = [value.raw_positions(self.term_dict)]
elif isinstance(value, np.ndarray):
term_mat = np.asarray([x.tf_to_dense(self.term_dict) for x in value])
doc_lens = np.asarray([x.doc_len for x in value])
is_encoded = value[0].encoded if len(value) > 0 else False
posns = [x.raw_positions(self.term_dict) for x in value]
np.nan_to_num(term_mat, copy=False, nan=0)
self.term_mat[key] = term_mat
self.doc_lens[key] = doc_lens
if posns is not None:
self.posns.insert(key, posns, is_encoded)
# Assume we have positions for each (term, doc) pair, so we can just update them.
# Otherwise we would have added new terms
except TermMissingError:
self._add_new_terms(key, value)
def _add_new_terms(self, key, value):
msg = """Adding new terms! This might not be good if you tokenized this new text
with a different tokenizer.
Also, this is slow."""
warnings.warn(msg)
scan_value = value
if isinstance(value, Terms):
scan_value = np.asarray([value])
for row in scan_value:
for term in row.terms():
self.term_dict.add_term(term[0])
self.term_mat.resize((self.term_mat.shape[0], len(self.term_dict)))
# Ensure posns_lookup has at least max self.posns
self[key] = value
def value_counts(
self,
dropna: bool = True,
):
if dropna:
counts = Counter(self[:])
counts.pop(Terms({}), None)
else:
counts = Counter(self[:])
return pd.Series(counts)
def __len__(self):
len_rval = len(self.term_mat.rows)
return len_rval
def __ne__(self, other):
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
return ~(self == other)
def __eq__(self, other):
"""Return a boolean numpy array indicating elementwise equality."""
# When other is a dataframe or series, not implemented
if isinstance(other, pd.DataFrame) or isinstance(other, pd.Series) or isinstance(other, pd.Index):
return NotImplemented
# When other is an ExtensionArray
if isinstance(other, SearchArray):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
else:
# Compatible term dicts, and same term freqs
# (not looking at positions, maybe we should?)
if self.term_dict.compatible(other.term_dict):
return (self.term_mat == other.term_mat) & (self.doc_lens == other.doc_lens)
else:
return np.zeros(len(self), dtype=bool)
# return np.array(self[:]) == np.array(other[:])
# When other is a scalar value
elif isinstance(other, Terms):
other = SearchArray([other], tokenizer=self.tokenizer)
warnings.warn("Comparing a scalar value to a SearchArray. This is slow.")
return np.array(self[:]) == np.array(other[:])
# When other is a sequence but not an ExtensionArray
# it's an array of dicts
elif is_list_like(other):
if len(self) != len(other):
return False
elif len(other) == 0:
return np.array([], dtype=bool)
# We actually don't know how it was tokenized
other = SearchArray(other, tokenizer=self.tokenizer)
return np.array(self[:]) == np.array(other[:])
# Return False where 'other' is neither the same length nor a scalar
else:
return np.full(len(self), False)
def isna(self):
# Every row with all 0s
empties = self.doc_lens == 0
return empties
def take(self, indices, allow_fill=False, fill_value=None):
# Want to take rows of term freqs
row_indices = np.arange(len(self.term_mat.rows))
# Take within the row indices themselves
result_indices = take(row_indices, indices, allow_fill=allow_fill, fill_value=-1)
if allow_fill and -1 in result_indices:
if fill_value is None or pd.isna(fill_value):
fill_value = Terms({}, encoded=True)
to_fill_mask = result_indices == -1
# This is slow as it rebuilds all the term dictionaries
# on the subsequent assignment lines
# However, this case tends to be the exception for
# most dataframe operations
taken = SearchArray([fill_value] * len(result_indices))
taken[~to_fill_mask] = self[result_indices[~to_fill_mask]].copy()
return taken
else:
taken = self[result_indices].copy()
return taken
def copy(self):
postings_arr = SearchArray([], tokenizer=self.tokenizer)
postings_arr.doc_lens = self.doc_lens.copy()
postings_arr.term_mat = self.term_mat.copy()
postings_arr.posns = self.posns
postings_arr.term_dict = self.term_dict
postings_arr.avg_doc_length = self.avg_doc_length
if not self.avoid_copies:
postings_arr.posns = self.posns.copy()
postings_arr.term_dict = self.term_dict.copy()
return postings_arr
@classmethod
def _concat_same_type(cls, to_concat):
concatenated_data = np.concatenate([ea[:] for ea in to_concat])
return SearchArray(concatenated_data, tokenizer=to_concat[0].tokenizer)
@classmethod
def _from_factorized(cls, values, original):
return cls(values)
def _values_for_factorize(self):
"""Return an array and missing value suitable for factorization (ie grouping)."""
arr = np.asarray(self[:], dtype=object)
return arr, Terms({})
def _check_token_arg(self, token):
if isinstance(token, str):
return token
elif isinstance(token, list) and len(token) == 1:
return token[0]
elif isinstance(token, list):
return token
else:
raise TypeError("Expected a string or list of strings for phrases")
# ***********************************************************
# Search functionality
# ***********************************************************
def termfreqs(self, token: Union[List[str], str]) -> np.ndarray:
token = self._check_token_arg(token)
if isinstance(token, list):
return self.phrase_freq(token)
try:
term_id = self.term_dict.get_term_id(token)
matches = np.zeros(len(self), dtype=int)
slice_of_rows = None
if self.term_mat.subset:
slice_of_rows = self.term_mat.rows
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
mask = np.isin(self.term_mat.rows, doc_ids)
matches[mask] = termfreqs
return matches
else:
doc_ids, termfreqs = self.posns.termfreqs(term_id,
doc_ids=slice_of_rows)
matches[doc_ids] = termfreqs
return matches
except Te<fim_suffix>rmMissingError:
return np.zeros(len(self), dtype=int)
def docfreq(self, token: str) -> int:
if not isinstance(token, str):
raise TypeError("Expected a string")
# Count number of rows where the term appears
try:
return self.posns.docfreq(self.term_dict.get_term_id(token))
except TermMissingError:
return 0
def doclengths(self) -> np.ndarray:
return self.doc_lens
def match(self, token: Union[List[str], str], slop: int = 1) -> np.ndarray:
"""Return a boolean numpy array indicating which elements contain the given term."""
token = self._check_token_arg(token)
if isinstance(token, list):
term_freq = self.phrase_freq(token)
else:
term_freq = self.termfreqs(token)
return term_freq > 0
def score(self, token: Union[str, List[str]], similarity: Similarity = default_bm25) -> np.ndarray:
"""Score each doc using a similarity function.
Parameters
----------
token : str or list of str of what to search (already tokenized)
similarity : How to score the documents. Default is BM25.
"""
# Get term freqs per token
token = self._check_token_arg(token)
        # For expensive tokens, we compute doc freq first, so we
        # cache them in the DF cache, letting the TF cache know they should be cached
tokens_l = [token] if isinstance(token, str) else token
all_dfs = np.asarray([self.docfreq(token) for token in tokens_l])
tfs = self.termfreqs(token)
token = self._check_token_arg(token)
doc_lens = self.doclengths()
scores = similarity(term_freqs=tfs, doc_freqs=all_dfs,
doc_lens=doc_lens, avg_doc_lens=self.avg_doc_length,
num_docs=len(self))
return scores
def positions(self, token: str, key=None) -> List[np.ndarray]:
"""Return a list of lists of positions of the given term."""
term_id = self.term_dict.get_term_id(token)
key = self.term_mat.rows[key] if key is not None else self.term_mat.rows
posns = self.posns.positions(term_id, doc_ids=key)
return posns
def and_query(self, tokens: Union[List[str], List[List[str]]]) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.ones(len(self), dtype=bool)
for curr_mask in masks:
mask = mask & curr_mask
return mask
def or_query(self, tokens: Union[List[str], List[List[str]]], min_should_match: int = 1) -> np.ndarray:
"""Return a mask on the postings array indicating which elements contain all terms."""
masks = [self.match(term) for term in tokens]
mask = np.sum(masks, axis=0) >= min_should_match
return mask
def phrase_freq(self, tokens: List[str], slop=1) -> np.ndarray:
if slop == 1 and len(tokens) == len(set(tokens)):
phrase_freqs = np.zeros(len(self))
try:
doc_ids = self.term_mat.rows
term_ids = [self.term_dict.get_term_id(token) for token in tokens]
return self.posns.phrase_freqs(term_ids, doc_ids=doc_ids,
phrase_freqs=phrase_freqs)
except TermMissingError:
return phrase_freqs
else:
return self.phrase_freq_every_diff(tokens, slop=slop)
def phrase_freq_scan(self, tokens: List[str], mask=None, slop=1) -> np.ndarray:
if mask is None:
mask = self.and_query(tokens)
if np.sum(mask) == 0:
return mask
# Gather positions
posns = [self.positions(token, mask) for token in tokens]
phrase_freqs = np.zeros(len(self))
phrase_freqs[mask] = scan_merge_ins(posns, phrase_freqs[mask], slop=slop)
return phrase_freqs
def phrase_freq_every_diff(self, tokens: List[str], slop=1) -> np.ndarray:
phrase_freqs = -np.ones(len(self))
mask = self.and_query(tokens)
phrase_freqs[~mask] = 0
if np.sum(mask) == 0:
return phrase_freqs
term_posns = [self.positions(term, mask) for term in tokens]
for width in [10, 20, 30, 40]:
phrase_freqs[mask] = compute_phrase_freqs(term_posns,
phrase_freqs[mask],
slop=slop,
width=width)
remaining_mask = phrase_freqs == -1
if np.any(remaining_mask):
remainder_freqs = self.phrase_freq_scan(tokens, mask=remaining_mask, slop=slop)
phrase_freqs[remaining_mask] = remainder_freqs[remaining_mask]
return phrase_freqs
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
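
The and_query and or_query methods in the row above combine per-term boolean match masks with numpy: AND requires every term mask to be True, while OR with min_should_match counts how many term masks are True per document. A minimal, self-contained numpy sketch of that aggregation; the masks and document counts below are invented purely for illustration and are not taken from the dataset row.

# --- illustrative sketch, not part of the dataset row above ---
import numpy as np

# Per-term boolean match masks over five documents (one row per query term),
# analogous to what match() returns for each term in the code above.
masks = np.array([
    [True,  True,  False, False, True],   # matches for term 1
    [True,  False, True,  False, False],  # matches for term 2
    [False, True,  True,  False, True],   # matches for term 3
])

# AND semantics: every term must match (mirrors and_query).
and_mask = np.logical_and.reduce(masks, axis=0)

# OR semantics with a minimum-should-match threshold (mirrors or_query):
# count True term masks per document and compare against the threshold.
min_should_match = 2
or_mask = np.sum(masks, axis=0) >= min_should_match

assert and_mask.tolist() == [False, False, False, False, False]
assert or_mask.tolist() == [True, True, True, False, True]
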
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except Va<fim_suffix>lueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and a Solr-style explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
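
parse_min_should_match in the row above implements Solr's mm grammar: a plain integer, a negative offset from the clause count, a percentage, or conditional "bound<spec" pairs. A few doctest-style checks of that behavior; they assume the searcharray package shown in the row is installed, and the expected values were traced by hand through the code as written.

# --- illustrative sketch, assuming searcharray is importable ---
from searcharray.solr import parse_min_should_match

# Plain integer: that many clauses must match.
assert parse_min_should_match(4, "3") == 3
# Negative integer: all clauses minus that many.
assert parse_min_should_match(4, "-1") == 3
# Percentage of the clause count (fractions are truncated toward zero).
assert parse_min_should_match(4, "50%") == 2
assert parse_min_should_match(4, "100%") == 4
# The result is clamped to the number of clauses.
assert parse_min_should_match(2, "5") == 2
# Conditional form: at or below the bound all clauses must match,
# above the bound the spec on the right-hand side applies.
assert parse_min_should_match(2, "2<-25%") == 2
assert parse_min_should_match(8, "2<-25%") == 6
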
<filename>searcharray/searcharray/term_dict.py<fim_prefix>import sys
class TermMissingError(KeyError):
def __init__(self, msg):
super().__init__(msg)
class TermDict:
def __init__(self):
self.term_to_ids = {}
self.id_to_terms = {}
def add_term(self, term):
if term in self.term_to_ids:
return self.term_to_ids[term]
term_id = len(self.term_to_ids)
self.term_to_ids[term] = term_id
self.id_to_terms[term_id] = term
return term_id
def copy(self):
new_dict = TermDict()
new_dict.term_to_ids = dict(self.term_to_ids)
new_dict.id_to_terms = dict(self.id_to_terms.copy())
return new_dict
def get_term_id(self, term):
try:
return self.term_to_ids[term]
ex<fim_suffix>cept KeyError:
raise TermMissingError(f"Term {term} not present in dictionary. Reindex to add.")
def get_term(self, term_id):
try:
return self.id_to_terms[term_id]
except KeyError:
raise TermMissingError(f"Term at {term_id} not present in dictionary. Reindex to add.")
def compatible(self, other) -> bool:
# Intersect the terms in both dictionaries
terms_self = list(self.term_to_ids.keys())
terms_other = list(other.term_to_ids.keys())
shortest = min(len(terms_self), len(terms_other))
return terms_self[:shortest] == terms_other[:shortest]
        # Compatible means one dictionary's term ordering is a prefix of the other's
def __len__(self):
return len(self.term_to_ids)
def __repr__(self):
return repr(self.term_to_ids)
@property
def nbytes(self):
bytes_used = sys.getsizeof(self.term_to_ids) + sys.getsizeof(self.id_to_terms)
return bytes_used
<fim_middle> | null | CATCH | complete_current_header_empty_completion |
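
TermDict in the row above is a bidirectional term-to-id mapping whose lookups raise TermMissingError instead of a bare KeyError, and compatible() treats two dictionaries as compatible when one term ordering is a prefix of the other. A short usage sketch, assuming the module path from the row (searcharray.term_dict) is importable; the terms themselves are made up.

# --- illustrative sketch, assuming searcharray is importable ---
from searcharray.term_dict import TermDict, TermMissingError

td = TermDict()
cat_id = td.add_term("cat")             # ids are handed out in insertion order
dog_id = td.add_term("dog")
assert td.add_term("cat") == cat_id     # adding an existing term is a no-op
assert td.get_term(dog_id) == "dog"     # id -> term
assert td.get_term_id("dog") == dog_id  # term -> id
assert len(td) == 2

# Unknown terms raise TermMissingError (a KeyError subclass).
missing = False
try:
    td.get_term_id("fish")
except TermMissingError:
    missing = True
assert missing

# A copy extended with new terms stays compatible, because the shared
# prefix of the two term orderings is identical.
other = td.copy()
other.add_term("fish")
assert td.compatible(other)
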
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def checked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and a Solr-style explain string
"""
d<fim_suffix>ef listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | METHOD | complete_current_header_empty_completion |
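
parse_field_boosts in this copy of solr.py turns Solr-style "field^boost" strings into a dict; a bare field name gets a boost of None, which the edismax code treats as 1. A quick check, again assuming the searcharray package is importable; the field names are made up.

# --- illustrative sketch, assuming searcharray is importable ---
from searcharray.solr import parse_field_boosts

assert parse_field_boosts(["title^10", "body"]) == {"title": 10.0, "body": None}
assert parse_field_boosts([]) == {}
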
<filename>searcharray/searcharray/solr.py<fim_prefix>"""Utility functions for Solr users of searcharray."""
import re
import pandas as pd
import numpy as np
from typing import List, Optional, Dict, Tuple
from searcharray.postings import SearchArray
from searcharray.similarity import Similarity, default_bm25
def parse_min_should_match(num_clauses: int, spec: str) -> int:
"""Parse Solr's min should match (ie mm) spec.
See this ChatGPT translation of mm code from Solr's Java code for parsing this
https://chat.openai.com/share/76642aec-7e05-420f-a53a-83b8e2eea8fb
Parameters
----------
num_clauses : int
spec : str
Returns
-------
int : the number of clauses that must match
"""
def c<fim_suffix>hecked_parse_int(value, error_message):
try:
return int(value)
except ValueError:
raise ValueError(error_message)
result = num_clauses
spec = spec.strip()
if '<' in spec:
# we have conditional spec(s)
space_around_less_than_pattern = re.compile(r'\s*<\s*')
spec = space_around_less_than_pattern.sub('<', spec)
for s in spec.split():
parts = s.split('<', 1)
if len(parts) < 2:
raise ValueError("Invalid 'mm' spec: '" + s + "'. Expecting values before and after '<'")
upper_bound = checked_parse_int(parts[0], "Invalid 'mm' spec. Expecting an integer.")
if num_clauses <= upper_bound:
return result
else:
result = parse_min_should_match(num_clauses, parts[1])
return result
# otherwise, simple expression
if '%' in spec:
# percentage - assume the % was the last char. If not, let int() fail.
spec = spec[:-1]
percent = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
calc = (result * percent) * (1 / 100)
result = result + int(calc) if calc < 0 else int(calc)
else:
calc = checked_parse_int(spec, "Invalid 'mm' spec. Expecting an integer.")
result = result + calc if calc < 0 else calc
return min(num_clauses, max(result, 0))
def parse_field_boosts(field_lists: List[str]) -> dict:
"""Parse Solr's qf, pf, pf2, pf3 field boosts."""
if not field_lists:
return {}
out = {}
carat_pattern = re.compile(r'\^')
for field in field_lists:
parts = carat_pattern.split(field)
out[parts[0]] = None if len(parts) == 1 else float(parts[1])
return out
def get_field(frame, field) -> SearchArray:
if field not in frame.columns:
raise ValueError(f"Field {field} not in dataframe")
if not isinstance(frame[field].array, SearchArray):
raise ValueError(f"Field {field} is not a searcharray field")
return frame[field].array
def parse_query_terms(frame: pd.DataFrame,
query: str,
query_fields: List[str]):
search_terms: Dict[str, List[str]] = {}
num_search_terms = 0
term_centric = True
for field in query_fields:
arr = get_field(frame, field)
tokenizer = arr.tokenizer
search_terms[field] = []
field_num_search_terms = 0
for posn, term in enumerate(tokenizer(query)):
search_terms[field].append(term)
field_num_search_terms += 1
if num_search_terms == 0:
num_search_terms = field_num_search_terms
elif field_num_search_terms != num_search_terms:
term_centric = False
return num_search_terms, search_terms, term_centric
def _edismax_term_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity) -> Tuple[np.ndarray, str]:
explain = []
term_scores = []
for term_posn in range(num_search_terms):
max_scores = np.zeros(len(frame))
term_explain = []
for field, boost in query_fields.items():
term = search_terms[field][term_posn]
post_arr = get_field(frame, field)
field_term_score = post_arr.score(term, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
term_explain.append(f"{field}:{term}^{boost_exp}")
max_scores = np.maximum(max_scores, field_term_score)
term_scores.append(max_scores)
explain.append("(" + " | ".join(term_explain) + ")")
min_should_match = parse_min_should_match(num_search_terms, spec=mm)
qf_scores = np.asarray(term_scores)
matches_gt_mm = np.sum(qf_scores > 0, axis=0) >= min_should_match
qf_scores = np.sum(term_scores, axis=0)
qf_scores[~matches_gt_mm] = 0
return qf_scores, "(" + " ".join(explain) + f")~{min_should_match}"
def _edismax_field_centric(frame: pd.DataFrame,
query_fields: Dict[str, float],
num_search_terms: int,
search_terms: Dict[str, List[str]],
mm: str,
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
field_scores = []
explain = []
for field, boost in query_fields.items():
post_arr = get_field(frame, field)
term_scores = np.array([post_arr.score(term, similarity=similarity)
for term in search_terms[field]])
min_should_match = parse_min_should_match(len(search_terms[field]), spec=mm)
exp = " ".join([f"{field}:{term}" for term in search_terms[field]])
boost_exp = f"{boost}" if boost is not None else "1"
exp = "(" + exp + f")~{min(min_should_match, len(search_terms[field]))}"
exp = "(" + exp + f")^{boost_exp}"
matches_gt_mm = np.sum(term_scores > 0, axis=0) >= min(min_should_match, len(search_terms[field]))
sum_terms_bm25 = np.sum(term_scores, axis=0)
sum_terms_bm25[~matches_gt_mm] = 0
field_scores.append(sum_terms_bm25 * (1 if boost is None else boost))
explain.append(exp)
# Take maximum field scores as qf
qf_scores = np.asarray(field_scores)
qf_scores = np.max(qf_scores, axis=0)
return qf_scores, " | ".join(explain)
def edismax(frame: pd.DataFrame,
q: str,
qf: List[str],
mm: Optional[str] = None,
pf: Optional[List[str]] = None,
pf2: Optional[List[str]] = None,
pf3: Optional[List[str]] = None,
q_op: str = "OR",
similarity: Similarity = default_bm25) -> Tuple[np.ndarray, str]:
"""Run edismax search over dataframe with searcharray fields.
Parameters
----------
q : str
The query string
mm : str
The minimum should match spec
qf : list
The fields to search
pf : list
The fields to search for phrase matches
pf2 : list
The fields to search for bigram matches
pf3 : list
The fields to search for trigram matches
q_op : str, optional
The default operator, by default "OR"
Returns
-------
    Tuple[np.ndarray, str]
        The per-document scores and a Solr-style explain string
"""
def listify(x):
return x if isinstance(x, list) else [x]
query_fields = parse_field_boosts(listify(qf))
phrase_fields = parse_field_boosts(listify(pf)) if pf else {}
if mm is None:
mm = "1"
if q_op == "AND":
mm = "100%"
# bigram_fields = parse_field_boosts(pf2) if pf2 else {}
# trigram_fields = parse_field_boosts(pf3) if pf3 else {}
num_search_terms, search_terms, term_centric = parse_query_terms(frame, q, list(query_fields.keys()))
if term_centric:
qf_scores, explain = _edismax_term_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
else:
qf_scores, explain = _edismax_field_centric(frame, query_fields,
num_search_terms, search_terms, mm,
similarity=similarity)
phrase_scores = []
for field, boost in phrase_fields.items():
arr = get_field(frame, field)
terms = search_terms[field]
field_phrase_score = arr.score(terms, similarity=similarity) * (1 if boost is None else boost)
boost_exp = f"{boost}" if boost is not None else "1"
explain += f" ({field}:\"{' '.join(terms)}\")^{boost_exp}"
phrase_scores.append(field_phrase_score)
if len(phrase_scores) > 0:
phrase_scores = np.sum(phrase_scores, axis=0)
# Add where term_scores > 0
term_match_idx = np.where(qf_scores)[0]
qf_scores[term_match_idx] += phrase_scores[term_match_idx]
return qf_scores, explain
<fim_middle> | null | METHOD | complete_current_header_empty_completion |
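
The two _edismax_* helpers in this row differ only in aggregation order: the term-centric path takes the maximum score across fields for each term and then sums over terms, while the field-centric path sums a field's term scores first and then takes the maximum across fields. A plain-numpy sketch of that difference; the score values are invented, and boosts and the minimum-should-match mask are ignored here.

# --- illustrative sketch of the two aggregation orders ---
import numpy as np

# scores[field, term, doc]: per-field, per-term scores for three documents.
scores = np.array([
    [[1.0, 0.0, 2.0],    # field "title", term 1
     [0.0, 3.0, 1.0]],   # field "title", term 2
    [[0.5, 4.0, 0.0],    # field "body",  term 1
     [2.0, 0.0, 0.0]],   # field "body",  term 2
])

# Term-centric (mirrors _edismax_term_centric): best field per term, summed over terms.
term_centric = scores.max(axis=0).sum(axis=0)
# Field-centric (mirrors _edismax_field_centric): sum terms within a field, best field wins.
field_centric = scores.sum(axis=1).max(axis=0)

assert term_centric.tolist() == [3.0, 7.0, 3.0]
assert field_centric.tolist() == [2.5, 4.0, 3.0]
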
<filename>tanuki_py/src/tanuki/language_models/language_model_manager.py<fim_prefix>import json
from typing import Any, Dict
from tanuki.function_modeler import FunctionModeler
from tanuki.language_models.llm_api_abc import LLM_API
from tanuki.models.function_description import FunctionDescription
from tanuki.models.function_example import FunctionExample
from tanuki.models.language_model_output import LanguageModelOutput
from tanuki.utils import approximate_token_count
from tanuki.validator import Validator
from tanuki.models.api_manager import APIManager
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
import logging
class LanguageModelManager(object):
"""
The LanguageModelManager is responsible for managing the language models and their outputs operationally,
this includes:
- Generating outputs from the language models
- Repairing outputs from the language models
- Saving outputs from the language models
- Finetuning the language models from the saved outputs
"""
def __init__(self,
function_modeler: FunctionModeler,
api_provider: APIManager,
generation_token_limit=512,) -> None:
self.api_provider = api_provider
self.function_modeler = function_modeler
self.default_generation_length = generation_token_limit
self.initialized_functions = {}
self.token_counts = {}
def __call__(self,
args,
function_description: FunctionDescription,
kwargs,
validator: Validator,
generation_parameters: dict) -> Any:
# add the generation length if not there
if "max_new_tokens" not in generation_parameters:
generation_parameters["max_new_tokens"] = self.default_generation_length
output = self.generate(args, kwargs, function_description, generation_parameters)
# start parsing the object, very hacky way for the time being
choice_parsed = self._parse_choice(output)
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
choice, choice_parsed, successful_repair = self.repair_output(args,
kwargs,
function_description,
output.generated_response,
validator,
generation_parameters)
if not successful_repair:
raise TypeError(
f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{output.generated_response}'")
output.generated_response = choice
output.distilled_model = False
datapoint = FunctionExample(args, kwargs, output.generated_response)
if output.suitable_for_finetuning and not output.distilled_model:
self.function_modeler.postprocess_symbolic_datapoint(function_description.__hash__(), function_description,
datapoint, repaired=not valid)
instantiated = validator.instantiate(choice_parsed, function_description.output_type_hint)
return instantiated
def _parse_choice(self, output):
try:
# json load
choice_parsed = json.loads(output.generated_response)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(output.generated_response)
except:
choice_parsed = output.generated_response
return choice_parsed
def generate(self, args, kwargs, function_description, llm_parameters={}):
"""
The main generation function, given the args, kwargs, function description and model type, generate a response and check if the datapoint can be saved to the finetune dataset
"""
func_hash = function_description.__hash__()
prompt, model, save_to_finetune, is_distilled_model = self.get_generation_case(args, kwargs,
function_description,
llm_parameters,
func_hash)
        # logging
current_function_setup = self.initialized_functions.get(func_hash, None) # getting the current function setup - model and align statements
if current_function_setup:
generator_model = current_function_setup["model"]
if is_distilled_model:
logging.info(f"Generating function outputs for {function_description.name} with a finetuned model: {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model == "":
logging.info(f"Found {len(current_function_setup['examples'])} align statements for {function_description.name}. Generating function outputs with {model.model_name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
elif generator_model != model.model_name:
logging.info(f"Switching output generation from {generator_model} to {model.model_name} for function {function_description.name}.")
self.initialized_functions[func_hash]["model"] = model.model_name
choice = self._synthesise_answer(prompt, model, llm_parameters)
output = LanguageModelOutput(choice, save_to_finetune, is_distilled_model)
return output
def _synthesise_answer(self, prompt, model, llm_parameters):
"""
Synthesise an answer given the prompt, model, model_type and llm_parameters
Args:
prompt (str): The prompt to send to the model
model (BaseModelConfig): The model to use for generation
llm_parameters (dict): The parameters to use for generation
return:
choice (str): The generated response
"""
system_message = model.system_message
return self.api_provider[model.provider].generate(model, system_message, prompt, **llm_parameters)
def get_generation_case(self, args, kwargs, function_description, llm_parameters, func_hash):
"""
Get the generation case with the correct prompt and model
First get the current model, then if distilled model, do zero-shot prompt and return False as suitable_for_finetune
If not distilled model, check if suitable for finetuning, create the prompt and return the correct model given the token count
"""
f = str(function_description.__dict__.__repr__())
distilled_model, teacher_models = self.function_modeler.get_models(function_description)
is_distilled_model = distilled_model.model_name != ""
suitable_for_distillation, input_prompt_token_count = self.suitable_for_finetuning_token_check(args, kwargs, f,
distilled_model)
if func_hash not in self.initialized_functions:
# initialise the initialized_functions dict
self.initialized_functions[func_hash] = {"model": "", "examples": []}
        # no examples needed, using a finetuned model. Don't save to the finetune dataset
if is_distilled_model and suitable_for_distillation:
prompt = self.construct_prompt(f, args, kwargs, [], distilled_model)
return prompt, distilled_model, suitable_for_distillation, True
else:
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=16)
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
# update the examples in the initialized_functions dict
self.initialized_functions[func_hash]["examples"] = examples
examples_token_count = sum([approximate_token_count(example) for example in examples])
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(teacher_models,
examples_token_count + input_prompt_token_count + generation_tokens,
len(examples))
if model:
examples_with_parsing_tokens = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput:{model.parsing_helper_tokens['start_token']}{align['output']}{model.parsing_helper_tokens['end_token']}" for align in
aligns]
prompt = self.construct_prompt(f, args, kwargs, examples_with_parsing_tokens, model)
return prompt, model, suitable_for_distillation, False
else:
raise ValueError(
"The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def suitable_for_finetuning_token_check(self, args, kwargs, f, distilled_model: BaseModelConfig):
"<fim_suffix>""
Check if the inputs are suitable for finetuning, i.e are below the finetuning token count
"""
# check if finetunable
finetuning_prompt = f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
input_prompt_token_count = approximate_token_count(finetuning_prompt)
if distilled_model.system_message_token_count < 0:
distilled_model.system_message_token_count = approximate_token_count(distilled_model.system_message)
if distilled_model.instruction_token_count < 0:
distilled_model.instruction_token_count = approximate_token_count(distilled_model.instructions)
suitable_for_finetune = input_prompt_token_count + distilled_model.instruction_token_count + distilled_model.system_message_token_count < distilled_model.context_length
return suitable_for_finetune, input_prompt_token_count
def construct_prompt(self, f, args, kwargs, examples, model):
"""
Construct a prompt given the model, function description, args, kwargs and examples
Args:
model (BaseModelConfig): The model to use for generation
f (str): The function description
args (tuple): The args of the function
kwargs (tuple): The kwargs of the function
examples (list): The examples of the function
Returns:
content (str): The prompt to send to the model
"""
if examples:
final_examples = "\n".join(
[f"{align}" for align in
examples])
example_input = f"Examples:{final_examples}\n"
else:
example_input = ""
instruction_prompt = model.instructions
content = f"{instruction_prompt}\nFunction: {f}\n{example_input}---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:"
return content
def repair_generate(self, args, kwargs, f, failed_outputs_list, aligns, models, llm_parameters):
"""
Repair the output given the input, function description, failed outputs list, examples and models
"""
# get the token counts
examples = [f"Inputs:\nArgs: {align['args']}\nKwargs: {align['kwargs']}\nOutput: {align['output']}" for align in
aligns]
examples_token_count = sum([approximate_token_count(example) for example in examples])
failed_examples_token_count = sum([approximate_token_count(failed_output[0]) + approximate_token_count(failed_output[1]) for failed_output in failed_outputs_list])
input_prompt_token_count = approximate_token_count(f"Function: {f}\n---\nInputs:\nArgs: {args}\nKwargs: {kwargs}\nOutput:")
generation_tokens = llm_parameters.get("max_new_tokens", self.default_generation_length)
model = self.choose_model_from_tokens(models,
examples_token_count+input_prompt_token_count+generation_tokens+failed_examples_token_count,
len(examples))
if model:
prompt = self.generate_repair_prompt(args, kwargs, f, failed_outputs_list, examples, model)
logging.info(f"Previous output failed type validation, attempting to repair with {model.model_name}")
choice = self._synthesise_answer(prompt, model, llm_parameters)
return choice
else:
return None
def generate_repair_prompt(self, args, kwargs, f, failed_outputs_list, examples, model):
"""
Generate a repair prompt given the args, kwargs, function description, failed outputs list and examples
"""
if examples:
final_examples = "\n".join(
[f"{model.parsing_helper_tokens['start_token']}{align}{model.parsing_helper_tokens['end_token']}" for align in
examples])
successful_examples = f"Examples:{final_examples}\n"
else:
successful_examples = ""
failed_examples = ""
for failed_output in failed_outputs_list:
failed_examples += f"Output: {failed_output[0]}\nError: {failed_output[1]}\n\n"
end_token_addition = ""
if model.parsing_helper_tokens["end_token"]:
end_token_addition = f"Make sure to add the {model.parsing_helper_tokens['end_token']} token at the end of the output."
prompt = f"{model.repair_instruction}{end_token_addition}\nFUNCTION DESCRIPTION: {f}\n{successful_examples}---{model.parsing_helper_tokens['start_token']}Inputs:\nArgs: {args}\nKwargs: {kwargs}\nFAILED EXAMPLES: {failed_examples}Correct output:"
return prompt
def choose_model_from_tokens(self, models, input_token_count, nr_of_examples=0):
"""
Choose a model from the models given the token count and number of examples
Args:
models (list): The models to choose from
input_token_count (int): The token count of the input
nr_of_examples (int): The number of examples
Returns:
model (BaseModelConfig): The chosen model
"""
for model in models:
# check if input token count is less than the context length
# If the model config has custom messages, then use those, otherwise use the default ones
if model.system_message_token_count < 0:
model.system_message_token_count = approximate_token_count(model.system_message)
if model.instruction_token_count < 0:
model.instruction_token_count = approximate_token_count(model.instructions)
if model.parsing_helper_tokens["start_token"]:
input_token_count += 2*nr_of_examples
if model.parsing_helper_tokens["end_token"]:
input_token_count += 2*nr_of_examples
total_token_count = input_token_count + model.instruction_token_count + model.system_message_token_count
if total_token_count < model.context_length:
return model
return None
def repair_output(self,
args: tuple,
kwargs: dict,
function_description: FunctionDescription,
choice,
validator: Validator,
generation_parameters: dict) -> tuple:
"""
Repair an output, that failed type validation by generating a new output using the teacher model and the error
Args:
args (tuple): The args of the function
kwargs (dict): The kwargs of the function
function_description (FunctionDescription): The function description
choice: The output that failed type validation, type is arbitrary
validator (Validator): The validator object
Returns:
choice (str): The choice that was generated by the language model
choice_parsed: The parsed choice, type is arbitrary
            valid (bool): Whether the repaired output passed type validation
"""
# get the teacher models
teacher_models = self.function_modeler.get_models(function_description)[1]
valid = False
retry_index = 5
f = str(function_description.__dict__.__repr__() + "\n")
error = f"Output type was not valid. Expected an valid object of type {function_description.output_type_hint}, got '{choice}'"
# instantiate the failed outputs list
failed_outputs_list = [(choice, error)]
while retry_index > 0 and not valid:
# get the alignments
aligns = self.function_modeler.get_symbolic_alignments(function_description.__hash__(), max=5)
            # Generate the repaired LLM output
choice = self.repair_generate(args,
kwargs,
f,
failed_outputs_list,
aligns,
teacher_models,
generation_parameters)
if not choice:
# if no choice then the input was too long for the model
# no specific error but the retry index goes down
retry_index -= 1
continue
# start parsing the object
try:
# json load
choice_parsed = json.loads(choice)
except:
# if it fails, it's not a json object, try eval
try:
choice_parsed = eval(choice)
except:
choice_parsed = choice
valid = validator.check_type(choice_parsed, function_description.output_type_hint)
if not valid:
# if it's not valid, add it to the failed outputs list
error = f"Output type was not valid. Expected an object of type {function_description.output_type_hint}, got '{choice}'"
failed_outputs_list.append((choice, error))
retry_index -= 1
if valid:
logging.info(f"Successfully repaired output.")
return choice, choice_parsed, valid
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
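
choose_model_from_tokens in the row above returns the first model whose context window fits the prompt plus that model's own system-message and instruction overhead, so the order of the model list acts as the preference order. A stripped-down standalone sketch of the same selection rule; ModelBudget, choose_model, the model names, and the token numbers are hypothetical stand-ins, not tanuki's real configuration objects.

# --- illustrative sketch; all names and numbers below are hypothetical ---
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class ModelBudget:
    name: str
    context_length: int   # total tokens the model can accept
    overhead_tokens: int  # system message + instruction tokens for this model

def choose_model(models: List[ModelBudget], input_tokens: int) -> Optional[ModelBudget]:
    """Return the first model whose context window fits input plus per-model overhead."""
    for model in models:
        if input_tokens + model.overhead_tokens < model.context_length:
            return model
    return None  # nothing fits; callers treat this as "input too long"

models = [
    ModelBudget("small-4k", context_length=4096, overhead_tokens=200),
    ModelBudget("large-32k", context_length=32768, overhead_tokens=200),
]

assert choose_model(models, input_tokens=3000).name == "small-4k"
assert choose_model(models, input_tokens=10000).name == "large-32k"
assert choose_model(models, input_tokens=40000) is None
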
<filename>tanuki_py/src/tanuki/validator.py<fim_prefix>import abc
from collections import defaultdict
import collections
import typing
from collections import deque
import dataclasses
import inspect
import json
from dataclasses import is_dataclass
from typing import get_origin, get_args, Any, Mapping, MutableMapping, OrderedDict, Literal, Union, get_type_hints, \
Type, Sequence, Tuple, Optional
from pydantic import BaseModel, create_model
import datetime
class Validator:
def __init__(self):
# Extract types from collections and collections.abc
collection_types = {cls for name, cls in collections.__dict__.items() if isinstance(cls, type)}
abc_collection_types = {cls for name, cls in collections.abc.__dict__.items() if isinstance(cls, type)}
# Filter out types that have dictionary-like methods
self.dict_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'keys') and hasattr(cls, 'items')
}
self.list_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'append') and hasattr(cls, 'pop')
}
self.set_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, 'add') and hasattr(cls, 'discard')
}
# Add the general Sequence to list-like types
# if python version is 3.9 or above, use collections.abc.Sequence
if hasattr(collections.abc, 'Sequence'):
self.list_like_types.add(collections.abc.Sequence)
else:
self.list_like_types.add(collections.Sequence)
self.list_like_types.add(typing.List)
# Add the general Mapping to dict-like types
if hasattr(collections.abc, 'Mapping'):
self.dict_like_types.add(collections.abc.Mapping)
else:
self.dict_like_types.add(collections.Mapping)
self.dict_like_types.add(typing.Dict)
# Add the general Set to set-like types
if hasattr(collections.abc, 'Set'):
self.set_like_types.add(collections.abc.Set)
else:
self.set_like_types.add(collections.Set)
self.set_like_types.add(typing.Set)
# Add the general Tuple to tuple-like types
self.tuple_like_types = {
cls for cls in collection_types.union(abc_collection_types)
if hasattr(cls, '__getitem__') and hasattr(cls, '__len__')
}
self.tuple_like_types.add(typing.Tuple)
def is_base_type(self, _type: Any) -> bool:
"""Determine if a type is a base type."""
return _type in {int, float, str, bool, None}
def validate_base_type(self, value: Any, typ: Any) -> bool:
"""Validate base types."""
if typ is None:
return value is None
return isinstance(value, typ)
def validate_output(self, output: str, type_definition: Any) -> bool:
try:
deserialized_output = json.loads(output)
except json.JSONDecodeError:
return False
return self.check_type(deserialized_output, type_definition)
def check_type(self, value: Any, type_definition: Any) -> bool:
""<fim_suffix>"
Validate a value against a type definition.
Args:
value: Any object or primitive value
type_definition: The type definition to validate against
Returns:
Whether the value is valid for the type definition
"""
if type_definition is Any:
return True
if self.is_base_type(type_definition):
return self.validate_base_type(value, type_definition)
origin = get_origin(type_definition) or type_definition
args = get_args(type_definition)
# Handle base types
if self.is_base_type(origin):
return self.validate_base_type(value, origin)
if origin == Literal:
return value in args
if origin == Union:
return any(self.check_type(value, union_type) for union_type in args)
# Handle tuples
if origin == tuple:
if not isinstance(value, tuple):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle lists
if origin == list:
if not isinstance(value, list):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle more complex types that are collections and list-like
if origin is list or issubclass(origin, tuple(self.list_like_types)):
if not any(isinstance(value, t) for t in self.list_like_types):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle sets
if origin == set:
if not isinstance(value, set):
return False
item_type = args[0] if args else Any
return all(self.check_type(v, item_type) for v in value)
# Handle datetime
if origin in [datetime.datetime, datetime.date, datetime.time]:
# try to instantiate datetime
try:
obj = origin(**value)
return True
except:
return False
# Handle dictionaries
if origin is dict or issubclass(origin, tuple(self.dict_like_types)):
if not isinstance(value, (dict, Mapping)):#, MutableMapping, OrderedDict)):
return False
if args:
if len(args) == 1:
key_type = args[0]
value_type = Any # General assumption; specific dict-like types might differ
elif len(args) == 2:
key_type, value_type = args
else:
key_type = value_type = Any
else:
key_type = value_type = Any
return all(
self.check_type(k, key_type) and self.check_type(v, value_type)
for k, v in value.items()
)
# Handle pydantic models
if self.is_pydantic_model(origin):
try:
#temp_model = create_model('TempModel', **value)
if isinstance(value, origin):
return True
#return isinstance(temp_model, origin)
# check if value is dict
if not isinstance(value, dict):
return False
# get all required init arguments for origin
                # required arguments are the ones without default values
required_fields = [field for field, field_type in origin.__annotations__.items() if not (typing.get_origin(field_type) is Union and type(None) in typing.get_args(field_type))]
# check that all required arguments are in value and do type checking
for arg in required_fields:
# check if it is in value
if arg not in value:
return False
# get the type of the argument
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
                # check that all arguments in value are of the correct type
                # this is an additional check, because the above check only covers required arguments
for arg, obj in value.items():
if arg in required_fields:
continue
arg_type = origin.__annotations__[arg]
if not self.check_type(value[arg], arg_type):
return False
#origin.parse_obj(value)
return True
except Exception as e:
print(e)
return False
# Handle dataclasses
if self.is_dataclass_instance(origin):
try:
# for field in dataclasses.fields(origin):
# field_name = field.name
# field_type = field.type
# if field_name not in value or not self.check_type(value[field_name], field_type):
# return False
# return True
obj = origin(**value)
return dataclasses.asdict(obj) == value
except:
return False
# Handle dataclasses and arbitrary class types
if inspect.isclass(origin) and not self.is_base_type(origin):
# Ensure the value is an instance of the class
if not isinstance(value, origin):
return False
# Gather type hints from the class and its bases
type_hints = {}
for cls in reversed(origin.__mro__):
type_hints.update(get_type_hints(cls))
# Validate each attribute of the class
for attr, attr_type in type_hints.items():
attr_value = getattr(value, attr, None)
if not self.check_type(attr_value, attr_type):
return False
return True
return False
@staticmethod
def is_pydantic_model(cls):
return hasattr(cls, 'parse_obj')
@staticmethod
def is_dataclass_instance(cls):
return hasattr(cls, '__annotations__') and hasattr(cls, '__dataclass_fields__')
@staticmethod
def _is_subclass_of_generic(cls: Type, generic: Type) -> bool:
"""Determine if the class is a subclass of a generic type."""
try:
return issubclass(cls, generic) and cls is not generic
except TypeError:
if not hasattr(cls, '__origin__'):
return False
return cls.__origin__ is generic
@staticmethod
def _is_generic(cls: Type) -> bool:
"""Check if the provided type is a generic."""
return hasattr(cls, "__origin__")
def _get_recursive_args(self, target_type: Type) -> Tuple[Type, ...]:
"""
Recursively check the base classes (i.e., the superclass chain) of the target type until we find one that
retains the type arguments.
:return: Type chain
"""
if get_args(target_type):
return get_args(target_type)
for base in target_type.__bases__:
args = self._get_recursive_args(base)
if args:
return args
return ()
def _find_generic_base_and_args(self, target_type: Type) -> Tuple[Type, Tuple[Type, ...]]:
"""
Navigate up the MRO to find the first generic base and its arguments.
"""
# First, check if target_type is a type annotation.
# If so, directly return its origin and arguments.
origin = get_origin(target_type)
args = get_args(target_type)
if origin and args:
return origin, args
# If target_type is a real class, then navigate its MRO.
if hasattr(target_type, '__mro__'):
if hasattr(target_type, '__orig_bases__'):
for base in target_type.__orig_bases__:
if get_args(base):
return base, get_args(base)
for base in target_type.__mro__:
if get_args(base):
return base, get_args(base)
return None, ()
def _is_list_like(self, target_type: Type) -> bool:
"""Determine if the target type is list-like."""
if target_type in {list, typing.List}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {list, typing.List}:
return True
return False
def _is_tuple_like(self, target_type: Type) -> bool:
"""Determine if the target type is tuple-like."""
if target_type in {tuple, typing.Tuple}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {tuple, typing.Tuple}:
return True
return False
def _is_dict_like(self, target_type: Type) -> bool:
"""Determine if the target type is dict-like."""
if target_type in {dict, typing.Dict}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {dict, typing.Dict}:
return True
return False
def _is_set_like(self, target_type: Type) -> bool:
"""Determine if the target type is set-like."""
if target_type in {set, typing.Set}:
return True
if hasattr(target_type, "__origin__") and target_type.__origin__ in {set, typing.Set}:
return True
return False
def instantiate(self, data: Any, target_type: Type) -> Any:
"""
Attempts to convert a JSON-compatible data structure into an instance of the specified type.
Args:
data: JSON-compatible data structure to instantiate the target type.
target_type: The type to instantiate from the given data.
Returns:
An instance of the target type initialized with the data.
"""
# Handle None type
if data is None:
return None
origin = get_origin(target_type) or target_type
# If the target type is a built-in, attempt to instantiate and return
if self.is_base_type(target_type) or target_type is Any:
# If the parsed data is a string and target type is str, return it directly
if isinstance(data, str) and target_type is str:
return data
# If any, return the data directly
if target_type is Any:
return data
try:
return target_type(data)
except (ValueError, TypeError):
# Handle the special case where the string represents a float but we want an integer
if target_type is int:
try:
return int(float(data))
except (ValueError, TypeError):
pass
if target_type is float:
try:
                        return float(data)
except (ValueError, TypeError):
pass
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# special handling for datetime
if origin == datetime.datetime:
# try to instantiate datetime
try:
return datetime.datetime(**data)
except:
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# check if origin is Union, if so, instantiate the first type that works
if origin == Union:
for arg in get_args(target_type):
try:
return self.instantiate(data, arg)
except:
continue
raise TypeError(f"Failed to instantiate {target_type} from provided data.")
# If the data is a dictionary and the target is a custom class that can be instantiated from a dictionary.
if isinstance(data, dict):
if inspect.isclass(target_type) and not self.is_base_type(target_type):
# Special handling for dataclasses
if is_dataclass(target_type):
fields = [f.name for f in dataclasses.fields(target_type)]
type_hints = get_type_hints(target_type)
filtered_data = {k: self.instantiate(v, type_hints.get(k, Any)) for k, v in data.items() if
k in fields}
return target_type(**filtered_data)
# Special handling for Pydantic models
if issubclass(target_type, BaseModel):
# instantiate the sub attributes
for attr, attr_type in target_type.__annotations__.items():
if attr in data:
data[attr] = self.instantiate(data[attr], attr_type)
try:
return target_type.model_validate(data)
except AttributeError as e:
# backwards compatibility with pydantic < 2
return target_type.parse_obj(data)
# For general classes, attempt instantiation
try:
return target_type(**data)
except TypeError:
raise TypeError(f"Failed to instantiate {target_type.__name__} from dictionary.")
# Handle dictionary-like types
# Check if the target type is or inherits from defaultdict
if origin is defaultdict or (isinstance(origin, type) and issubclass(origin, defaultdict)):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# For defaultdict, you'll need a default factory. Here, I'm using `int` for simplicity,
# but you might want to adapt this based on your needs.
return defaultdict(int, instantiated_items)
# Handle set-like dict types like OrderedDict
# the first check needs to be done to ensure origin has the __mro__ attribute
            elif inspect.isclass(origin) and any(issubclass(base, dict) for base in origin.__mro__):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_items = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in data.items()}
return origin(instantiated_items)
# Handle other dictionary-like types
elif origin is dict or self._is_subclass_of_generic(origin, dict):
key_type, value_type = get_args(target_type) if get_args(target_type) else (Any, Any)
instantiated_dict = {self.instantiate(k, key_type): self.instantiate(v, value_type) for k, v in
data.items()}
# If the target_type is a subclass of dict, return an instance of target_type
if self._is_subclass_of_generic(target_type, dict) and not self._is_generic(target_type):
return target_type(instantiated_dict)
else:
return dict(instantiated_dict)
# Tuples aren't supported in JSONable types, so we look for lists instead
if isinstance(data, list):
try:
# If the origin or target type is a list-like type, or if it implements a list-like collections type
# e.g Sequence[int]
if origin is list or self._is_subclass_of_generic(origin, list):
base, item_types = self._find_generic_base_and_args(target_type)
item_type = item_types[0] if item_types else Any
instantiated_items = []
for item in data:
# For each item, validate and instantiate it
try:
instantiated_item = self.instantiate(item, item_type)
except ValueError:
                            raise TypeError(
                                f"Item of type {type(item).__name__} does not match expected type {item_type}.")
                        safe = self.check_type(instantiated_item, item_type)
                        if not safe:
                            raise TypeError(
                                f"Item of type {type(item).__name__} does not match expected type {item_type}.")
instantiated_items.append(instantiated_item)
# If target_type is a subclass of list, return an instance of target_type
if self._is_subclass_of_generic(target_type, list) and not self._is_generic(target_type):
return target_type(instantiated_items)
return instantiated_items
# Handle tuples
if self._is_tuple_like(target_type) or (isinstance(origin, type) and issubclass(origin, tuple)):
base, item_types = self._find_generic_base_and_args(target_type)
instantiated_items = []
# If there are no subscripted types, assume Any
if not item_types:
item_types = (Any,) * len(data)
for i, item in enumerate(data):
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_types[i])
instantiated_items.append(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
_type = item_types[i]
if not isinstance(instantiated_item, _type):
raise TypeError(
f"Item {i} of type {type(item).__name__} does not match expected type {item_types[i].__name__}.")
# Convert the list of instantiated items to a tuple
instantiated_tuple = tuple(instantiated_items)
# If target_type is a subclass of tuple, return an instance of target_type
if self._is_subclass_of_generic(target_type, tuple):
return target_type(instantiated_tuple)
return instantiated_tuple
# Handle sets
if self._is_set_like(target_type) or (isinstance(origin, type) and issubclass(origin, set)):
base, item_type = self._find_generic_base_and_args(target_type)
if not item_type:
item_type = Any
instantiated_items = set()
for item in data:
# For each item, validate and instantiate it
instantiated_item = self.instantiate(item, item_type[0])
instantiated_items.add(instantiated_item)
# If the instantiated item does not match the expected type, raise an exception
if not isinstance(instantiated_item, item_type[0]):
raise TypeError(
f"Item of type {type(item).__name__} does not match expected type {item_type[0].__name__}.")
# If target_type is a subclass of set, return an instance of target_type
if self._is_subclass_of_generic(target_type, set):
return target_type(instantiated_items)
return instantiated_items
# Handle deques
                if origin is deque or (isinstance(origin, type) and issubclass(origin, deque)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return deque(self.instantiate(item, item_type) for item in data)
if origin is frozenset or (isinstance(origin, type) and issubclass(origin, frozenset)):
item_type = get_args(target_type)[0] if get_args(target_type) else Any
return frozenset(self.instantiate(item, item_type) for item in data)
except TypeError as e:
print(e)
raise TypeError(f"Failed to instantiate {target_type} from list. {e}")
# If none of the above, return the data as-is
return data
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
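The instantiate routine in the row above is a type-directed deserializer: it walks the target type's structure and rebuilds dataclasses, pydantic models, dicts, lists, tuples, sets and deques from plain JSON-like data. A minimal standalone sketch of the same idea (hypothetical names, not tanuki's actual API, covering only dataclasses, lists and dicts):

# Minimal sketch of type-directed instantiation from JSON-like data.
# Hypothetical helper, not the tanuki implementation quoted above.
from dataclasses import dataclass, fields, is_dataclass
from typing import Any, List, get_args, get_origin, get_type_hints


def instantiate_typed(data: Any, target_type: Any) -> Any:
    """Recursively build target_type from plain dicts/lists/scalars."""
    origin = get_origin(target_type)
    if is_dataclass(target_type) and isinstance(data, dict):
        hints = get_type_hints(target_type)
        kwargs = {
            f.name: instantiate_typed(data[f.name], hints[f.name])
            for f in fields(target_type) if f.name in data
        }
        return target_type(**kwargs)
    if origin is list and isinstance(data, list):
        (item_type,) = get_args(target_type) or (Any,)
        return [instantiate_typed(item, item_type) for item in data]
    if origin is dict and isinstance(data, dict):
        key_type, value_type = get_args(target_type) or (Any, Any)
        return {
            instantiate_typed(k, key_type): instantiate_typed(v, value_type)
            for k, v in data.items()
        }
    return data  # scalars and unrecognised types pass through unchanged


@dataclass
class Point:
    x: int
    y: int


@dataclass
class Path:
    name: str
    points: List[Point]


raw = {"name": "route", "points": [{"x": 1, "y": 2}, {"x": 3, "y": 4}]}
path = instantiate_typed(raw, Path)
assert isinstance(path.points[0], Point)

The implementation quoted above additionally handles Union, datetime, pydantic models, defaultdict and the other container corner cases.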
<filename>tanuki_py/src/tanuki/utils.py<fim_prefix>import dataclasses
import datetime
import inspect
import json
import typing
from typing import get_args, Literal
import string
import types
def json_default(thing):
try:
return dataclasses.asdict(thing)
except TypeError:
pass
if isinstance(thing, datetime.datetime):
return thing.isoformat(timespec='microseconds')
if isinstance(thing, type):
return thing.__name__
#if hasattr(typing, "_GenericAlias") and isinstance(thing, typing._GenericAlias):
if hasattr(typing, "_UnionGenericAlias"):
if isinstance(thing, typing._UnionGenericAlias):
return {
"Union": [json_default(arg) for arg in get_args(thing)]
}
if thing == Literal[...]:
return {
"Literal": thing.__args__
}
if isinstance(thing, type(None)):
return "None"
if isinstance(thing, typing._SpecialForm):
return thing._name
if isinstance(thing, typing._GenericAlias) or isinstance(thing, types.GenericAlias):
return {
"GenericAlias": [json_default(arg) for arg in get_args(thing)]
}
if isinstance(thing, str):
return thing
if isinstance(thing, list) or isinstance(thing, tuple) or isinstance(thing, set):
return [json_default(item) for item in thing]
if isinstance(thing, dict):
return {json_default(key): json_default(value) for key, value in thing.items()}
raise TypeError(f"object of type {type(thing).__name__} not serializable")
def json_dumps(thing):
return json.dumps(
thing,
default=json_default,
ensure_ascii=False,
sort_keys=True,
indent=None,
separators=(',', ':'),
)
def get_model(content, logger, func_hash):
"""
Get the model from the content and the logger.
    Decide on the model depending on the length of the content. If the content fits within the finetuning token limit, return (finetune model, True); otherwise return (teacher model, False).
Args:
content (str): the content to be aligned
logger (buffered logger): the logger
func_hash (str): the function hash
Returns:
model (str): the model to be used
finetunable (bool): whether the model is finetunable
"""
num_tokens = approximate_token_count(content)
finetune_limit = logger.finetune_token_limit
finetune_model, teacher_models = logger.get_models(func_hash)
if num_tokens < finetune_limit:
return finetune_model, True
else:
# this is just for backwards compatibility currently
if len(teacher_models) == 0 or isinstance(teacher_models[0], str):
teacher_models = [("gpt-4", 7000),("gpt-4-32k", 31000)]
for model, token_limit in teacher_models:
if num_tokens < token_limit:
return model, False
raise ValueError("The input content and align statements combined are too long, please shorten it. The maximum currently allowed token limit is 32000")
def approximate_token_count(content):
"""
Approximate the token count of input
Number of tokens is word tokens (nr of words * 1.33) + nr of special characters (which are usually their own tokens)
Args:
content (str, bytes): the content to be approximated
Returns:
number_of_tokens (int): the number of tokens
"""
common_special_characters = r"\/(){}[]<>|`~@#$%^&*+=-_:;\""
# check if input type is string
if isinstance(content, str):
number_of_word_tokens = int(len(content.split(" "))*1.333)
nr_of_special_characters = sum([content.count(char) for char in common_special_characters])
return number_of_word_tokens + nr_of_special_characters
# check if input is a byte string
if isinstance(content, bytes):
number_of_word_tokens = int(len(content.split(b" "))*1.333)
nr_of_special_characters = sum([content.count(char.encode("utf-8")) for char in common_special_characters])
return number_of_word_tokens + nr_of_special_characters
def _deep_tuple(obj):
"""
Convert a list or dict to a tuple recursively to allow for hashing and becoming a key for mock_behaviors
:param obj:
:return:
"""
# transform pydantic objects into dicts
if hasattr(obj, "__dict__"):
obj = obj.__dict__
if isinstance(obj, list) or isinstance(obj, tuple):
return tuple(_deep_tuple(e) for e in obj)
elif isinstance(obj, dict):
return tuple((k, _deep_tuple(v)) for k, v in sorted(obj.items()))
else:
return obj
def get_key(args, kwargs) -> tuple:
args_tuple = _deep_tuple(args)
kwargs_tuple = _deep_tuple(kwargs)
return args_tuple, kwargs_tuple
def prepare_object_for_saving(input_object):
""<fim_suffix>"
Get a dictionary representation of the object
"""
# check if list
if isinstance(input_object, list):
return [prepare_object_for_saving(item) for item in input_object]
# check if tuple
elif isinstance(input_object, tuple):
return tuple([prepare_object_for_saving(item) for item in input_object])
# check if dict
elif isinstance(input_object, dict):
return {key: prepare_object_for_saving(value) for key, value in input_object.items()}
# check if pydantic object
if hasattr(input_object, "__dict__"):
attributes = input_object.__dict__
for key, value in attributes.items():
attributes[key] = prepare_object_for_saving(value)
return attributes
#
# check if datetime for custom logic
elif isinstance(input_object, datetime.datetime) or isinstance(input_object, datetime.date) or isinstance(input_object, datetime.time):
attrs = ['year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond', 'tzinfo']
attributes = {attr: getattr(input_object, attr, None) for attr in attrs if getattr(input_object, attr, None) is not None}
return attributes
return input_object
def encode_int(n):
# Define the character set for encoding
charset = string.ascii_lowercase + string.digits + "_"
return charset[n]
def decode_int(s):
# Define the character set for encoding
charset = string.ascii_lowercase + string.digits + "_"
return charset.index(s)
def _get_source_ipython(func) -> str:
"""
Get the source code of a function from IPython (to support Colab and Jupyter notebooks)
:param func: The function to get the source code from
:return: The source code of the function
"""
# Get the IPython instance
from IPython import get_ipython
ipython = get_ipython()
# Get the input history
input_cells = ipython.history_manager.input_hist_parsed
class_name = func.__name__
source_code = None
for cell in input_cells:
if f"class {class_name}" in cell:
source_code = cell
break
# If found, print the source code
return source_code
def get_source(func) -> str:
"""
Get the source code of a function
Args:
func (function): the function to get the source code from
Returns:
source (str): the source code of the function
"""
try:
return inspect.getsource(func)
except Exception:
return _get_source_ipython(func)<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
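approximate_token_count above is a heuristic, not a real tokenizer: roughly 1.333 tokens per whitespace-separated word plus one token per special character. A small usage sketch under that assumption (standalone re-statement, not the tanuki function itself):

# Rough token-count heuristic mirroring approximate_token_count above.
SPECIAL_CHARACTERS = r"\/(){}[]<>|`~@#$%^&*+=-_:;\""


def rough_token_count(text: str) -> int:
    word_tokens = int(len(text.split(" ")) * 1.333)
    special_tokens = sum(text.count(ch) for ch in SPECIAL_CHARACTERS)
    return word_tokens + special_tokens


prompt = "def add(a, b):\n    return a + b"
print(rough_token_count(prompt))  # an estimate only, not an exact tokenizer count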
<filename>tanuki_py/src/tanuki/trackers/abc_buffered_logger.py<fim_prefix>import json
from abc import abstractmethod
from typing import Dict, Any, Literal
from tanuki.bloom_filter import BloomFilter
from tanuki.constants import EXPECTED_ITEMS, FALSE_POSITIVE_RATE, ALIGN_FILE_EXTENSION, \
POSITIVE_FILE_EXTENSION, NEGATIVE_FILE_EXTENSION, PATCH_FILE_EXTENSION
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
from tanuki.trackers.dataset_worker import DatasetWorker
from tanuki.models.function_config import FunctionConfig
# PATCH_FILE_EXTENSION_TYPE = Literal[".patches"]
# ALIGN_FILE_EXTENSION_TYPE = Literal[".alignments"]
# POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".positive_embedding"]
# NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = Literal[".negative_embedding"]
#
# PATCH_FILE_EXTENSION: PATCH_FILE_EXTENSION_TYPE = ".patches"
# ALIGN_FILE_EXTENSION: ALIGN_FILE_EXTENSION_TYPE = ".alignments"
# POSITIVE_EMBEDDING_FILE_EXTENSION: POSITIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_positives"
# NEGATIVE_EMBEDDING_FILE_EXTENSION: NEGATIVE_EMBEDDING_FILE_EXTENSION_TYPE = ".contrastive_negatives"
#
# EXPECTED_ITEMS = 10000
# FALSE_POSITIVE_RATE = 0.01
# LIB_NAME = "tanuki"
# ENVVAR = "TANUKI_LOG_DIR"
class ABCBufferedLogger(DatasetWorker):
def __init__(self, name, level=15):
self.buffers = {}
self.mapped_files = {}
self.miss_count = 0
self.hit_count = 0
self.flush_limit = {}
self.buffer_rolling_size = {}
self.write_count = 0
self.write_limit = 1000 # Save the Bloom filter every 1000 writes
super().__init__(name, level)
self.bloom_filter = self.create_bloom_filter()
self.load_bloom_filter()
self.default_function_config = FunctionConfig()
@abstractmethod
def get_bloom_filter_persistence(self) -> IBloomFilterPersistence:
"""
        Get an instance of the bloom filter persistence provider. This exposes persistent file storage
        that must support reading and writing raw byte streams.
:return:
"""
pass
@abstractmethod
def load_existing_datasets(self) -> Dict[str, Dict[str, Any]]:
"""
Get the lengths of all datasets backing the registered functions, including aligns.
:return:
"""
pass
@abstractmethod
def ensure_persistence_location_exists(self):
"""
Ensure that the place we will be writing to actually exists. If not, create it.
"""
pass
@abstractmethod
def get_patch_location_for_function(self, func_hash, extension="") -> str:
"""
Get the address of the function patch file.
:param func_hash: The representation of the function
:param extension: Whether this is a patch or an alignment
:return:
"""
pass
@abstractmethod
def write(self, path, data, mode="a") -> None:
pass
@abstractmethod
def read(self, path) -> str:
pass
@abstractmethod
def get_hash_from_path(self, path) -> str:
pass
@abstractmethod
def does_object_exist(self, path) -> bool:
pass
def create_bloom_filter(self):
bloom_filter_persistence = self.get_bloom_filter_persistence()
bloom_filter = BloomFilter(
bloom_filter_persistence,
expected_number_of_elements=EXPECTED_ITEMS,
false_positive_probability=FALSE_POSITIVE_RATE)
return bloom_filter
def load_bloom_filter(self):
try:
self.bloom_filter.load()
except FileNotFoundError:
self.debug("No Bloom filter found. Creating a new one.")
def write_symbolic_align_call(self, func_hash, example) -> bool:
log_file_path = self.get_patch_location_for_function(func_hash, extension=ALIGN_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def write_embeddable_align_call(self, func_hash, example, positive=True) -> bool:
if positive:
log_file_path = self.get_patch_location_for_function(func_hash, extension=POSITIVE_FILE_EXTENSION)
else:
log_file_path = self.get_patch_location_for_function(func_hash, extension=NEGATIVE_FILE_EXTENSION)
try:
# Now, write to the file
dumpable_object = str(example.__dict__)
self.write(log_file_path, dumpable_object + "\n", mode="a")
return True
except Exception as e:
return False
def log_embeddable_align(self, func_hash, example, positive=True, **kws):
"""
Log a contrastive function invocation
Args:
func_hash: A string representation of the function signature and input parameters
example: The example object
positive: Whether the example is positive or negative
**kws:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_embeddable_align_call(func_hash, example, positive)
return successfully_saved, new_datapoint
def log_symbolic_align(self, func_hash, *args, **kws):
"""
Log an align function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param args: Example objects
:param kws:
:return:
"""
successfully_saved, new_datapoint = False, False
try:
self.ensure_persistence_location_exists()
except Exception as e:
return successfully_saved, new_datapoint
example = args[0]
# prepend the function hash to the example
bloom_filter_representation = func_hash + '_' + str(example.__dict__) + '\n'
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
return successfully_saved, new_datapoint
new_datapoint = True
# add to bloom filter
self.bloom_filter.add(bloom_filter_representation)
self.save_bloom_filter()
successfully_saved = self.write_symbolic_align_call(func_hash, example)
return successfully_saved, new_datapoint
def log_symbolic_patch(self, func_hash, example):
"<fim_suffix>""
Log a patched function invocation to the file system
:param func_hash: A string representation of the function signature and input parameters
:param example:
:return:
"""
if not isinstance(func_hash, str):
func_hash = str(func_hash)
example_data = str(example.__dict__).encode('utf-8') + b'\n'
bloom_filter_representation = func_hash + '_' + example_data.decode('utf-8')
# Check Bloom Filter
if self.bloom_filter.lookup(bloom_filter_representation):
self.hit_count += 1
return {}
self.miss_count += 1
# Add to Bloom Filter
self.bloom_filter.add(bloom_filter_representation)
try:
self.ensure_persistence_location_exists()
except Exception as e:
return {}
log_file_path = self.get_patch_location_for_function(func_hash, extension=PATCH_FILE_EXTENSION)
if log_file_path not in self.buffers:
self.buffers[log_file_path] = bytearray()
if log_file_path not in self.flush_limit:
self.flush_limit[log_file_path] = 1
self.buffers[log_file_path].extend(example_data)
self.write_count += 1
if log_file_path not in self.buffer_rolling_size:
self.buffer_rolling_size[log_file_path] = 1
else:
self.buffer_rolling_size[log_file_path] += 1
if self.write_count >= self.write_limit:
written_datapoints = self.flush()
self.save_bloom_filter()
self.write_count = 0 # Reset counter
return written_datapoints
        if len(self.buffers[log_file_path]) >= min(self.flush_limit[log_file_path], 4096):  # Flush when the buffer reaches its flush limit or 4KB, whichever is smaller
written_datapoints = {}
try:
self.write(log_file_path, self.buffers[log_file_path], mode="a+b")
# update buffers
written_datapoints[func_hash] = self.buffer_rolling_size[log_file_path]
self.buffers[log_file_path].clear()
self.buffer_rolling_size[log_file_path] = 0
self.flush_limit[log_file_path] = 2 * self.flush_limit[log_file_path]
self.save_bloom_filter()
except Exception as e:
pass
return written_datapoints
return {}
def save_bloom_filter(self):
try:
self.bloom_filter.save()
except Exception as e:
self.warning("Could not save Bloom filter: {}".format(e))
def flush(self):
# get log directory
written_datapoints = {}
for log_file_path, buffer in self.buffers.items():
if len(buffer) > 0:
try:
self.write(log_file_path, buffer, mode="a+b")
written_datapoints[self.get_hash_from_path(log_file_path)] = self.buffer_rolling_size[log_file_path]
self.buffer_rolling_size[log_file_path] = 0
buffer.clear()
except Exception as e:
pass
return written_datapoints
def load_function_config(self, func_hash):
"""
Get the config file for the function. Uses the message and log directory
Config file has to be in .json
"""
default = False
try: # try to get the config from the disk. If inaccessible, create a new default one
self.ensure_persistence_location_exists()
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
if not self.does_object_exist(config_path):
function_config = self.default_function_config
default = True
func_config_dict = function_config.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
else:
function_config = FunctionConfig().load_from_dict(self.read_json(config_path))
except Exception as e:
function_config = self.default_function_config
default = True
return function_config, default
def update_function_config(self, func_hash, config_to_be_saved):
"""
Save the config file
"""
log_file_path = self.get_patch_location_for_function(func_hash)
config_path = f"{log_file_path}.json"
try:
func_config_dict = config_to_be_saved.to_dict()
# remove teacher_models from the config
func_config_dict.pop("teacher_models")
self.write_json(config_path, func_config_dict)
except Exception as e:
pass
def write_json(self, path, data):
self.write(path, json.dumps(data))
def read_json(self, path):
return json.loads(self.read(path))
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
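log_symbolic_patch above implements per-file write buffering with an exponentially growing flush threshold: each path starts with a flush limit of 1, flushes once the buffer reaches min(limit, 4096) bytes, and doubles the limit after every flush. A condensed standalone sketch of that pattern (a toy class, not the tanuki logger; the temp-file path is illustrative):

# Toy per-path write buffer with a doubling flush limit, mirroring the
# pattern in ABCBufferedLogger.log_symbolic_patch above.
import os
import tempfile
from typing import Dict


class BufferedWriter:
    def __init__(self, flush_cap: int = 4096):
        self.buffers: Dict[str, bytearray] = {}
        self.flush_limit: Dict[str, int] = {}
        self.flush_cap = flush_cap

    def log(self, path: str, record: bytes) -> bool:
        """Append a record; return True if the buffer was flushed to disk."""
        buffer = self.buffers.setdefault(path, bytearray())
        self.flush_limit.setdefault(path, 1)
        buffer.extend(record)
        if len(buffer) >= min(self.flush_limit[path], self.flush_cap):
            with open(path, "a+b") as f:
                f.write(buffer)
            buffer.clear()
            self.flush_limit[path] *= 2  # flush less often as the dataset grows
            return True
        return False


writer = BufferedWriter()
log_path = os.path.join(tempfile.gettempdir(), "example.patches")
writer.log(log_path, b'{"args": [1, 2]}\n')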
<filename>tanuki_py/src/tanuki/persistence/filter/filesystem_bloom.py<fim_prefix>import os
from bitarray._bitarray import bitarray
from tanuki.persistence.filter.bloom_interface import IBloomFilterPersistence
class BloomFilterFileSystemDriver(IBloomFilterPersistence):
"""
This is a Filesystem implementation of a Bloom Filter persistence layer.
"""
def __init__(self, log_directory: str):
self.log_directory = log_directory
def save(self, bit_array: bitarray) -> None:
"""
Write a bloom filter array of bits to the local filesystem.
        :param bit_array: The bloom filter's array of bits, tracking unique function invocations
"""
bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin')
# Append 0 bits to make the length a multiple of 8
while len(bit_array) % 8 != 0:
bit_array.append(0)
with open(bloom_filter_path, 'wb') as f:
f.write(bit_array.tobytes())
def load(self) -> bitarray:
"<fim_suffix>""
Load a bloom filter from the local filesystem.
:return: A bloom filter object containing the state of unique function invocations
"""
bloom_filter_path = os.path.join(self.log_directory, 'bloom_filter_state.bin')
with open(bloom_filter_path, 'rb') as f:
bit_array = bitarray()
bit_array.frombytes(f.read())
while len(bit_array) % 8 != 0:
bit_array.append(0)
return bit_array<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
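The driver above persists the filter by padding the bit array to a whole number of bytes before writing it out. A small round-trip sketch of that save/load logic (requires the third-party bitarray package; the file path is illustrative):

# Round-trip a bitarray through a file, padding to a whole number of bytes,
# as BloomFilterFileSystemDriver.save/load do above.
import os
import tempfile
from bitarray import bitarray

bits = bitarray("1011011")          # 7 bits: not byte-aligned yet
while len(bits) % 8 != 0:
    bits.append(0)                  # pad with zeros, as save() does

path = os.path.join(tempfile.gettempdir(), "bloom_filter_state.bin")
with open(path, "wb") as f:
    f.write(bits.tobytes())

restored = bitarray()
with open(path, "rb") as f:
    restored.frombytes(f.read())

assert restored[:7] == bitarray("1011011")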
<filename>tanuki_py/src/tanuki/models/function_config.py<fim_prefix>from pydantic import BaseModel
from typing import Dict, List
from tanuki.language_models.llm_configs.abc_base_config import BaseModelConfig
from tanuki.language_models.llm_configs import DEFAULT_TEACHER_MODELS, DEFAULT_STUDENT_MODELS
from tanuki.constants import DEFAULT_TEACHER_MODEL_NAMES, DEFAULT_DISTILLED_MODEL_NAME, \
DISTILLED_MODEL, TEACHER_MODEL
from tanuki.language_models.llm_configs.model_config_factory import ModelConfigFactory
config_factory = ModelConfigFactory()
class FunctionConfig(BaseModel):
"""
The function config to execute the inference for the function and distillation.
Parameters
----------
distilled_model : BaseModelConfig -- the distilled model config
current_model_stats : Dict -- the current model stats
last_training_run : Dict -- the last training run
current_training_run : Dict -- the current training run
teacher_models : List[BaseModelConfig] -- the teacher models
nr_of_training_runs : int -- the number of training runs
"""
distilled_model: BaseModelConfig = DEFAULT_STUDENT_MODELS[DEFAULT_DISTILLED_MODEL_NAME]
current_model_stats : Dict = {
"trained_on_datapoints": 0,
"running_faults": []}
last_training_run : Dict = {"trained_on_datapoints": 0}
current_training_run : Dict = {}
teacher_models : List[BaseModelConfig] = [DEFAULT_TEACHER_MODELS[teacher_model_name] for teacher_model_name in DEFAULT_TEACHER_MODEL_NAMES]
nr_of_training_runs : int = 0
def load_from_dict(self, json_dict):
"""
Load the function config from a dict
Args:
json_dict: The dict to load the function config from
Returns:
The function config
"""
self.distilled_model = config_factory.create_config(json_dict["distilled_model"], DISTILLED_MODEL)
self.current_model_stats = json_dict["current_model_stats"]
self.last_training_run = json_dict["last_training_run"]
self.current_training_run = json_dict["current_training_run"]
self.nr_of_training_runs = json_dict["nr_of_training_runs"]
if "teacher_models" in json_dict and len(json_dict["teacher_models"]) > 0:
self.teacher_models = [config_factory.create_config(teacher_model, TEACHER_MODEL) for teacher_model in json_dict["teacher_models"]]
return self
def to_dict(self):
"""
Convert the function config to a dict
Returns:
The dict
"""
try:
config_dictionary = self.model_dump()
except AttributeError as e:
config_dictionary = self.dict()
return config_dictionary
def update_with_finetuned_response(self, response):
"<fim_suffix>""
Update the function config with the finetuned response
Args:
response: The finetuned response
"""
if response.status == "failed":
self.current_training_run = {}
else:
self.distilled_model = response.fine_tuned_model
self.last_training_run = self.current_training_run
self.current_model_stats = {
"trained_on_datapoints": self.current_training_run[
"trained_on_datapoints"],
"running_faults": []}
self.nr_of_training_runs += 1
self.current_training_run = {}
<fim_middle> | null | BLOCK_COMMENT | complete_current_header_empty_completion |
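to_dict and update_with_finetuned_response above rely on a pydantic v2/v1 compatibility fallback (model_dump with a dict() fallback) plus a dict round trip. A toy model showing that pattern (illustrative only, not tanuki's FunctionConfig):

# Toy config showing the model_dump()/dict() compatibility fallback
# and a load_from_dict round trip, mirroring FunctionConfig above.
from typing import Dict
from pydantic import BaseModel


class ToyConfig(BaseModel):
    distilled_model: str = ""
    nr_of_training_runs: int = 0
    current_model_stats: Dict = {"trained_on_datapoints": 0, "running_faults": []}

    def to_dict(self) -> Dict:
        try:
            return self.model_dump()      # pydantic >= 2
        except AttributeError:
            return self.dict()            # pydantic < 2

    def load_from_dict(self, data: Dict) -> "ToyConfig":
        self.distilled_model = data["distilled_model"]
        self.nr_of_training_runs = data["nr_of_training_runs"]
        self.current_model_stats = data["current_model_stats"]
        return self


config = ToyConfig(distilled_model="toy-student", nr_of_training_runs=1)
restored = ToyConfig().load_from_dict(config.to_dict())
assert restored.distilled_model == "toy-student"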